Skip to content

Instantly share code, notes, and snippets.

@ericness
Last active September 23, 2025 13:12
Show Gist options
  • Select an option

  • Save ericness/7b43b5108fa0b7fe3af9e8b049836d9f to your computer and use it in GitHub Desktop.

Select an option

Save ericness/7b43b5108fa0b7fe3af9e8b049836d9f to your computer and use it in GitHub Desktop.
Vercel AI Gateway Structured Output Error
#!/usr/bin/env python3
"""
Standalone script to demonstrate structured output issue with Vercel AI Gateway.
This script replicates the functionality of src/ai_news_agent/nodes/summarize_episode.py
but uses Vercel AI Gateway with LangChain ChatOpenAI and structured output.
Expected to demonstrate that .with_structured_output() doesn't work with Vercel AI Gateway.
"""
import os
from typing import Optional
from dotenv import load_dotenv
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
from pydantic import BaseModel, Field, SecretStr
load_dotenv()
class EpisodeSummary(BaseModel):
"""Structured output model for episode summary."""
title: str = Field(description="The episode title")
summary: str = Field(description="A concise summary of the episode")
key_topics: list[str] = Field(description="List of key topics discussed")
duration: Optional[str] = Field(description="Episode duration", default=None)
published_date: Optional[str] = Field(description="Publication date", default=None)
def main():
"""Main function to test structured output with Vercel AI Gateway."""
# Sample episode data (similar to what would come from RSS feed)
sample_episode = {
"title": "AI Daily Brief: Latest Developments in Large Language Models",
"description": (
"In today's episode, we discuss the latest breakthroughs in LLM technology, "
"including new model releases from major AI labs, improvements in reasoning "
"capabilities, and the growing importance of structured outputs in production "
"systems. We also cover recent developments in AI safety and alignment research, "
"the impact of multimodal AI systems, and emerging trends in enterprise AI adoption."
),
"published_date": "2024-01-15",
"duration": "15 minutes"
}
print("Testing Vercel AI Gateway with structured output...")
print(f"Episode Title: {sample_episode['title']}")
print(f"Episode Duration: {sample_episode['duration']}")
print(f"Published: {sample_episode['published_date']}")
print("-" * 80)
# Configure ChatOpenAI to use Vercel AI Gateway
vercel_token = os.getenv("VERCEL_AI_GATEWAY_TOKEN")
if not vercel_token:
raise ValueError("VERCEL_AI_GATEWAY_TOKEN environment variable is required")
llm = ChatOpenAI(
model="openai/gpt-4o-mini",
temperature=0.3,
base_url="https://ai-gateway.vercel.sh/v1",
api_key=SecretStr(vercel_token),
default_headers={
"Authorization": f"Bearer {vercel_token}"
}
)
# Create prompt template (similar to LangSmith prompt but simplified)
prompt = ChatPromptTemplate.from_messages([
("system", (
"You are an AI assistant that summarizes podcast episodes. "
"Extract the key information and provide a structured summary with "
"the title, a concise summary, key topics discussed, duration, and published date."
)),
("user", (
"Please summarize the following podcast episode:\n\n"
"Title: {title}\n\n"
"Description: {description}\n\n"
"Duration: {duration}\n\n"
"Published Date: {published_date}"
))
])
print("Creating structured LLM with .with_structured_output()...")
# This is where the issue likely occurs - using .with_structured_output() with Vercel Gateway
structured_llm = llm.with_structured_output(EpisodeSummary)
# Create the chain
chain = prompt | structured_llm
print("Invoking chain with structured output...")
# Invoke the chain - this should either work or raise an exception
result = chain.invoke({
"title": sample_episode["title"],
"description": sample_episode["description"],
"duration": sample_episode["duration"],
"published_date": sample_episode["published_date"]
})
# If we get here, structured output worked
print("SUCCESS: Structured output worked!")
print("Result type:", type(result))
print("Result:")
if isinstance(result, EpisodeSummary):
print(f" Title: {result.title}")
print(f" Summary: {result.summary}")
print(f" Key Topics: {result.key_topics}")
print(f" Duration: {result.duration}")
print(f" Published Date: {result.published_date}")
else:
print(f" Raw result: {result}")
return result
if __name__ == "__main__":
main()
@ericness
Copy link
Author

Error:

    raise self._make_status_error_from_response(err.response) from None
openai.BadRequestError: Error code: 400 - {'error': {'message': 'Invalid input', 'type': 'invalid_request_error', 'param': 'response_format', 'code': 'invalid_request_error'}}

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment