# Install notebook dependencies:
#   :zoi     - schema/validation library, pinned to an exact version
#              (NOTE(review): not referenced anywhere below — confirm it is needed)
#   :req_llm - LLM client used for model registry, text generation and streaming
#   :kino    - Livebook widgets (frames, markdown) used for live-rendered output
Mix.install([
{:zoi, "0.8.1"},
{:req_llm, "~> 1.0.0"},
{:kino, "~> 0.17.0"}
])
# List the model identifiers each provider's registry knows about.
# Distinct bindings replace the original's double-binding of `models`,
# which shadowed and discarded the OpenAI result before it could be seen.
{:ok, openai_models} = ReqLLM.Provider.Registry.list_models(:openai)
{:ok, google_models} = ReqLLM.Provider.Registry.list_models(:google)
# Resolve concrete model structs from the provider registry; these bindings
# are used by the generate/stream calls further down.
# (The original fetched :openai "gpt-5" twice with identical arguments;
# the duplicate call was removed.)
{:ok, openai_model} =
  ReqLLM.Provider.Registry.get_model(:openai, "gpt-5")

{:ok, google_model} =
  ReqLLM.Provider.Registry.get_model(:google, "gemini-2.5-flash")
# Register provider API keys with ReqLLM, read from Livebook secrets.
# The original had an extra bare System.fetch_env!("LB_GOOGLE_API_KEY") whose
# result was discarded; it was removed as redundant — fetch_env!/1 below still
# raises if either secret is missing, so the fail-fast behavior is preserved.
ReqLLM.put_key(:openai_api_key, System.fetch_env!("LB_OPENAI_API_KEY"))
ReqLLM.put_key(:google_api_key, System.fetch_env!("LB_GOOGLE_API_KEY"))
# Smoke-test both providers with the same prompt.
# generate_text!/2 is the bang variant: it raises on failure instead of
# returning an error tuple; each cell's result is displayed by Livebook.
ReqLLM.generate_text!(openai_model, "Who are you?")
ReqLLM.generate_text!(google_model, "Who are you?")
# Stream a markdown report token-by-token into a live Kino frame.
output_frame = Kino.Frame.new()
Kino.render(output_frame)

{:ok, response} =
  ReqLLM.stream_text(
    openai_model,
    "Write a short markdown-formatted report on the history of the Shure SM7B microphone.",
    provider_options: [reasoning_effort: :low]
  )

# Fold the token stream into a growing string, re-rendering the frame after
# every token so the markdown appears incrementally in the notebook.
# (Leftover IO.inspect debug calls were removed; the one piped after
# tokens/1 printed the lazy stream struct itself, not the tokens.)
response
|> ReqLLM.StreamResponse.tokens()
|> Enum.reduce("", fn token, acc ->
  text = acc <> token
  Kino.Frame.render(output_frame, Kino.Markdown.new(text))
  text
end)
# List the provider-specific options the OpenAI adapter accepts
# (useful to verify options like :reasoning_effort used above).
ReqLLM.Providers.OpenAI.supported_provider_options()
# Fresh output frame for the structured-output (stream_object) demo below;
# rebinds `output_frame` so the earlier frame is left untouched.
output_frame = Kino.Frame.new()
Kino.render(output_frame)
# Keyword-list schema (NimbleOptions style) describing the structured object
# the model must return: which language a given text is written in.
schema = [
language_name: [type: :string, required: true, doc: "Language name in English"],
language_code: [type: :string, required: true, doc: "ISO-639 language code"],
language_family: [type: :string, required: true, doc: "Language family"]
]
# Conversation context: a system prompt fixing the assistant's role, plus one
# user message (Swedish: "I would like a coffee") for it to analyse.
system_message =
  ReqLLM.Context.system(
    "You are a helpful language analysis assistant. Given a text you will analyse it and respond according to schema"
  )

user_message = ReqLLM.Context.user("Jag skulle vilja ha en kaffe")

context = ReqLLM.Context.new([system_message, user_message])
# Stream a structured object conforming to `schema` from the OpenAI model.
# NOTE(review): the keyword-list `schema` is passed verbatim as
# `json_schema.schema` inside response_format, but OpenAI's structured-output
# API expects a JSON Schema map there, and stream_object/4 typically derives
# the response format from the schema argument itself — confirm this manual
# override is needed and that ReqLLM converts the keyword list for the wire.
{:ok, response} = ReqLLM.stream_object(openai_model,
context,
schema,
provider_options: [response_format: %{
type: "json_schema",
json_schema: %{
name: "language_analysis",
schema: schema
}
}])
# Accumulate streamed tokens and live-render the growing payload in the frame.
# (The per-token IO.inspect debug call from the original was removed.)
# NOTE(review): stream_object tokens are fragments of a JSON object, so this
# renders raw JSON text via Kino.Markdown — confirm that is the intended
# display rather than decoding the finished object.
response
|> ReqLLM.StreamResponse.tokens()
|> Enum.reduce("", fn token, acc ->
  text = acc <> token
  Kino.Frame.render(output_frame, Kino.Markdown.new(text))
  text
end)