Created
November 17, 2025 14:53
-
-
Save mikehostetler/02a951aa669aad47ed0369e02d1b66bb to your computer and use it in GitHub Desktop.
ReqLLM V1 Streaming Agent
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#!/usr/bin/env elixir
# Standalone demo script. `Mix.install/1` fetches/compiles dependencies at
# script startup; here ReqLLM is loaded from the parent directory
# (path dep), so this script is meant to live inside the ReqLLM repo.
Mix.install([
  {:req_llm, path: ".."}
])
# Quiet info/debug logging so only the streamed answer appears on stdout.
Logger.configure(level: :warning)
defmodule SimpleAgent.V1 do
  @moduledoc """
  Minimal streaming chat agent backed by ReqLLM.

  Keeps a model identifier and a running `ReqLLM.Context` in GenServer
  state. Each `ask/2` streams the model's reply to stdout as it arrives,
  collects the full text, and records both the user and assistant turns
  in the conversation history.
  """

  use GenServer

  alias ReqLLM.Context
  import ReqLLM.Context

  defstruct [:model, :context]

  @doc "Starts the agent. Accepts `:model` plus an optional `:name` for process registration."
  def start_link(opts \\ []) do
    GenServer.start_link(__MODULE__, opts, name: Keyword.get(opts, :name))
  end

  @doc "Sends `user_text` to the model; returns `{:ok, reply_text}` or `{:error, reason}`."
  def ask(pid, user_text) when is_binary(user_text) do
    GenServer.call(pid, {:ask, user_text}, 30_000)
  end

  @impl true
  def init(opts) do
    system_prompt = """
    You are a helpful teacher. Keep explanations short and clear.
    """

    state = %__MODULE__{
      model: Keyword.get(opts, :model),
      context: Context.new([system(system_prompt)])
    }

    {:ok, state}
  end

  @impl true
  def handle_call({:ask, user_text}, _from, %{model: model, context: context} = state) do
    context = Context.append(context, user(user_text))

    case ReqLLM.stream_text(model, context.messages) do
      {:ok, response} ->
        reply = print_and_collect(response)
        IO.write("\n\n")

        # Tip: `ReqLLM.StreamResponse.usage/1` and
        # `ReqLLM.StreamResponse.finish_reason/1` expose streaming metadata.

        # Only record an assistant turn when the model actually produced text.
        context = if reply != "", do: Context.append(context, assistant(reply)), else: context
        {:reply, {:ok, reply}, %{state | context: context}}

      {:error, error} ->
        IO.puts("\nError: #{inspect(error)}\n")
        {:reply, {:error, error}, state}
    end
  end

  # Echoes every content chunk to stdout as it streams in and returns the
  # concatenated reply. Non-content chunks (usage, finish_reason, etc.)
  # are dropped here.
  defp print_and_collect(response) do
    response.stream
    |> Stream.filter(fn chunk -> chunk.type == :content end)
    |> Enum.reduce("", fn chunk, acc ->
      IO.write(chunk.text)
      acc <> chunk.text
    end)
  end
end
# --- Demo driver ------------------------------------------------------------
# Model is overridable via the REQ_LLM_MODEL env var; defaults to Claude.
model = System.get_env("REQ_LLM_MODEL") || "anthropic:claude-sonnet-4-5"

IO.puts("=== SimpleAgent V1 - Streaming Demo ===\n")
IO.puts("Model: #{model}")

{:ok, pid} = SimpleAgent.V1.start_link(model: model)

question = "In 2 short sentences, explain what streaming means for LLM responses."
IO.puts("\nQuestion: #{question}\n")
{:ok, _response} = SimpleAgent.V1.ask(pid, question)

IO.puts("\n\nV1 Demo Complete!")
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment