Last active
January 30, 2026 16:36
-
-
Save themadarchitect/39b667268e6b33559fa067e6504a907b to your computer and use it in GitHub Desktop.
openai
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
---
# LibreChat configuration — schema version and top-level toggles.
version: 1.2.8
# Enable server-side config caching.
cache: true
# OAuth providers shown on the registration/login screen.
registration:
  socialLogins:
    - "discord"
    - "facebook"
    - "github"
    - "google"
    - "openid"
# Custom API endpoints. Credentials come from environment variables
# (never hard-code keys here).
endpoints:
  custom:
    # OpenRouter-backed endpoint; model IDs are OpenRouter slugs.
    # NOTE(review): duplicate model IDs removed from the list below —
    # each slug now appears exactly once.
    - name: "ModelHub"
      apiKey: "${OPENROUTER_KEY}"
      baseURL: "https://openrouter.ai/api/v1"
      models:
        default:
          - "x-ai/grok-4-fast"
          - "x-ai/grok-4.1-fast"
          - "xiaomi/mimo-v2-flash:free"
          - "prime-intellect/intellect-3"
          - "deepseek/deepseek-v3.2-speciale"
          - "anthropic/claude-haiku-4.5"
          - "google/gemini-2.5-flash-preview-09-2025"
          - "openai/gpt-5.1-codex-max"
          - "openai/gpt-5.2"
          - "google/gemini-3-flash-preview"
          - "gpt-oss/gpt-oss-120b"
          - "cognitivecomputations/dolphin3.0-mistral-24b"
          - "cognitivecomputations/dolphin3.0-r1-mistral-24b"
          - "deepseek/deepseek-chat"
          - "deepseek/deepseek-r1-distill-llama-70b"
          - "deepseek/deepseek-r1-distill-qwen-14b"
          - "deepseek/deepseek-r1-distill-qwen-32b"
          - "deepseek/deepseek-r1-zero"
          - "deepseek/deepseek-r1"
          - "google/gemini-3-pro-preview"
          - "google/gemma-2-9b-it"
          - "google/gemma-3-12b-it"
          - "google/gemma-3-1b-it"
          - "google/gemma-3-27b-it"
          - "google/gemma-3-4b-it"
          - "google/learnlm-1.5-pro-experimental"
          - "gryphe/mythomax-l2-13b"
          - "huggingfaceh4/zephyr-7b-beta"
          - "meta-llama/llama-3-8b-instruct"
          - "meta-llama/llama-3.1-8b-instruct"
          - "meta-llama/llama-3.2-11b-vision-instruct"
          - "meta-llama/llama-3.2-1b-instruct"
          - "meta-llama/llama-3.2-3b-instruct"
          - "meta-llama/llama-3.3-70b-instruct"
          - "microsoft/phi-3-medium-128k-instruct"
          - "microsoft/phi-3-mini-128k-instruct"
          - "mistralai/mistral-7b-instruct"
          - "mistralai/mistral-nemo"
          - "mistralai/mistral-small-24b-instruct-2501"
          - "mistralai/mistral-small-3.1-24b-instruct"
          - "moonshotai/moonlight-16b-a3b-instruct"
          - "nousresearch/deephermes-3-llama-3-8b-preview"
          - "nvidia/llama-3.1-nemotron-70b-instruct"
          - "open-r1/olympiccoder-32b"
          - "open-r1/olympiccoder-7b"
          - "openchat/openchat-7b"
          - "qwen/qwen-2-7b-instruct"
          - "qwen/qwen-2.5-72b-instruct"
          - "qwen/qwen-2.5-coder-32b-instruct"
          - "qwen/qwen2.5-vl-72b-instruct"
          - "qwen/qwq-32b-preview"
          - "qwen/qwq-32b"
          - "rekaai/reka-flash-3"
          - "sophosympatheia/rogue-rose-103b-v0.2"
          - "undi95/toppy-m-7b"
          - "deepseek/deepseek-r1-0528"
          # (provider catalogs—IDs only)
          - "anthropic/claude-3.5-haiku"
          - "anthropic/claude-3.5-sonnet"
          - "anthropic/claude-3.7-sonnet"
          - "anthropic/claude-sonnet-4"
          - "anthropic/claude-sonnet-4.5"
          - "anthropic/claude-opus-4"
          - "anthropic/claude-opus-4.1"
          - "anthropic/claude-opus-4.5"
          - "amazon/nova-2-lite-v1"
          - "x-ai/grok-4.1-fast:free"
          - "x-ai/grok-code-fast-1"
          - "01-ai/yi-large"
          - "aetherwiing/mn-starcannon-12b"
          - "ai21/jamba-1-5-large"
          - "ai21/jamba-1-5-mini"
          - "ai21/jamba-1.6-large"
          - "ai21/jamba-1.6-mini"
          - "ai21/jamba-instruct"
          - "aion-labs/aion-1.0"
          - "aion-labs/aion-1.0-mini"
          - "aion-labs/aion-rp-llama-3.1-8b"
          - "allenai/llama-3.1-tulu-3-405b"
          - "allenai/olmo-2-0325-32b-instruct"
          - "alpindale/goliath-120b"
          - "alpindale/magnum-72b"
          - "amazon/nova-lite-v1"
          - "amazon/nova-micro-v1"
          - "amazon/nova-pro-v1"
          - "anthracite-org/magnum-v2-72b"
          - "anthracite-org/magnum-v4-72b"
          - "cognitivecomputations/dolphin-mixtral-8x22b"
          - "cognitivecomputations/dolphin-mixtral-8x7b"
          - "cohere/command"
          - "cohere/command-a"
          - "cohere/command-r"
          - "cohere/command-r-03-2024"
          - "cohere/command-r-08-2024"
          - "cohere/command-r-plus"
          - "cohere/command-r-plus-04-2024"
          - "cohere/command-r-plus-08-2024"
          - "cohere/command-r7b-12-2024"
          - "deepseek/deepseek-v3.1-terminus"
          - "deepseek/deepseek-chat-v3-0324"
          - "deepseek/deepseek-r1-0528-qwen3-8b"
          - "deepseek/deepseek-r1-distill-llama-8b"
          - "deepseek/deepseek-r1-distill-qwen-1.5b"
          - "deepseek/deepseek-prover-v2"
          - "eva-unit-01/eva-llama-3.33-70b"
          - "eva-unit-01/eva-qwen-2.5-32b"
          - "eva-unit-01/eva-qwen-2.5-72b"
          - "google/gemini-2.5-flash-image-nano-banana"
          - "google/gemini-2.5-pro"
          - "google/gemini-2.5-pro-preview"
          - "google/gemini-2.5-pro-preview-05-06"
          - "google/gemini-2.5-pro-preview-03-25"
          - "google/gemini-2.5-flash"
          - "moonshotai/kimi-k2-thinking"
          - "moonshotai/kimi-k2.5"
          - "minimax/minimax-m2-her"
          - "minimax/minimax-m2.1"
          - "z-ai/glm-4.7"
          - "openai/gpt-5.2-codex"
          - "z-ai/glm-4.6v"
          - "writer/palmyra-x5"
          - "google/gemini-3-pro-image-preview"
          - "google/gemini-pro"
          - "google/gemini-pro-1.5"
          - "google/gemini-pro-vision"
          - "google/gemma-2-27b-it"
          - "google/gemma-7b-it"
          - "google/palm-2-chat-bison"
          - "google/palm-2-chat-bison-32k"
          - "google/palm-2-codechat-bison"
          - "google/palm-2-codechat-bison-32k"
          - "infermatic/mn-inferor-12b"
          - "inflection/inflection-3-pi"
          - "inflection/inflection-3-productivity"
          - "jondurbin/airoboros-l2-70b"
          - "latitudegames/wayfarer-large-70b-llama-3.3"
          - "allenai/olmo-3-32b-think"
          - "liquid/lfm-3b"
          - "liquid/lfm-40b"
          - "liquid/lfm-7b"
          - "mancer/weaver"
          - "meta-llama/llama-2-13b-chat"
          - "meta-llama/llama-2-70b-chat"
          - "meta-llama/llama-3-70b-instruct"
          - "meta-llama/llama-3.1-405b"
          - "meta-llama/llama-3.1-405b-instruct"
          - "meta-llama/llama-3.1-70b-instruct"
          - "meta-llama/llama-3.2-90b-vision-instruct"
          - "meta-llama/llama-guard-2-8b"
          - "meta-llama/llama-guard-3-8b"
          - "microsoft/phi-3.5-mini-128k-instruct"
          - "microsoft/phi-4"
          - "microsoft/phi-4-multimodal-instruct"
          - "microsoft/wizardlm-2-7b"
          - "microsoft/wizardlm-2-8x22b"
          - "minimax/minimax-01"
          - "mistralai/codestral-2501"
          - "mistralai/codestral-mamba"
          - "mistralai/ministral-3b"
          - "mistralai/ministral-8b"
          - "mistralai/mistral-large"
          - "mistralai/mistral-large-2407"
          - "mistralai/mistral-large-2411"
          - "mistralai/mistral-medium"
          - "mistralai/mistral-saba"
          - "mistralai/mistral-small"
          - "mistralai/mistral-tiny"
          - "mistralai/mixtral-8x22b-instruct"
          - "mistralai/mixtral-8x7b"
          - "mistralai/mixtral-8x7b-instruct"
          - "mistralai/pixtral-12b"
          - "mistralai/pixtral-large-2411"
          - "neversleep/llama-3-lumimaid-70b"
          - "neversleep/llama-3.1-lumimaid-70b"
          - "neversleep/llama-3.1-lumimaid-8b"
          - "neversleep/noromaid-20b"
          - "nothingiisreal/mn-celeste-12b"
          - "nousresearch/hermes-4-405b"
          - "nousresearch/hermes-2-pro-llama-3-8b"
          - "nousresearch/hermes-3-llama-3.1-405b"
          - "nousresearch/hermes-3-llama-3.1-70b"
          - "nousresearch/nous-hermes-2-mixtral-8x7b-dpo"
          - "nousresearch/nous-hermes-llama2-13b"
          - "openai/gpt-5"
          - "openai/gpt-5-chat"
          - "openai/gpt-5-codex"
          - "openai/gpt-4o-audio"
          - "openai/gpt-5-nano"
          - "openai/gpt-5-mini"
          - "openai/chatgpt-4o-latest"
          - "openai/gpt-4o"
          - "openai/gpt-4o-mini"
          - "openai/gpt-4o-mini-search-preview"
          - "openai/gpt-4o-search-preview"
          - "openai/gpt-4.1"
          - "openai/gpt-4.1-nano"
          - "openai/o1"
          - "openai/o1-mini"
          - "openai/o1-pro"
          - "openai/o3-mini"
          - "openai/o3-mini-high"
          - "openai/o4-mini"
          - "openai/o4-mini-high"
          - "openai/codex-mini"
          - "openrouter/auto"
          - "perplexity/llama-3.1-sonar-large-128k-online"
          - "perplexity/llama-3.1-sonar-small-128k-online"
          - "perplexity/r1-1776"
          - "perplexity/sonar"
          - "perplexity/sonar-deep-research"
          - "perplexity/sonar-pro"
          - "perplexity/sonar-reasoning"
          - "perplexity/sonar-reasoning-pro"
          - "pygmalionai/mythalion-13b"
          - "qwen/qwen3-vl-235b-a22b-thinking"
          - "qwen/qwen-max"
          - "qwen/qwen3-coder-plus"
          - "qwen/tongyi-deepresearch-30b-a3b"
          - "qwen/qwen3-coder-flash"
          - "qwen/qwen3-next-80b-a3b-thinking"
          - "qwen/qwen-plus-0728"
          - "qwen/qwen3-coder-30b-a3b-instruct"
          - "qwen/qwen3-235b-a22b"
          - "qwen/qwen3-32b"
          - "qwen/qwen3-14b"
          - "qwen/qwen3-30b-a3b"
          - "qwen/qwen3-1.7b"
          - "qwen/qwen-2-72b-instruct"
          - "qwen/qwen-2.5-7b-instruct"
          - "qwen/qwen-2.5-vl-72b-instruct"
          - "qwen/qwen-2.5-vl-7b-instruct"
          - "qwen/qwen-plus"
          - "qwen/qwen-turbo"
          - "qwen/qwen-vl-max"
          - "qwen/qwen-vl-plus"
          - "qwen/qwen2.5-32b-instruct"
          - "raifle/sorcererlm-8x22b"
          - "sao10k/fimbulvetr-11b-v2"
          - "sao10k/l3-euryale-70b"
          - "sao10k/l3-lunaris-8b"
          - "sao10k/l3.1-70b-hanami-x1"
          - "sao10k/l3.1-euryale-70b"
          - "sao10k/l3.3-euryale-70b"
          - "sophosympatheia/midnight-rose-70b"
          - "steelskull/l3.3-electra-r1-70b"
          - "teknium/openhermes-2.5-mistral-7b"
          - "thedrummer/anubis-70b-v1.1"
          - "thedrummer/anubis-pro-105b-v1"
          - "thedrummer/rocinante-12b"
          - "thedrummer/skyfall-36b-v2"
          - "thedrummer/unslopnemo-12b"
          - "tokyotech-llm/llama-3.1-swallow-70b-instruct-v0.3"
          - "ui-tars/ui-tars-7b"
          - "undi95/remm-slerp-l2-13b"
          - "xwin-lm/xwin-lm-70b"
          - "z-ai/glm-4.6"
          - "black-forest-labs/flux.2-pro"
          - "black-forest-labs/flux.2-flex"
      # Do not fetch the live model list; use the static list above.
      fetch: false
      # Parameters stripped before forwarding requests upstream.
      dropParams:
        - "stop"
      titleConvo: true
      titleModel: "google/gemini-2.5-flash"
      summarize: true
      summaryModel: "google/gemini-2.5-flash"
      forcePrompt: false
      modelDisplayLabel: "OpenRouter"
    # Direct Perplexity endpoint (search-augmented models).
    - name: "Perplexity"
      apiKey: "${PERPLEXITY_API_KEY}"
      baseURL: "https://api.perplexity.ai/"
      models:
        default:
          - "perplexity/r1-1776"
          - "perplexity/sonar"
          - "perplexity/sonar-deep-research"
          - "perplexity/sonar-pro"
          - "perplexity/sonar-reasoning"
          - "perplexity/sonar-reasoning-pro"
          - "perplexity/llama-3.1-sonar-large-128k-online"
          - "perplexity/llama-3.1-sonar-small-128k-online"
      fetch: false
      titleConvo: true
      titleModel: "perplexity/llama-3.1-sonar-small-128k-online"
      summarize: false
      summaryModel: "perplexity/llama-3.1-sonar-small-128k-online"
      forcePrompt: false
      # Perplexity's API rejects these parameters.
      dropParams:
        - "stop"
        - "frequency_penalty"
      modelDisplayLabel: "Perplexity"
# Conversation-memory feature: what the agent may store and which
# model/provider performs memory extraction.
memory:
  disabled: false
  # Only these keys may be written to memory.
  validKeys:
    - "user_preferences"
    - "conversation_context"
    - "learned_facts"
    - "personal_information"
    - "project_information"
    - "research_context"
    - "scientific_research_plan_information"
    - "software_engineering_stack"
  tokenLimit: 10000
  personalize: true
  messageWindowSize: 9
  agent:
    # Must match a configured endpoint name (see endpoints.custom).
    provider: "ModelHub"
    model: "google/gemini-2.5-flash"
    instructions: |
      Here are the expanded instructions for storing memory, covering all the specified validKeys. Store memory using only the specified validKeys...
    model_parameters:
      # Low temperature for deterministic memory extraction.
      temperature: 0.2
      max_tokens: 8000
      top_p: 0.8
      frequency_penalty: 0.2
# Web search: Serper for search, Firecrawl for scraping, Jina for reranking.
webSearch:
  serperApiKey: "${SERPER_API_KEY}"
  searchProvider: "serper"
  firecrawlApiKey: "${FIRECRAWL_API_KEY}"
  firecrawlApiUrl: "${FIRECRAWL_API_URL}"
  scraperType: "firecrawl"
  jinaApiKey: "${JINA_API_KEY}"
  # Cohere key is configured but rerankerType selects Jina below.
  cohereApiKey: "${COHERE_API_KEY}"
  rerankerType: "jina"
  # Scrape timeout in milliseconds.
  scraperTimeout: 30000
  safeSearch: 0
# Speech: OpenAI Whisper for STT, ElevenLabs for TTS.
speech:
  stt:
    openai:
      apiKey: "${OPENAI_API_KEY}"
      model: "whisper-1"
  tts:
    elevenlabs:
      apiKey: "${ELEVENLABS_API_KEY}"
      model: "eleven_multilingual_v2"
      # ElevenLabs voice IDs.
      voices: ["kcQkGnn0HAT2JRDQ4Ljp", "L0yTtpRXzdyzQlzALhgD"]
    # Alternative OpenAI TTS provider (disabled):
    # openai:
    #   apiKey: "${OPENAI_API_KEY}"
    #   model: "tts-1"
    #   voices: ["alloy", "echo", "fable", "onyx", "nova", "shimmer"]
  speechTab:
    conversationMode: true
    advancedMode: false
    speechToText:
      engineSTT: "external"
      languageSTT: "English (US)"
      autoTranscribeAudio: true
      # Microphone activation threshold (dB).
      decibelValue: -45
      autoSendText: 0
    textToSpeech:
      engineTTS: "external"
      voice: "alloy"
      languageTTS: "en"
      automaticPlayback: true
      playbackRate: 1.0
      cacheTTS: true
# MCP (Model Context Protocol) servers reachable from LibreChat.
mcpServers:
  slr_mcp:
    type: "websocket"
    # Docker-network hostname; adjust if running outside compose.
    url: "ws://slr-mcp:9009"
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment