Skip to content

Instantly share code, notes, and snippets.

@MoffKalast
Forked from jefftriplett/hubcap.py
Last active November 7, 2025 14:36
Show Gist options
  • Select an option

  • Save MoffKalast/6e98b2d615f4b7a7ceeccff27eab532e to your computer and use it in GitHub Desktop.

Select an option

Save MoffKalast/6e98b2d615f4b7a7ceeccff27eab532e to your computer and use it in GitHub Desktop.
Hubcap for Qwen3-Coder-30B-A3B
import json
import subprocess
import typer
from openai import OpenAI
from rich import print
from rich.markup import escape
# llama.cpp exposes an OpenAI-compatible endpoint; the API key is a placeholder
# since the local server performs no authentication.
client = OpenAI(api_key="no-key-required", base_url="http://localhost:8080/v1")
# System prompt that teaches the model this script's ad-hoc tool protocol:
# exactly one JSON object between <|tool_call_start|> / <|tool_call_end|>
# markers per turn, parsed by parse_tool_call(). Three tools are defined:
# execute_shell, ask_user, and finish (the loop's only exit signal).
SYSTEM = """You are a coding agent that can execute shell commands and ask the user for information using tool calls.
When you need to execute a shell command, use the following format:
<|tool_call_start|>
{"name": "execute_shell", "arguments": {"command": "<command>"}}
<|tool_call_end|>
When you need to ask the user for information, use the following format:
<|tool_call_start|>
{"name": "ask_user", "arguments": {"question": "<your question>"}}
<|tool_call_end|>
After seeing command output or user responses, analyze them and decide on next steps.
When you've completed the goal, or can't proceed further, use the following exit format:
<|tool_call_start|>
{"name": "finish", "arguments": {"result": "Task completed!"}}
<|tool_call_end|>"""
def execute_shell_command(command: str) -> dict:
    """Run *command* through the shell and capture its combined output.

    stderr is merged into stdout so the model sees everything the command
    printed, in order. Returns {"return_code": int, "output": str}; a
    non-zero exit status is reported in "return_code" rather than raised.

    NOTE(review): shell=True executes model-generated text. That is this
    tool's purpose, and confirm_execution() gates every command, but the
    risk is worth keeping in mind.
    """
    # subprocess.run with the default check=False avoids the try/except
    # dance check_output needs just to recover a failing command's output.
    completed = subprocess.run(
        command,
        shell=True,
        text=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
    )
    return {"return_code": completed.returncode, "output": completed.stdout}
def confirm_execution(command: str) -> bool:
    """Show *command* and let the user veto it.

    Any reply other than 'n' (case-insensitive, surrounding whitespace
    ignored) counts as approval — pressing ENTER confirms.
    """
    print(f"[bold red]⚠️ About to execute:[/bold red] {command}")
    answer = input("Press ENTER to confirm, or 'n' to skip: ")
    return answer.strip().lower() != 'n'
def ask_user(question: str) -> str:
    """Display *question* to the user and return their stripped reply.

    Bug fix: the original passed rich markup to input(), but input() writes
    its prompt raw to stdout, so the user saw the literal text
    "[bold cyan]Your answer:[/bold cyan] ". The styled prompt is now
    rendered through rich's print and input() is called with no prompt.
    """
    print(f"[bold cyan]❓ Question:[/bold cyan] {question}")
    print("[bold cyan]Your answer:[/bold cyan] ", end="")
    return input().strip()
def parse_tool_call(text: str) -> dict | None:
"""Extract tool_call from text and return parsed JSON"""
start_marker = "<|tool_call_start|>"
end_marker = "<|tool_call_end|>"
start_idx = text.find(start_marker)
end_idx = text.find(end_marker)
if start_idx == -1 or end_idx == -1:
return None
# Extract JSON string
json_str = text[start_idx + len(start_marker):end_idx].strip()
try:
tool_call = json.loads(json_str)
return tool_call
except json.JSONDecodeError:
return None
def main(prompt: str):
    """Agent REPL: repeatedly query the local model and act on its tool calls.

    Runs at most 20 model turns. Each assistant reply is scanned for one
    tool call (ask_user / execute_shell / finish); the tool's result is
    appended to the conversation as a user message and the loop continues
    until a 'finish' call or the iteration cap.
    """
    messages = [
        {"role": "system", "content": SYSTEM},
        {"role": "user", "content": f"GOAL: {prompt}\n\nWhat is your plan and first step?"}
    ]
    print(f"[blue][USER][/blue] {prompt}\n")
    # Hard cap so a confused model cannot loop forever.
    max_iterations = 20
    iteration = 0
    while iteration < max_iterations:
        iteration += 1
        # Call llama.cpp server (OpenAI-compatible endpoint; the model name
        # is just a label for the single model the server was started with).
        response = client.chat.completions.create(
            model="qwen",
            messages=messages,
            temperature=0.7,
            max_tokens=512
        )
        assistant_message = response.choices[0].message.content
        messages.append({"role": "assistant", "content": assistant_message})
        print(f"[yellow][ASSISTANT][/yellow] {assistant_message}\n")
        # Parse tool call — only the first call in the reply is honored.
        tool_call = parse_tool_call(assistant_message)
        if tool_call:
            tool_name = tool_call.get("name")
            tool_args = tool_call.get("arguments", {})
            if tool_name == "ask_user":
                question = tool_args.get("question")
                if question:
                    print("[cyan]Processing user question...[/cyan]")
                    user_answer = ask_user(question)
                    # escape() neutralizes rich markup when the answer is
                    # echoed; note the escaped form is also what the model
                    # sees in the transcript.
                    feedback = f"User's answer: {escape(user_answer)}\n\nWhat's next?"
                    messages.append({"role": "user", "content": feedback})
                else:
                    messages.append({"role": "user", "content": "Invalid ask_user tool call: missing 'question' argument"})
            elif tool_name == "execute_shell":
                command = tool_args.get("command")
                if command:
                    print(f"[cyan]Executing command:[/cyan] {command}")
                    # The user can veto every command before it runs; a veto
                    # is reported back to the model as return code -1.
                    if not confirm_execution(command):
                        result = {
                            "return_code": -1,
                            "output": "Command execution cancelled by user"
                        }
                    else:
                        result = execute_shell_command(command)
                    print(f"[green][EXECUTED][/green] Return code: {result['return_code']}")
                    print(f"[dim]Output:[/dim]\n{escape(result['output'])}\n")
                    feedback = f"Command executed with return code {result['return_code']}.\n\nOutput:\n{escape(result['output'])}\n\nWhat's next?"
                    messages.append({"role": "user", "content": feedback})
                else:
                    messages.append({"role": "user", "content": "Invalid execute_shell tool call: missing 'command' argument"})
            elif tool_name == "finish":
                # NOTE(review): escape(None) raises TypeError if the model
                # omits "result" — tool_args.get("result", "") would be safer.
                message = escape(tool_args.get("result"))
                print(f"[bold green]{message}[/bold green]")
                break
            else:
                messages.append({"role": "user", "content": f"Unknown tool name '{tool_name}': use 'ask_user' or 'execute_shell'"})
        else:
            # No tool call found, ask for clarification
            # NOTE(review): the nudge below says "output DONE if finished",
            # but no code path ever checks for DONE — only a 'finish' tool
            # call exits the loop.
            print("[yellow]No valid tool call found in response. Asking for next step...[/yellow]\n")
            messages.append({"role": "user", "content": "Please provide the next action using the tool_call format, or output DONE if finished."})
    if iteration >= max_iterations:
        print("[bold red]⚠️ Maximum iterations reached[/bold red]")
# typer turns main(prompt) into a CLI taking the goal as its one positional argument.
if __name__ == "__main__":
    typer.run(main)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment