Chat between AIs
| """ | |
| This script facilitates a chat between multiple AI models, allowing them to take turns responding. | |
| It is designed to be run in a terminal environment on linux/macOS, but should work on windows as well. | |
| Finally, it saves the chat history to a file, by default `/tmp/chat_history_<timestamp>.txt`. | |
| # Prerequisites: | |
| - Python 3.7 or higher | |
| - Required Python packages: `pip install ollama` | |
| - Ollama server running: https://ollama.com/ | |
| - (Optional) Install `say` command on macOS if not installed, `spd-say` on linux (if not installed) or `ptts` on windows for voice output. | |
| # Usage: chat2chat.py [-h] [-d DESCRIPTION] | |
| [-f FILENAME] | |
| [-l TALKERS [TALKERS ...]] | |
| [-m MODELS [MODELS ...]] [-q] | |
| [-p PROMPT] [-r RATE] | |
| [-t TEMPERATURE] [-x MAX_TOKENS] | |
| `Ctrl + C` to stop the program. | |
| ## Options: | |
| -h, --help show this help message and exit | |
| -c COMMAND, --command COMMAND | |
| Say command to execute (default: say) | |
| -d DESCRIPTION, --description DESCRIPTION | |
| Description for the AI model (default: You are curious but like when | |
| others ask you questions as well. You answer very briefly!) | |
| -f FILENAME, --file FILENAME, --out FILENAME, --output FILENAME | |
| File to save chat history (default: /tmp/chat_history_<timestamp>.txt) | |
| -l TALKERS [TALKERS ...], --talkers TALKERS [TALKERS ...] | |
| Names of the talkers (default: ['Karen', 'Samantha']) | |
| -m MODELS [MODELS ...], --models MODELS [MODELS ...] | |
| Models to use (default: ['llama3.2']) | |
| -q, --quiet Suppress voice output (default: False) | |
| -p PROMPT, --prompt PROMPT | |
| Prompt for the AI model (default: Ask me any short random question.) | |
| -r RATE, --rate RATE Speech rate for voice output (words per minute) (default: 186) | |
| -t TEMPERATURE, --temp TEMPERATURE, --temperature TEMPERATURE | |
| Temperature for response randomness (default: 0.8) | |
| -x MAX_TOKENS, --max-tokens MAX_TOKENS | |
| Maximum tokens for response (in Kilobytes) (default: 16) | |
| # Example commands (all parameters are optional): | |
| 1. All defaults | |
| `python chat2chat.py` | |
| 2. Running on linux with `spd-say` with a custom model | |
| `ollama pull gemma3` | |
| `python chat2chat.py -m gemma3 -c spd-say -r 50 -l male1 female2` | |
| NOTE: For `spd-say`, please see https://manpages.ubuntu.com/manpages/questing/en/man1/spd-say.1.html | |
| 3. Running in quiet mode with many parameters | |
| `python chat2chat.py -c /usr/bin/say -d "You are a helpful assistant." -f /tmp/chat_history.txt -l Keren Daniel Samantha -m gemma3 llama3.2 -p "What is the capital of France?" -r 190 -t 0.7 -x 32 -q` | |
| 4. Running with `ptts` on windows and a bit of heat :) | |
| `python chat2chat.py -c ptts -t 0.9` | |
| NOTE: For `ptts`, please see https://jampal.sourceforge.net/ptts.html | |
| # License: WTFPL | |
| NOTE: If you don't have the `say` command, you must can use an alternative like `spd-say` on linux or `ptts` on windows, otherwise just run the command with the -q (quiet) flag. | |
| NOTE: To get the full list of voices for the talkers, run `say -v '?'` in your terminal, as for `spd-say`, you can run `spd-say -L` to list all available voices. For `ptts`, run the following in the powershell `cscript "C:\Program Files\Jampal\ptts.vbs"` | |
| NOTE: The script will exit if the response contains "bye" and less than 10 chars. | |
| """ | |
import argparse
import subprocess
from io import TextIOWrapper
from random import choice, randint
from re import S, sub
from time import time

from ollama import ChatResponse, chat
from ollama import list as ollama_list
from ollama import pull as ollama_pull


def say(
    args: argparse.Namespace,
    file_handler: TextIOWrapper,
    turn: int,
    content: str,
):
    """
    Speak the content using the specified voice and rate, and write it to the history file.
    """
    role = args.talkers[turn]
    msg = f"- {role}: {content.strip()}\n"
    file_handler.write(f"{msg}\n")
    print(msg)
    if not args.quiet:
        if "spd-say" in args.command.lower():  # Linux
            subprocess.run([args.command, "-r", args.rate, "-t", role, content.strip()])
        elif "ptts" in args.command.lower():  # Windows
            # A "|" inside an argument list is passed literally rather than
            # acting as a shell pipe, so feed the text to ptts via stdin.
            subprocess.run(
                [args.command, "-r", args.rate, "-voice", role],
                input=content.strip(),
                text=True,
            )
        elif "say" in args.command.lower():  # macOS
            subprocess.run([args.command, "-r", args.rate, "-v", role, content.strip()])
        else:
            raise ValueError(f"Unsupported command: {args.command}")
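
# For reference, on macOS the branch above runs the equivalent of:
#   say -r 186 -v Karen "Hello there!"
# (illustrative voice, rate, and text; the real values come from the CLI
# arguments and the model's response).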


def get_text(response: ChatResponse):
    """
    Extract text from the response, removing unnecessary tags and emojis.
    """
    # Remove <think>...</think> tags that some models emit with their reasoning.
    text = sub(r"<think>.*</think>", "", response.message.content, flags=S).strip()
    # Remove emojis and other symbols, keeping word characters, whitespace and
    # basic punctuation. The hyphen is placed last in the character class so it
    # is treated as a literal hyphen, not as a range.
    text = sub(r"[^\w\s,.!?':;-]", "", text)
    return text
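
# Illustrative example of the cleanup (assumed model output):
#   "<think>plan the answer</think> Hi there! 😄"  ->  "Hi there!"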


def clean_messages(args: argparse.Namespace, messages: list[dict]):
    """
    When the size of the chat history exceeds the limit, drop the two oldest
    batches of messages while keeping the system prompt.
    """
    # Measure the actual text; sys.getsizeof() on the list would only count
    # its pointer array, not the strings inside it.
    if sum(len(m["content"]) for m in messages) > int(args.max_tokens) * 1024:
        return [messages[0], *messages[2 * len(args.talkers) :]]
    return messages
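
# Illustrative trim with the default two talkers: if the history is
#   [system, m1, m2, m3, m4, m5]
# and it exceeds the limit, the slice above keeps [system, m4, m5],
# dropping the three oldest exchange messages.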
| if __name__ == "__main__": | |
| default_prompt = "Ask me any short random question." | |
| parser = argparse.ArgumentParser( | |
| description="AI's chat", formatter_class=argparse.ArgumentDefaultsHelpFormatter | |
| ) | |
| parser.add_argument( | |
| "-c", | |
| dest="command", | |
| type=str, | |
| default="say", | |
| help="Say command to execute", | |
| ) | |
| parser.add_argument( | |
| "-d", | |
| dest="description", | |
| type=str, | |
| default="You are curious but like when others ask you questions as well. You answer very briefly!", | |
| help="Description for the AI model", | |
| ) | |
| parser.add_argument( | |
| "-f", | |
| dest="filename", | |
| type=str, | |
| default=f"/tmp/chat_history_{int(time())}.txt", | |
| help="File to save chat history", | |
| ) | |
| parser.add_argument( | |
| "-l", | |
| dest="talkers", | |
| type=str, | |
| nargs="+", | |
| default=["Karen", "Samantha"], | |
| help="Names of the talkers", | |
| ) | |
| parser.add_argument( | |
| "-m", | |
| dest="models", | |
| type=str, | |
| nargs="+", | |
| default=["llama3.2"], | |
| help="Models to use", | |
| ) | |
| parser.add_argument( | |
| "-q", dest="quiet", action="store_true", help="Suppress voice output" | |
| ) | |
| parser.add_argument( | |
| "-p", | |
| dest="prompt", | |
| type=str, | |
| default=default_prompt, | |
| help="Prompt for the AI model", | |
| ) | |
| parser.add_argument( | |
| "-r", | |
| dest="rate", | |
| type=str, | |
| default="186", | |
| help="Speech rate for voice output (words per minute)", | |
| ) | |
| parser.add_argument( | |
| "-t", | |
| dest="temperature", | |
| type=float, | |
| default=0.8, | |
| help="Temperature for response randomness", | |
| ) | |
| parser.add_argument( | |
| "-x", | |
| dest="max_tokens", | |
| type=int, | |
| default=16, | |
| help="Maximum tokens for response (in Kilobytes)", | |
| ) | |
    # Parse command-line arguments.
    args = parser.parse_args()
    # Initialize.
    messages = []
    iteration = 1
    # Get the models available locally.
    models = list(map(lambda m: m.model, ollama_list().models))
    # If a requested model is not available, pull it.
    for model in args.models:
        _model = model if ":" in model else f"{model}:latest"
        if _model not in models:
            print(f"Pulling model: {_model}")
            ollama_pull(_model)
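    # Note: locally installed models are listed with an explicit tag, so a bare
    # name like "gemma3" is normalized to "gemma3:latest" above before the
    # availability check (this assumes Ollama's ":latest" default-tag convention).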
    if args.prompt != default_prompt:
        # If a prompt is specified, use it.
        text = args.prompt
    else:
        # Otherwise, generate a random opening question using the first model.
        response: ChatResponse = chat(
            model=args.models[0],
            messages=[
                {
                    "role": "user",
                    "content": args.prompt,
                }
            ],
            options={
                "seed": int(time()),
                "temperature": 1.0,
            },
        )
        text = get_text(response)
    # Prepare a separate message history for each talker.
    for i, talker in enumerate(args.talkers):
        messages.append(
            [
                {
                    "role": "system",
                    "content": (
                        f"You are {talker}, a {randint(22, 55)} years old "
                        f"{choice(['man', 'woman'])}! {args.description}"
                    ),
                },
                {
                    "role": "user" if i else "assistant",
                    "content": text,
                },
            ]
        )
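    # Illustrative shape of `messages` for the default talkers (ages and
    # genders are randomized, so the system prompts are only an example):
    #   messages[0] = [system("You are Karen, ..."), assistant(<opening text>)]
    #   messages[1] = [system("You are Samantha, ..."), user(<opening text>)]
    # Talker 0 "spoke" the opening line, so it is an assistant message in
    # their own history and a user message in everyone else's.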
| print(f"Chat history will be saved to {args.filename}") | |
| print("Speakers:") | |
| for msg in messages: | |
| print(f" - {msg[0]['content'].split('!')[0].replace('You are ', '').strip()}") | |
| print() # Print an empty line for better readability. | |
| with open(args.filename, "w") as f: | |
| say(args, f, 0, text) | |
| while True: | |
| try: | |
| # Get the current turn. | |
| turn = iteration % len(args.talkers) | |
| # Get the current message. | |
| response = chat( | |
| model=args.models[iteration % len(args.models)], | |
| messages=messages[turn], | |
| options={"temperature": float(args.temperature)}, | |
| ) | |
| # Get the clean response text. | |
| text = get_text(response) | |
| # Append the assistant's message. | |
| messages[turn].append({"role": "assistant", "content": text}) | |
| messages[turn] = clean_messages(args, messages[turn]) | |
| # Append the user's message for the next talkers. | |
| for i in range(1, len(args.talkers)): | |
| next_turn = (iteration + i) % len(args.talkers) | |
| messages[next_turn].append({"role": "user", "content": text}) | |
| messages[next_turn] = clean_messages(args, messages[next_turn]) | |
| # We do this every time instead of opening the file once, | |
| # because otherwise the file is empty until the end of the program | |
| # and we can't tail it if we want. | |
| with open(args.filename, "a") as f: | |
| say(args, f, turn, text) | |
| iteration += 1 | |
| # If the conversation comes to an end, then exit. | |
| if "bye" in text.lower() and len(text) < 10: | |
| break | |
| except KeyboardInterrupt as e: | |
| # Ctrl+C to stop the conversation. | |
| print(f"Exiting...") | |
| break | |
| print(f"Chat history was saved to {args.filename}") |
I found it very amusing to listen to the AIs chat 😄