Bourne Again SHell (Bash) functions to streamline the use of llama.cpp
# Install build and Vulkan dependencies, then build llama.cpp from source
# with the Vulkan backend (CURL support disabled, Release build via Ninja).
install_llamacpp()
{
    sudo apt install -y build-essential git cmake ninja-build pkg-config vulkan-tools mesa-vulkan-drivers libvulkan-dev
    sudo apt install -y glslc glslang-tools spirv-tools
    cd "$HOME" || return 1
    git clone https://github.com/ggerganov/llama.cpp.git
    cd llama.cpp || return 1
    cmake -B build -DGGML_VULKAN=ON -DLLAMA_CURL=OFF -DCMAKE_BUILD_TYPE=Release -G Ninja
    cmake --build build -j
}
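# Usage sketch: run once on a fresh Debian/Ubuntu system. Afterwards,
# `vulkaninfo` (installed above via vulkan-tools) can help confirm the GPU
# is visible to Vulkan before running models:
#   install_llamacpp
#   vulkaninfo --summary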
# Pull the latest llama.cpp sources and rebuild from a clean build directory.
rebuild_llamacpp()
{
    cd "$HOME/llama.cpp" || return 1
    rm -rf build
    git pull
    cmake -B build -DGGML_VULKAN=ON -DLLAMA_CURL=OFF -DCMAKE_BUILD_TYPE=Release -G Ninja
    cmake --build build -j
}
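# Usage sketch: rebuild whenever upstream changes land, e.g. support for a
# new model architecture:
#   rebuild_llamacpp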
# Render a Markdown file in the terminal. Requires the Python `rich` package
# (e.g. `pip install rich`).
mdview()
{
    if [ -z "$1" ]; then
        echo "Usage: mdview <markdown-file>"
        return 1
    fi
    python3 - <<'EOF' "$1"
import sys
from pathlib import Path

from rich.console import Console
from rich.markdown import Markdown

# The shell passes the markdown path as the script's first argument.
md_path = Path(sys.argv[1])
text = md_path.read_text(encoding="utf-8")
console = Console()
console.print(Markdown(text))
EOF
}
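# Usage sketch (handy for reading model cards shipped alongside GGUF files):
#   mdview README.md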
# Convenience aliases for the freshly built binaries and the log directory.
alias llama-cli='$HOME/llama.cpp/build/bin/llama-cli'
alias llama-server='$HOME/llama.cpp/build/bin/llama-server'
alias ggufunsplit='$HOME/llama.cpp/build/bin/llama-gguf-split --merge'
alias llamadev='$HOME/llama.cpp/build/bin/llama-cli --list-devices'
alias rmllamalogs='rm -r ~/.llamalogs'
# Open Hugging Face searches for uncensored/abliterated GGUF models.
alias gguf-uncensored='x-www-browser "https://huggingface.co/models?library=gguf&search=uncensored" "https://huggingface.co/models?library=gguf&search=abliterated" "https://huggingface.co/models?library=gguf&search=ablated"'
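# Usage sketch for ggufunsplit: llama-gguf-split --merge takes the first
# shard and an output path (filenames below are hypothetical):
#   ggufunsplit model-00001-of-00003.gguf model.gguf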
# Search Hugging Face for GGUF models; with no arguments, open the full list.
gguf()
{
    if [ -z "$1" ]; then
        x-www-browser "https://huggingface.co/models?library=gguf"
    else
        # Encode spaces so multi-word queries survive as a single URL parameter.
        query=$(echo "$*" | tr ' ' '+')
        x-www-browser "https://huggingface.co/models?library=gguf&search=$query" \
            "https://huggingface.co/models?pipeline_tag=text-generation&library=gguf&search=$query"
    fi
}
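# Usage sketch:
#   gguf qwen3 instruct    # opens two search tabs for "qwen3+instruct"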
# Interactive chat: the prompt is the first argument; any further arguments
# (model path, sampling flags, ...) are forwarded to llama-cli. Each run is
# logged to ~/.llamalogs under a timestamped, sanitized file name.
llama() {
    local ts safe1
    ts=$(date +%s)
    safe1=$(echo "$1" | sed 's/[^A-Za-z0-9._-]/_/g' | cut -b 1-64)
    mkdir -p "$HOME/.llamalogs"
    "$HOME/llama.cpp/build/bin/llama-cli" \
        --offline \
        -p "$1" \
        "${@:2}" \
        --log-file "$HOME/.llamalogs/${ts}_${safe1}.log"
    echo "Log File: $HOME/.llamalogs/${ts}_${safe1}.log"
}
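# Usage sketch (the model path is hypothetical; point -m at your own GGUF):
#   llama "Explain mmap vs mlock" -m "$HOME/.llms/my-model.gguf"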
# Same as llama(), but reads the prompt from a file (-f) instead of -p.
llamaf() {
    local ts safe1
    ts=$(date +%s)
    safe1=$(echo "$1" | sed 's/[^A-Za-z0-9._-]/_/g' | cut -b 1-64)
    mkdir -p "$HOME/.llamalogs"
    "$HOME/llama.cpp/build/bin/llama-cli" \
        --offline \
        -f "$1" \
        "${@:2}" \
        --log-file "$HOME/.llamalogs/${ts}_${safe1}.log"
    echo "Log File: $HOME/.llamalogs/${ts}_${safe1}.log"
}
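# Usage sketch (file and model names are hypothetical):
#   llamaf prompt.txt -m "$HOME/.llms/my-model.gguf"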
# One-shot CPU-only run: prompt in $1, model path in $2. Forces CPU inference
# (--device none), pins strict CPU placement, and locks model memory with
# --mlock. Adjust --threads to your core count.
llama1() {
    local ts safe1
    ts=$(date +%s)
    safe1=$(echo "$1" | sed 's/[^A-Za-z0-9._-]/_/g' | cut -b 1-64)
    mkdir -p "$HOME/.llamalogs"
    "$HOME/llama.cpp/build/bin/llama-cli" \
        --offline \
        --single-turn \
        --device none \
        --cpu-strict 1 \
        --threads 32 \
        --mlock \
        -p "$1" \
        -m "$2" \
        --log-file "$HOME/.llamalogs/${ts}_${safe1}.log"
    echo "Log File: $HOME/.llamalogs/${ts}_${safe1}.log"
}
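# Usage sketch (model path is hypothetical):
#   llama1 "Write a limerick about RAM" "$HOME/.llms/my-model.gguf"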
# File-prompt variant of llama1(), with an 8192-token context for longer inputs.
llama1f() {
    local ts safe1
    ts=$(date +%s)
    safe1=$(echo "$1" | sed 's/[^A-Za-z0-9._-]/_/g' | cut -b 1-64)
    mkdir -p "$HOME/.llamalogs"
    "$HOME/llama.cpp/build/bin/llama-cli" \
        --offline \
        --single-turn \
        --device none \
        --cpu-strict 1 \
        --threads 32 \
        --mlock \
        --ctx-size 8192 \
        -f "$1" \
        -m "$2" \
        --log-file "$HOME/.llamalogs/${ts}_${safe1}.log"
    echo "Log File: $HOME/.llamalogs/${ts}_${safe1}.log"
}
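# Usage sketch (file and model names are hypothetical):
#   llama1f long-prompt.txt "$HOME/.llms/my-model.gguf"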
# Quick ask: joins all arguments into a single prompt and runs a fixed
# default model CPU-only, logging to a separate ~/.llamalogs/user directory.
ff() {
    local ts safe1
    ts=$(date +%s)
    safe1=$(echo "$*" | sed 's/[^A-Za-z0-9._-]/_/g' | cut -b 1-64)
    mkdir -p "$HOME/.llamalogs/user"
    "$HOME/llama.cpp/build/bin/llama-cli" \
        --offline \
        --device none \
        --cpu-strict 1 \
        --threads 32 \
        --mlock \
        -p "$*" \
        -m "$HOME/.llms/Qwen3-30B-A3B-Instruct-2507-Q8_0.gguf" \
        --log-file "$HOME/.llamalogs/user/${ts}_${safe1}.log"
    echo "Log File: $HOME/.llamalogs/user/${ts}_${safe1}.log"
}
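# Usage sketch (no quoting needed; every word becomes part of the prompt):
#   ff what does the mlock flag do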