#!/bin/sh
# chatllama launcher: pick a preset or model (and a quant) with fzf, then
# exec llama-cli on the matching GGUF file.
#
# Layout (XDG-compliant):
#   $LLAMA_CONFIG/config          optional global config, sourced if present
#   $LLAMA_CONFIG/presets/NAME    optional preset files (sourced shell snippets)
#   $LLAMA_DATA/models/MODEL@QUANT.gguf
#
# Variables honoured from config/presets: DEFAULT_CHOICE, DEFAULT_QUANT,
# DEFAULT_OPTIONS, OPTIONS, OPTIONS_<QUANT>, MODEL, EXTENDS, PRESET.
#
# shellcheck disable=SC1090,SC1091

# Use 'set -e' rather than a shebang flag so the option survives being run as
# 'sh scriptname' (shebang options are lost in that case).
set -e

FZF_COLOUR="bg+:8,fg:5,fg+:5,gutter:0,pointer:6,hl:6,hl+:6,border:0"
LLAMA_CONFIG="${XDG_CONFIG_HOME:-$HOME/.config}/chatllama"
LLAMA_DATA="${XDG_DATA_HOME:-$HOME/.local/share}/chatllama"
LLAMA_STATE="${XDG_STATE_HOME:-$HOME/.local/state}/chatllama"

[ -f "$LLAMA_CONFIG/config" ] && . "$LLAMA_CONFIG/config"

# If the glob matches nothing it stays a literal pattern, find fails, and we
# know no models are installed. (Positional params must stay untouched — they
# are forwarded to llama-cli below — so a 'set --' glob test is not an option.)
if ! find "$LLAMA_DATA/models/"*".gguf" 1>/dev/null 2>&1; then
	# Create the models directory itself (the original only made its parent),
	# so the path named in the error message exists for the user to populate.
	[ ! -d "$LLAMA_DATA/models" ] && mkdir -p "$LLAMA_DATA/models"
	# Diagnostics belong on stderr.
	printf "\033[1;31merror: \033[0mno models found in %s/models\n" "$LLAMA_DATA" >&2
	exit 1
fi

[ ! -d "$LLAMA_STATE" ] && mkdir -p "$LLAMA_STATE"
cd "$LLAMA_STATE"

# Get a user choice out of all available presets and models.
# Model filenames are MODEL@QUANT.gguf; stripping from '@' collapses the
# quants so each model is listed once, merged with the preset names.
CHOICES="$(find "$LLAMA_CONFIG/presets/"* "$LLAMA_DATA/models/"* -printf "%f\n" | sed "s/@.*$//" | sort | uniq)"
# Float DEFAULT_CHOICE to the top; the awk dedup drops its later duplicate.
if [ -n "$DEFAULT_CHOICE" ] && echo "$CHOICES" | grep -Eq "$DEFAULT_CHOICE"; then
	CHOICES="$(printf "%s\n\n%s" "$DEFAULT_CHOICE" "$CHOICES" | awk '!x[$0]++')"
fi
CHOICE="$(echo "$CHOICES" | fzf --color="$FZF_COLOUR")"
[ -z "$CHOICE" ] && exit 1

# Get a model name based on the choice. A preset may set MODEL, OPTIONS, etc.
if [ -f "$LLAMA_CONFIG/presets/$CHOICE" ]; then
	. "$LLAMA_CONFIG/presets/$CHOICE"
	# NOTE(review): an extending preset appears expected to set both EXTENDS
	# (the base preset) and PRESET (its own name), so the base is sourced and
	# the preset re-sourced to let its settings win — confirm against the
	# actual preset files.
	if [ -n "$EXTENDS" ]; then
		. "$LLAMA_CONFIG/presets/$EXTENDS"
		. "$LLAMA_CONFIG/presets/$PRESET"
	fi
fi
[ -z "$MODEL" ] && MODEL="$CHOICE"

# Get a quant and its options.
QUANTS="$(find "$LLAMA_DATA/models/$MODEL@"*".gguf" -exec basename {} \; | sed "s/^.*@//; s/\.gguf$//")"
# Same default-first promotion as for CHOICES (grep -Eq for consistency).
if [ -n "$DEFAULT_QUANT" ] && echo "$QUANTS" | grep -Eq "$DEFAULT_QUANT"; then
	QUANTS="$(printf "%s\n\n%s" "$DEFAULT_QUANT" "$QUANTS" | awk '!x[$0]++')"
fi
# Skip the picker entirely when only one quant exists.
if [ "$(echo "$QUANTS" | wc -l)" -gt 1 ]; then
	QUANT="$(echo "$QUANTS" | fzf --color="$FZF_COLOUR")"
else
	QUANT="$QUANTS"
fi
[ -z "$QUANT" ] && exit 1
# POSIX sh has no ${!var} indirection; eval is the only way to read the
# per-quant options variable OPTIONS_<QUANT>.
QUANT_OPTIONS_VARNAME="OPTIONS_$QUANT"
QUANT_OPTIONS="$(eval "echo \"\$$QUANT_OPTIONS_VARNAME\"")"

# Run llama.cpp
MODEL_FILENAME="$MODEL@$QUANT.gguf"
printf "\033[1;35m:: \033[0m\033[1mLoading %s \033[38;5;243m(%s)\033[0m\n" "$CHOICE" "$MODEL_FILENAME"
# The option variables are intentionally unquoted so they word-split into
# individual llama-cli flags.
# shellcheck disable=SC2086
llama-cli \
	--color \
	--conversation \
	--ctx-size 0 \
	--log-disable \
	$DEFAULT_OPTIONS \
	$OPTIONS \
	$QUANT_OPTIONS \
	--model "$LLAMA_DATA/models/$MODEL_FILENAME" \
	"$@"