#!/usr/bin/env bash
# Installs local LLM tooling (ollama, llama.cpp via nix) and wires up the
# LM Studio desktop entry. Must be run as a regular user, not root.
set -euo pipefail

# Refuse to run as root: the nix profile and ollama install belong to the user.
if [ "${EUID}" -eq 0 ]; then
  echo "This script should not be run as root." >&2
  exit 1
fi

# Absolute directory containing this script (the .desktop file lives here).
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" &> /dev/null && pwd)"
210
311# Nix is required for llama.cpp
412if ! command -v nix & > /dev/null; then
@@ -11,13 +19,28 @@ if ! command -v ollama &> /dev/null; then
1119 curl -fsSL https://ollama.com/install.sh | sh
1220fi
1321
14- nix profile install nixpkgs#llama-cpp --extra-experimental-features nix-command --extra-experimental-features flakes
# llama.cpp (llama-server) is provided through the user's nix profile.
if ! command -v llama-server &> /dev/null; then
  echo "Installing llama.cpp."
  nix profile install nixpkgs#llama-cpp \
    --extra-experimental-features nix-command \
    --extra-experimental-features flakes
fi
26+
27+ # if [ ! -f "${SCRIPT_DIR}/LM-Studio-"* ]; then
28+ # echo "Downloading LM Studio."
29+ # # This does not pick up the name properly.
30+ # curl -JLO "https://lmstudio.ai/download/latest/linux/x64" --output-dir "${SCRIPT_DIR}"
31+ # chmod +x "${SCRIPT_DIR}/LM-Studio-"*
32+ # fi
33+
echo "Creating symlink."
# Ensure the target directory exists first: under `set -eu` a failing `ln`
# (missing ~/.local/share/applications) would abort the whole script.
mkdir -p "${HOME}/.local/share/applications"
ln -f -s "${SCRIPT_DIR}/lm-studio.desktop" "${HOME}/.local/share/applications/lm-studio.desktop"

ollama --version
1538
# Sanity checks: confirm the installed binaries resolve and report versions.
# `command -v` is the portable replacement for `which`.
command -v llama-server
llama-server --version

command -v llama-cli
# llama-cli -hf Qwen/Qwen2.5-7B-Instruct-GGUF

llama-bench --list-devices
# llama-bench --model "${HOME}/.cache/llama.cpp/Qwen_Qwen2.5-7B-Instruct-GGUF_qwen2.5-7b-instruct-q2_k.gguf"
# (web-page scrape artifact: "0 commit comments" — not part of the script)