
Deploying the DeepSeek large model on an intranet Linux server (Ollama + AnythingLLM)

2025/2/20 · Source: https://blog.csdn.net/qq_40744423/article/details/145603955

I. Install Ollama

Source: ollama/docs/linux.md at main · ollama/ollama · GitHub

1. Download the installation package

Ollama download link: https://ollama.com/download/ollama-linux-amd64.tgz. After downloading, place the tarball in the user's home directory on the intranet host.
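Since the target host has no internet access, here is a minimal sketch of fetching the tarball on an internet-connected machine and copying it over (the host name and destination path are placeholder assumptions):

# On an internet-connected machine: download the bundle
curl -fL -o ollama-linux-amd64.tgz https://ollama.com/download/ollama-linux-amd64.tgz

# Copy it to the intranet host (replace user/host with your own)
scp ollama-linux-amd64.tgz root@intranet-host:/root/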

2. Run the install script

This is Ollama's official install script with one offline change: the curl download lines are commented out and the locally copied tarball is extracted instead.

#!/bin/sh
# This script installs Ollama on Linux.
# It detects the current operating system architecture and installs the appropriate version of Ollama.

set -eu

red="$( (/usr/bin/tput bold || :; /usr/bin/tput setaf 1 || :) 2>&-)"
plain="$( (/usr/bin/tput sgr0 || :) 2>&-)"

status() { echo ">>> $*" >&2; }
error() { echo "${red}ERROR:${plain} $*"; exit 1; }
warning() { echo "${red}WARNING:${plain} $*"; }

TEMP_DIR=$(mktemp -d)
cleanup() { rm -rf $TEMP_DIR; }
trap cleanup EXIT

available() { command -v $1 >/dev/null; }
require() {
    local MISSING=''
    for TOOL in $*; do
        if ! available $TOOL; then
            MISSING="$MISSING $TOOL"
        fi
    done
    echo $MISSING
}

[ "$(uname -s)" = "Linux" ] || error 'This script is intended to run on Linux only.'

ARCH=$(uname -m)
case "$ARCH" in
    x86_64) ARCH="amd64" ;;
    aarch64|arm64) ARCH="arm64" ;;
    *) error "Unsupported architecture: $ARCH" ;;
esac

IS_WSL2=false

KERN=$(uname -r)
case "$KERN" in
    *icrosoft*WSL2 | *icrosoft*wsl2) IS_WSL2=true;;
    *icrosoft) error "Microsoft WSL1 is not currently supported. Please use WSL2 with 'wsl --set-version <distro> 2'" ;;
    *) ;;
esac

VER_PARAM="${OLLAMA_VERSION:+?version=$OLLAMA_VERSION}"

SUDO=
if [ "$(id -u)" -ne 0 ]; then
    # Not running as root; sudo is required
    if ! available sudo; then
        error "This script requires superuser permissions. Please re-run as root."
    fi
    SUDO="sudo"
fi

NEEDS=$(require awk grep sed tee xargs)
if [ -n "$NEEDS" ]; then
    status "ERROR: The following tools are required but missing:"
    for NEED in $NEEDS; do
        echo "  - $NEED"
    done
    exit 1
fi

for BINDIR in /usr/local/bin /usr/bin /bin; do
    echo $PATH | grep -q $BINDIR && break || continue
done
OLLAMA_INSTALL_DIR=$(dirname ${BINDIR})

if [ -d "$OLLAMA_INSTALL_DIR/lib/ollama" ] ; then
    status "Cleaning up old version at $OLLAMA_INSTALL_DIR/lib/ollama"
    $SUDO rm -rf "$OLLAMA_INSTALL_DIR/lib/ollama"
fi

status "Installing ollama to $OLLAMA_INSTALL_DIR"
$SUDO install -o0 -g0 -m755 -d $BINDIR
$SUDO install -o0 -g0 -m755 -d "$OLLAMA_INSTALL_DIR"

status "Downloading Linux ${ARCH} bundle"
# Offline modification: the original curl download is commented out;
# the locally copied tarball is extracted instead.
#curl --fail --show-error --location --progress-bar \
#    "https://ollama.com/download/ollama-linux-${ARCH}.tgz${VER_PARAM}" | \
$SUDO tar -xzf ollama-linux-amd64.tgz -C "$OLLAMA_INSTALL_DIR"

if [ "$OLLAMA_INSTALL_DIR/bin/ollama" != "$BINDIR/ollama" ] ; then
    status "Making ollama accessible in the PATH in $BINDIR"
    $SUDO ln -sf "$OLLAMA_INSTALL_DIR/ollama" "$BINDIR/ollama"
fi

# Check for NVIDIA JetPack systems with additional downloads
if [ -f /etc/nv_tegra_release ] ; then
    if grep R36 /etc/nv_tegra_release > /dev/null ; then
        status "Downloading JetPack 6 components"
        #curl --fail --show-error --location --progress-bar \
        #    "https://ollama.com/download/ollama-linux-${ARCH}-jetpack6.tgz${VER_PARAM}" | \
        # Offline modification (assumes the JetPack bundle was also copied over;
        # the original piped from curl, and reading stdin here would hang):
        $SUDO tar -xzf ollama-linux-${ARCH}-jetpack6.tgz -C "$OLLAMA_INSTALL_DIR"
    elif grep R35 /etc/nv_tegra_release > /dev/null ; then
        status "Downloading JetPack 5 components"
        #curl --fail --show-error --location --progress-bar \
        #    "https://ollama.com/download/ollama-linux-${ARCH}-jetpack5.tgz${VER_PARAM}" | \
        $SUDO tar -xzf ollama-linux-${ARCH}-jetpack5.tgz -C "$OLLAMA_INSTALL_DIR"
    else
        warning "Unsupported JetPack version detected.  GPU may not be supported"
    fi
fi

install_success() {
    status 'The Ollama API is now available at 127.0.0.1:11434.'
    status 'Install complete. Run "ollama" from the command line.'
}
trap install_success EXIT

# Everything from this point onwards is optional.

configure_systemd() {
    if ! id ollama >/dev/null 2>&1; then
        status "Creating ollama user..."
        $SUDO useradd -r -s /bin/false -U -m -d /usr/share/ollama ollama
    fi
    if getent group render >/dev/null 2>&1; then
        status "Adding ollama user to render group..."
        $SUDO usermod -a -G render ollama
    fi
    if getent group video >/dev/null 2>&1; then
        status "Adding ollama user to video group..."
        $SUDO usermod -a -G video ollama
    fi

    status "Adding current user to ollama group..."
    $SUDO usermod -a -G ollama $(whoami)

    status "Creating ollama systemd service..."
    cat <<EOF | $SUDO tee /etc/systemd/system/ollama.service >/dev/null
[Unit]
Description=Ollama Service
After=network-online.target

[Service]
ExecStart=$BINDIR/ollama serve
User=ollama
Group=ollama
Restart=always
RestartSec=3
Environment="PATH=$PATH"

[Install]
WantedBy=default.target
EOF
    SYSTEMCTL_RUNNING="$(systemctl is-system-running || true)"
    case $SYSTEMCTL_RUNNING in
        running|degraded)
            status "Enabling and starting ollama service..."
            $SUDO systemctl daemon-reload
            $SUDO systemctl enable ollama
            start_service() { $SUDO systemctl restart ollama; }
            trap start_service EXIT
            ;;
        *)
            warning "systemd is not running"
            if [ "$IS_WSL2" = true ]; then
                warning "see https://learn.microsoft.com/en-us/windows/wsl/systemd#how-to-enable-systemd to enable it"
            fi
            ;;
    esac
}

if available systemctl; then
    configure_systemd
fi

# WSL2 only supports GPUs via nvidia passthrough
# so check for nvidia-smi to determine if GPU is available
if [ "$IS_WSL2" = true ]; then
    if available nvidia-smi && [ -n "$(nvidia-smi | grep -o "CUDA Version: [0-9]*\.[0-9]*")" ]; then
        status "Nvidia GPU detected."
    fi
    install_success
    exit 0
fi

# Don't attempt to install drivers on Jetson systems
if [ -f /etc/nv_tegra_release ] ; then
    status "NVIDIA JetPack ready."
    install_success
    exit 0
fi

# Install GPU dependencies on Linux
if ! available lspci && ! available lshw; then
    warning "Unable to detect NVIDIA/AMD GPU. Install lspci or lshw to automatically detect and install GPU dependencies."
    exit 0
fi

check_gpu() {
    # Look for devices based on vendor ID for NVIDIA and AMD
    case $1 in
        lspci)
            case $2 in
                nvidia) available lspci && lspci -d '10de:' | grep -q 'NVIDIA' || return 1 ;;
                amdgpu) available lspci && lspci -d '1002:' | grep -q 'AMD' || return 1 ;;
            esac ;;
        lshw)
            case $2 in
                nvidia) available lshw && $SUDO lshw -c display -numeric -disable network | grep -q 'vendor: .* \[10DE\]' || return 1 ;;
                amdgpu) available lshw && $SUDO lshw -c display -numeric -disable network | grep -q 'vendor: .* \[1002\]' || return 1 ;;
            esac ;;
        nvidia-smi) available nvidia-smi || return 1 ;;
    esac
}

if check_gpu nvidia-smi; then
    status "NVIDIA GPU installed."
    exit 0
fi
# (The upstream script continues with GPU driver installation, omitted here
# since drivers cannot be downloaded on an intranet host.)
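To run the modified script, save it in the same directory as the tarball (the filename install_ollama.sh below is an assumption); the tar line extracts ollama-linux-amd64.tgz from the current working directory:

cd /root                      # directory containing ollama-linux-amd64.tgz
chmod +x install_ollama.sh
sudo ./install_ollama.sh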

3. Verify the installation

ollama -v
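Besides the version check, you can confirm the systemd service is up and hit the API endpoint the install script reports (127.0.0.1:11434); the root endpoint answers with a plain "Ollama is running":

systemctl status ollama            # should show active (running)
curl http://127.0.0.1:11434        # should print: Ollama is running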


II. Load a local model file into Ollama

1. Download a model in GGUF format

Download from: DeepSeek-R1-Distill-Qwen-1.5B-GGUF · ModelScope (modelscope.cn)
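As with the Ollama tarball, download the GGUF file on a machine with internet access and copy it to the intranet host; the destination below is an assumed path chosen to match the Modelfile in the next step:

# hypothetical host and paths; adjust to your environment
ssh root@intranet-host 'mkdir -p /root/deepseek/models'
scp DeepSeek-R1-Distill-Qwen-1.5B-Q8_0.gguf root@intranet-host:/root/deepseek/models/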

2. Add the model to Ollama

Prepare a local Modelfile:

vi deepseek-r1.mf

Its contents are as follows (the path after FROM points at the downloaded model file):

FROM /root/deepseek/models/DeepSeek-R1-Distill-Qwen-1.5B-Q8_0.gguf

TEMPLATE """{{ if .System }}<|im_start|>system
{{ .System }}<|im_end|>
{{ end }}<|im_start|>user
{{ .Prompt }}<|im_end|>
<|im_start|>assistant
"""
PARAMETER stop "<|im_start|>"
PARAMETER stop "<|im_end|>"

Note: a Modelfile takes a single FROM line (the base model). To serve several models, write a separate Modelfile for each one and run ollama create once per model.

Load (create) the model:

ollama create deepseek-r1-1.5B -f deepseek-r1.mf
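If the create step succeeds, the model appears in Ollama's local model list (the output below is illustrative; the ID, size, and timestamp will vary):

ollama list
# NAME                      ID              SIZE      MODIFIED
# deepseek-r1-1.5B:latest   xxxxxxxxxxxx    1.9 GB    a few seconds ago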

3. Run the model

ollama run deepseek-r1-1.5B
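The model is also reachable through Ollama's HTTP API, which is what AnythingLLM will talk to later; a quick smoke test (the model name must match the one given to ollama create):

curl http://127.0.0.1:11434/api/generate -d '{
  "model": "deepseek-r1-1.5B",
  "prompt": "Hello",
  "stream": false
}'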

III. Install AnythingLLM with Docker

1. Transfer the image to the intranet

On a machine with internet access, pull the image and save it as a tarball. Note that --platform should match the target host's architecture: the example below pulls linux/arm64, but an x86_64 server (like the amd64 Ollama build above) needs linux/amd64:

docker pull --platform linux/arm64 mintplexlabs/anythingllm
docker save mintplexlabs/anythingllm -o /opt/anythingllm.tar

Copy the tarball to the intranet host and load the image:

docker load -i /opt/anythingllm.tar
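After loading, verify that the image is present in the local image list:

docker images mintplexlabs/anythingllm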

2. Start AnythingLLM

Create the local directory that Docker will mount into the container:

export STORAGE_LOCATION=$HOME/anythingllm && \
mkdir -p $STORAGE_LOCATION && \
touch "$STORAGE_LOCATION/.env"

Start the container:

docker run -d -p 3001:3001 \
--name anythingllm \
--cap-add SYS_ADMIN \
-v ${STORAGE_LOCATION}:/app/server/storage \
-v ${STORAGE_LOCATION}/.env:/app/server/.env \
-e STORAGE_DIR="/app/server/storage" \
mintplexlabs/anythingllm

Check the logs to confirm the container started successfully:

docker logs -f anythingllm

If the log shows the error Assertion (0) == (uv_thread_create(t.get(), start_thread, this)) failed, recover as follows:

(1) Stop and remove the container:

docker stop anythingllm
docker rm anythingllm

(2) Start the AnythingLLM container with the following command, which adds --privileged=true on top of the official one:

docker run -d -p 3001:3001 \
--name anythingllm \
--privileged=true \
--cap-add SYS_ADMIN \
-v ${STORAGE_LOCATION}:/app/server/storage \
-v ${STORAGE_LOCATION}/.env:/app/server/.env \
-e STORAGE_DIR="/app/server/storage" \
mintplexlabs/anythingllm

3. Access AnythingLLM

Open http://localhost:3001 in a browser; if the page loads normally, the deployment succeeded.
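One follow-up worth noting (an assumption about a typical single-host layout, not part of the original steps): inside the AnythingLLM container, localhost refers to the container itself, not the host running Ollama. When configuring the Ollama connection in AnythingLLM's LLM settings, use the host's IP (for example http://192.168.x.x:11434) and make sure Ollama listens on more than 127.0.0.1, e.g. via a systemd override:

# Sketch: make the ollama service listen on all interfaces
sudo mkdir -p /etc/systemd/system/ollama.service.d
cat <<'EOF' | sudo tee /etc/systemd/system/ollama.service.d/override.conf
[Service]
Environment="OLLAMA_HOST=0.0.0.0"
EOF
sudo systemctl daemon-reload && sudo systemctl restart ollama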
