# Docker Hub image: https://hub.docker.com/r/localai/localai

# Releases: https://github.com/mudler/LocalAI/releases


# Start the all-in-one CPU image in the foreground with an interactive TTY,
# publishing the LocalAI API on host port 8080.
docker run -ti \
  -p 8080:8080 \
  --name local-ai \
  localai/localai:latest-aio-cpu

# Start the CUDA 12 all-in-one GPU image as a detached, auto-restarting service
# on the user-defined network "mgr" with a static IP.
# FIX: --models-path is a LocalAI application argument, not a docker flag.
# The original command placed it BEFORE the image name, so `docker run`
# rejected it as an unknown option; it must come AFTER the image name.
docker run -d \
  -p 8080:8080 \
  --gpus all \
  --name localai \
  --hostname localai \
  --restart=always \
  --network mgr \
  --ip 172.18.0.41 \
  -v /etc/localtime:/etc/localtime:ro \
  --privileged=true \
  --user=root \
  -e TZ='Asia/Shanghai' \
  --ulimit nofile=262144:262144 \
  -v /data/file:/data/file/ \
  -v /data/site/docker/data/local-ai:/models \
  localai/localai:master-aio-gpu-nvidia-cuda-12 \
  --models-path /models


# Pull the CUDA 12 all-in-one GPU image from Docker Hub.
docker pull  localai/localai:master-aio-gpu-nvidia-cuda-12

# Export the image to a gzip-compressed tarball for offline transfer.
# Restore on the target host with: gunzip -c <file>.tar.gz | docker load
docker save localai/localai:master-aio-gpu-nvidia-cuda-12  | gzip > /data/site/htmltoo.f/htmltoo.up/soft/docker.tar/localai-cuda12-2.20.1.tar.gz


# Open an interactive bash shell inside the running "localai" container.
docker exec -it  localai  /bin/bash

-

# Download the model (if not already present) and start serving it in one step.
local-ai run llama3.1-8b-chinese-chat


# Install models into the local store only (do not start them):

# Pre-download each model into the LocalAI model store without starting it.
# Same installs, same order as listing each command individually.
for model in \
  llama3.1-8b-chinese-chat \
  flux.1-schnell \
  dreamshaper \
  voice-zh_CN-huayan-medium \
  whisper-small-q5_1
do
  local-ai models install "$model"
done

-

# Run a model directly from a Hugging Face GGUF file via the huggingface:// URI.
local-ai run huggingface://TheBloke/phi-2-GGUF/phi-2.Q8_0.gguf

# Run from a remote model-definition YAML. NOTE: the "..." below is a
# documentation placeholder — substitute the real gist path before running.
local-ai run https://gist.githubusercontent.com/.../phi-2.yaml


# (forum page footer residue, translated from Chinese — not part of these notes)
# Signature: This person is lazy and left nothing behind!
# Latest replies (0)
# Back