I. CUDA 11.3 container startup process
1. Copy the Dockerfile to any directory on disk, then run the following commands:
docker build -t nvidia/cuda:11.3.1-cudnn8-devel-ubuntu20.04-jupyter-conda .
docker images
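Before starting containers with --gpus, it helps to confirm that the host GPU driver and the NVIDIA container runtime are in place; a quick host-side check (the nvidia runtime entry only appears once nvidia-container-toolkit is installed):
nvidia-smi
docker info | grep -i runtime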
2. Start the container
Launch the image (normal mode -- GPU enabled)
docker run -i -t --gpus all nvidia/cuda:11.3.1-cudnn8-devel-ubuntu20.04-jupyter-conda /bin/bash
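Once inside the container, GPU visibility and the CUDA toolchain can be verified (nvcc is expected here because the image is built on a -devel base):
nvidia-smi
nvcc --version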
Launch the image (enhanced mode -- GPU enabled, host directory mounted, shared memory size set)
docker run -i -t -v /home/liguopu/:/guopu:rw --gpus all --shm-size 16G nvidia/cuda:11.3.1-cudnn8-devel-ubuntu20.04 /bin/bash
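After this container starts, the bind mount and shared-memory size can be checked from inside it (the paths follow the -v /home/liguopu/:/guopu:rw mapping above):
ls /guopu
df -h /dev/shm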
Test environment (use port mapping to expose the service)
docker run -i -td --name metehuman --gpus all -p 8000:8000 nvidia/cuda:11.3.1-cudnn8-devel-ubuntu20.04-jupyter-conda /bin/bash
Production use (port 8000 is the service port exposed to callers; add more ports as needed)
docker run -it --rm -p 8886:8888 -p 8000:8000 --gpus all nvidia/cuda:11.3.1-cudnn8-devel-ubuntu20.04-jupyter-conda
docker run -itd -p 8886:8888 -p 8000:8000 --gpus all nvidia/cuda:11.3.1-cudnn8-devel-ubuntu20.04-jupyter-conda
docker run -itd --name metehuman -p 8886:8888 -p 8000:8000 --gpus all nvidia/cuda:11.3.1-cudnn8-devel-ubuntu20.04-jupyter-conda
docker run --gpus '"device=vgpu,id=0"' -it --rm nvidia/cuda:11.0-base nvidia-smi
docker run -itd --name metehuman \
-p 8885:8888 -p 8001:8000 \
-e GRANT_SUDO=yes \
-e JUPYTER_ENABLE_LAB=yes \
--user root \
--gpus all \
nvidia/cuda:11.3.1-cudnn8-devel-ubuntu20.04-jupyter-conda
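With this mapping, JupyterLab should be reachable on host port 8885 (container port 8888). A minimal liveness check once the container is up -- any HTTP response means the server is listening:
curl -sI http://localhost:8885 | head -n 1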
3. Retrieve the token
token=$(docker exec -it metehuman jupyter server list | grep -oP '(?<=token=)[a-zA-Z0-9]+')
echo $token
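The token can then be combined with whichever host port was mapped to 8888 above (8885 or 8886, depending on the run command used); <host-ip> is a placeholder for the Docker host's address:
echo "http://<host-ip>:8885/?token=${token}"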
II. Start the default test image
docker pull m11007322/cuda11.3.0-cudnn8-devel-ubuntu20.04-jupyterlab
docker run -it \
-d \
--gpus all \
-p 8887:8888 \
-p 8001:8000 \
--name metehuman2 \
--user root \
-e NB_USER="ubuntu" \
-e CHOWN_HOME=yes \
-e GRANT_SUDO=yes \
-w "/home/${NB_USER}" \
-v "$PWD":"/home/$USER/work" \
m11007322/cuda11.3.0-cudnn8-devel-ubuntu20.04-jupyterlab
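Note that ${NB_USER} in the -w and -v arguments is expanded by the host shell, so either export NB_USER=ubuntu there first or write /home/ubuntu literally. If this image follows the jupyter docker-stacks conventions, the login token also shows up in the startup logs; a sketch for extracting it:
docker logs metehuman2 2>&1 | grep -oP '(?<=token=)[a-zA-Z0-9]+' | head -n 1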
III. Start a jupyter image for testing
docker run -itd --name test \
-p 8886:8888 -p 8000:8000 \
-e GRANT_SUDO=yes \
-e JUPYTER_ENABLE_LAB=yes \
--user root \
--gpus '"device=vgpu,id=0"' \
nvidia/cuda:11.3.1-cudnn8-devel-ubuntu20.04-jupyter-conda
docker run -it --name test --network=host --dns 8.8.8.8 --dns 8.8.4.4 --rm ubuntu
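This throwaway ubuntu container is only for checking network and DNS settings (note that --name test clashes with the container started above if that one is still running). Inside it, a simple lookup confirms the --dns servers work; getent ships with the base image:
getent hosts registry.cn-hangzhou.aliyuncs.com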
docker run -it --gpus all --network=host --rm registry.cn-hangzhou.aliyuncs.com/lipku/nerfstream:v1.3
IV. Check the container IP
docker inspect bceda087524e | grep IPAddress
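An equivalent that prints just the address, using docker inspect's Go-template formatting (a container name works here as well as an ID):
docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' bceda087524e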
curl https://openai.api2d.net/v1/chat/completions \
-H 'Content-Type: application/json' \
-H 'Authorization: Bearer fk193752-RlcPi2mBQqPOU5u1F8SFkG2z0gtxD0HS' \
-d '{
"model": "gpt-3.5-turbo",
"messages": [{"role": "user", "content": "你好!给我讲个笑话。"}]
}'