!5 add dockerfile to support building llama docker image
From: @zhoupengcheng11 Reviewed-by: @xiezhipeng1 Signed-off-by: @xiezhipeng1
This commit is contained in:
commit
3bb4617b3b
24
Dockerfile-llama
Normal file
24
Dockerfile-llama
Normal file
@ -0,0 +1,24 @@
|
||||
# Usage:
# 1. Build image:
#    docker build -f Dockerfile-llama -t llama_image .
# 2. Run image:
#    docker run -it --security-opt seccomp=unconfined llama_image:latest

# Base image (pinned openEuler release tag)
FROM openeuler/openeuler:22.03

# Point yum at the openEuler 23.09 daily-build "everything" repo and install
# llama.cpp (plus wget for the model download below).
# gpgcheck=0 because the daily build is unsigned — NOTE(review): prefer a
# signed release repo (and https) for production; the hardcoded IP mirror may
# disappear.
# `yum clean all` runs in the same layer so the package cache never lands in
# an image layer.
RUN printf '%s\n' \
        '[everything]' \
        'name=everything' \
        'baseurl=http://121.36.84.172/dailybuild/EBS-openEuler-23.09/EBS-openEuler-23.09/everything/$basearch/' \
        'enabled=1' \
        'gpgcheck=0' \
        > /etc/yum.repos.d/openEuler.repo && \
    yum install -y llama.cpp wget && \
    yum clean all

# Download the quantized GGML model. WORKDIR already puts us in /model_path,
# so no -P flag is needed. NOTE(review): no checksum is verified for the
# downloaded model — consider wget + sha256sum, or ADD --checksum.
WORKDIR /model_path
RUN wget https://huggingface.co/TheBloke/Llama-2-13B-chat-GGML/resolve/main/llama-2-13b-chat.ggmlv3.q4_0.bin

# Run the model in interactive chat mode. Exec (JSON-array) form so
# llama_cpp_main is PID 1 and receives SIGTERM from `docker stop`.
# NOTE(review): container runs as root; adding a non-root USER would be safer
# if /model_path stays world-readable — confirm before changing.
CMD ["/usr/bin/llama_cpp_main", \
     "-m", "/model_path/llama-2-13b-chat.ggmlv3.q4_0.bin", \
     "--color", \
     "--ctx_size", "2048", \
     "-n", "-1", \
     "-ins", \
     "-b", "256", \
     "--top_k", "10000", \
     "--temp", "0.2", \
     "--repeat_penalty", "1.1", \
     "-t", "8"]
|
||||
|
||||
@ -3,7 +3,7 @@
|
||||
|
||||
Name: llama.cpp
|
||||
Version: 20230815
|
||||
Release: 1
|
||||
Release: 2
|
||||
License: MIT
|
||||
Summary: Port of the English large model LLaMA implemented based on C/C++
|
||||
|
||||
@ -38,6 +38,9 @@ popd
|
||||
%{_libdir}/libembdinput.a
|
||||
|
||||
%changelog
|
||||
* Tue Sep 19 2023 zhoupengcheng <zhoupengcheng11@huawei.com> - 20230815-2
|
||||
- add dockerfile
|
||||
|
||||
* Wed Aug 16 2023 zhoupengcheng <zhoupengcheng11@huawei.com> - 20230815-1
|
||||
- Init package
|
||||
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user