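# Do not generate a debuginfo subpackage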
%define debug_package %{nil}
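# Upstream snapshot tag, used in Source0 and as the unpacked source directory name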
%global llama_commitid master-3ebb009

Name: llama.cpp
Version: 20230815
Release: 4
License: MIT
Summary: Port of the LLaMA large language model, implemented in C/C++

URL: https://github.com/ggerganov/llama.cpp
Source0: https://github.com/ggerganov/llama.cpp/archive/refs/tags/%{llama_commitid}.tar.gz
Patch0: add-loongarch64-support.patch

BuildRequires: gcc, gcc-c++, cmake

%description
Port of the LLaMA large language model, implemented in C/C++;
it can be used to chat with the model locally on a laptop.

%prep
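# Unpack Source0 and apply all patches with -p1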
%autosetup -b 0 -n %{name}-%{llama_commitid} -p1

%build
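# Out-of-tree CMake build in a separate build directory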
mkdir llama_builddir
pushd llama_builddir
cmake ..
%make_build
popd

%install
pushd llama_builddir
%make_install
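# CMake installs under the /usr/local prefix by default; rename the generically
# named tools and relocate everything into the standard prefix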
mv %{buildroot}%{_prefix}/local/bin/main %{buildroot}%{_prefix}/local/bin/llama_cpp_main
mv %{buildroot}%{_prefix}/local/bin/convert.py %{buildroot}%{_prefix}/local/bin/llama_convert.py
mv %{buildroot}%{_prefix}/local/* %{buildroot}%{_prefix}
popd

%files
%{_bindir}/*
%{_libdir}/libembdinput.a

%changelog
* Tue May 14 2024 wangshuo <wangshuo@kylinos.cn> - 20230815-4
- add loongarch64 support

* Wed Sep 20 2023 zhoupengcheng <zhoupengcheng11@huawei.com> - 20230815-3
- rename /usr/bin/convert.py
- update long-term yum.repo in dockerfile

* Tue Sep 19 2023 zhoupengcheng <zhoupengcheng11@huawei.com> - 20230815-2
- add dockerfile

* Wed Aug 16 2023 zhoupengcheng <zhoupengcheng11@huawei.com> - 20230815-1
- Init package