Description
I want to use text-generation-webui with cuBLAS enabled, so I tried to build libllama.so
myself. I get this error:
[root@A12-213P llama.cpp]# LLAMA_CUBLAS=1 make libllama.so
I llama.cpp build info:
I UNAME_S: Linux
I UNAME_P: x86_64
I UNAME_M: x86_64
I CFLAGS: -I. -O3 -DNDEBUG -std=c11 -fPIC -Wall -Wextra -Wpedantic -Wcast-qual -Wdouble-promotion -Wshadow -Wstrict-prototypes -Wpointer-arith -pthread -march=native -mtune=native -DGGML_USE_CUBLAS -I/usr/local/cuda/include
I CXXFLAGS: -I. -I./examples -O3 -DNDEBUG -std=c++11 -fPIC -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wno-multichar -pthread -march=native -mtune=native
I LDFLAGS: -lcublas_static -lculibos -lcudart_static -lcublasLt_static -lpthread -ldl -L/usr/local/cuda/lib64 -lrt
I CC: cc (GCC) 11.2.1 20220127 (Red Hat 11.2.1-9)
I CXX: g++ (GCC) 11.2.1 20220127 (Red Hat 11.2.1-9)
g++ -I. -I./examples -O3 -DNDEBUG -std=c++11 -fPIC -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wno-multichar -pthread -march=native -mtune=native -shared -fPIC -o libllama.so llama.o ggml.o ggml-cuda.o -lcublas_static -lculibos -lcudart_static -lcublasLt_static -lpthread -ldl -L/usr/local/cuda/lib64
/opt/rh/devtoolset-11/root/usr/libexec/gcc/x86_64-redhat-linux/11/ld: ggml-cuda.o: relocation R_X86_64_32 against `.bss' can not be used when making a shared object; recompile with -fPIC
collect2: error: ld returned 1 exit status
make: *** [Makefile:184:libllama.so] Error 1
[root@A12-213P llama.cpp]#
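
The linker message points at the cause: ggml-cuda.o was built without position-independent code, so it cannot be linked into the shared libllama.so. A minimal workaround sketch, not the upstream fix: recompile the CUDA object with -fPIC forwarded to the host compiler via nvcc's -Xcompiler option, then rerun the link step. The include paths and defines below are taken from the CFLAGS shown above; whether they match everything the Makefile normally passes to nvcc is an assumption.

# Rebuild the CUDA object as position-independent code, then relink libllama.so.
nvcc -O3 -DNDEBUG -DGGML_USE_CUBLAS -I. -I/usr/local/cuda/include -Xcompiler -fPIC -c ggml-cuda.cu -o ggml-cuda.o
LLAMA_CUBLAS=1 make libllama.so

A more durable fix would be to add the same -Xcompiler -fPIC (or --compiler-options '-fPIC') to the nvcc rule for ggml-cuda.o in the Makefile, so every rebuild produces an object that can be linked into a shared library.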