Commit cdbfd3f: Updates for Ubuntu CPU

1 parent: b2204bc

4 files changed: +9, -5 lines changed

install.sh (3 additions, 2 deletions)

@@ -48,11 +48,12 @@ if [ "$moduleInstallErrors" = "" ]; then
 
     HF_HUB_DISABLE_SYMLINKS_WARNING=1
 
-    write "Looking for model: ${phi3_fileId} in ${phi3_folder}"
+    write "Looking for model: ${phi3_fileId} in ${phi3_folder}..."
     if [ ! -d "${moduleDirPath}/${phi3_folder}/" ]; then
         write "downloading..."
         installPythonPackagesByName "huggingface-hub[cli]"
-        huggingface-cli download ${phi3_fileId} --include ${phi3_folder}/* --local-dir .
+        ${venvPythonCmdPath} ${packagesDirPath}/huggingface_hub/commands/huggingface_cli.py download ${phi3_fileId} --include ${phi3_folder}\* --local-dir .
+        # huggingface-cli download ${phi3_fileId} --include ${phi3_folder}/* --local-dir .
         writeLine "Done." "$color_success"
     else
         writeLine "${fileToGet} already downloaded." "$color_success"

multimode_llm_adapter.py (3 additions, 1 deletion)

@@ -29,13 +29,15 @@ def initialise(self) -> None:
                 self.model_filename = None # "Phi-3-vision-128k-instruct.gguf"
                 self.models_dir = "cuda-int4-rtn-block-32"
             else:
+                print("*** Multi-modal LLM using CPU only: This module requires > 16Gb RAM")
                 self.inference_device = "CPU"
                 self.device = "cpu"
                 self.inference_library = "ONNX"
                 self.model_repo = "microsoft/Phi-3-vision-128k-instruct-onnx-cpu"
                 self.model_filename = None # "Phi-3-vision-128k-instruct.gguf"
-                self.models_dir = "pu-int4-rtn-block-32-acc-level-4"
+                self.models_dir = "cpu-int4-rtn-block-32-acc-level-4"
         else:
+            print("*** Multi-modal LLM using CPU only: This module requires > 16Gb RAM")
             # If only...
             # if self.system_info.cpu_vendor == 'Apple' and self.system_info.cpu_arch == 'arm64':
             #     self.inference_device = "GPU"
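As a rough illustration only (not part of this commit), loading the CPU model folder selected above with onnxruntime-genai would look something like the sketch below; the class names come from onnxruntime-genai's published examples and should be read as an assumption, not as this module's code.

import onnxruntime_genai as og

# Hedged sketch: assumes the cpu-int4-rtn-block-32-acc-level-4 folder has been
# downloaded next to the module (as install.sh does with --local-dir .).
model = og.Model("cpu-int4-rtn-block-32-acc-level-4")  # reads the folder's genai_config.json
tokenizer = og.Tokenizer(model)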

requirements.txt (2 additions, 1 deletion)

@@ -1,7 +1,8 @@
-#! Python3.7
+#! Python3.10
 
 # For Phi-3 ONNX / CPU
 numpy # Installing NumPy, a package for scientific computing
+--pre
 onnxruntime-genai # Installing onnxruntime-genai, the ONNX Runtime generate() API
 
 # torch # Installing PyTorch, an open source machine learning framework
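The new --pre option lets pip consider pre-release builds, presumably so that onnxruntime-genai (which shipped pre-release wheels at the time) can be installed. A purely illustrative way to check which build actually landed in the venv:

# Hedged sketch: print the installed onnxruntime-genai version; with --pre this
# may be a pre-release (rc/dev) build.
from importlib.metadata import version

print(version("onnxruntime-genai"))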

requirements.windows.txt (1 addition, 1 deletion)

@@ -1,5 +1,5 @@
 #
-! Python3.7
+! Python3.10
 
 # For Phi-3 ONNX / DirectML
 numpy # Installing NumPy, a package for scientific computing
