Skip to content

Commit 7fa8b3b

Browse files
authored
Add a symlink for llama_models (#201)
* Add a symlink for llama_models
* Update README.md paths
1 parent e517d3f commit 7fa8b3b

7 files changed: +17 −16 lines changed

README.md

+4-4
Original file line numberDiff line numberDiff line change
@@ -53,15 +53,15 @@ You need to install the following dependencies (in addition to the `requirements
5353
pip install torch fairscale fire blobfile
5454
```
5555

56-
After installing the dependencies, you can run the example scripts (within `models/scripts/` sub-directory) as follows:
56+
After installing the dependencies, you can run the example scripts (within `llama_models/scripts/` sub-directory) as follows:
5757
```bash
5858
#!/bin/bash
5959

6060
CHECKPOINT_DIR=~/.llama/checkpoints/Meta-Llama3.1-8B-Instruct
61-
PYTHONPATH=$(git rev-parse --show-toplevel) torchrun models/scripts/example_chat_completion.py $CHECKPOINT_DIR
61+
PYTHONPATH=$(git rev-parse --show-toplevel) torchrun llama_models/scripts/example_chat_completion.py $CHECKPOINT_DIR
6262
```
6363

64-
The above script should be used with an Instruct (Chat) model. For a Base model, use the script `models/scripts/example_text_completion.py`. Note that you can use these scripts with both Llama3 and Llama3.1 series of models.
64+
The above script should be used with an Instruct (Chat) model. For a Base model, use the script `llama_models/scripts/example_text_completion.py`. Note that you can use these scripts with both Llama3 and Llama3.1 series of models.
6565

6666
For running larger models with tensor parallelism, you should modify as:
6767
```bash
@@ -70,7 +70,7 @@ For running larger models with tensor parallelism, you should modify as:
7070
NGPUS=8
7171
PYTHONPATH=$(git rev-parse --show-toplevel) torchrun \
7272
--nproc_per_node=$NGPUS \
73-
models/scripts/example_chat_completion.py $CHECKPOINT_DIR \
73+
llama_models/scripts/example_chat_completion.py $CHECKPOINT_DIR \
7474
--model_parallel_size $NGPUS
7575
```
7676

llama_models

+1
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
models

models/scripts/example_chat_completion.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -12,14 +12,14 @@
1212

1313
import fire
1414

15-
from models.llama3.api.datatypes import (
15+
from llama_models.llama3.api.datatypes import (
1616
CompletionMessage,
1717
StopReason,
1818
SystemMessage,
1919
UserMessage,
2020
)
2121

22-
from models.llama3.reference_impl.generation import Llama
22+
from llama_models.llama3.reference_impl.generation import Llama
2323

2424

2525
def run_main(

models/scripts/example_text_completion.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -11,9 +11,9 @@
1111
from typing import Optional
1212

1313
import fire
14-
from termcolor import cprint
1514

16-
from models.llama3.reference_impl.generation import Llama
15+
from llama_models.llama3.reference_impl.generation import Llama
16+
from termcolor import cprint
1717

1818

1919
def run_main(

models/scripts/multimodal_example_chat_completion.py

+3-3
Original file line numberDiff line numberDiff line change
@@ -12,11 +12,11 @@
1212

1313
import fire
1414

15-
from PIL import Image as PIL_Image
15+
from llama_models.llama3.api.datatypes import ImageMedia, UserMessage
1616

17-
from models.llama3.api.datatypes import ImageMedia, UserMessage
17+
from llama_models.llama3.reference_impl.generation import Llama
1818

19-
from models.llama3.reference_impl.generation import Llama
19+
from PIL import Image as PIL_Image
2020

2121

2222
def run_main(

models/scripts/multimodal_example_text_completion.py

+4-4
Original file line numberDiff line numberDiff line change
@@ -12,12 +12,12 @@
1212

1313
import fire
1414

15-
from PIL import Image as PIL_Image
16-
from termcolor import cprint
15+
from llama_models.llama3.api.datatypes import ImageMedia
1716

18-
from models.llama3.api.datatypes import ImageMedia
17+
from llama_models.llama3.reference_impl.generation import Llama
1918

20-
from models.llama3.reference_impl.generation import Llama
19+
from PIL import Image as PIL_Image
20+
from termcolor import cprint
2121

2222

2323
def run_main(

setup.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -37,7 +37,7 @@ def read_requirements():
3737
long_description=open("README.md").read(),
3838
long_description_content_type="text/markdown",
3939
url="https://github.com/meta-llama/llama-models",
40-
package_dir={"llama_models": "models"},
40+
package_dir={"llama_models": "llama_models"},
4141
classifiers=[],
4242
python_requires=">=3.10",
4343
install_requires=read_requirements(),

0 commit comments

Comments (0)