Skip to content

Commit 908f8c6

Browse files
committed
Break: Argument name and versioning
1 parent 5e0c5ec commit 908f8c6

File tree

3 files changed

+15
-7
lines changed

3 files changed

+15
-7
lines changed

.github/workflows/package.json

+8
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,10 @@
2222
"tag": "Add",
2323
"release": "minor"
2424
},
25+
{
26+
"tag": "Break",
27+
"release": "major"
28+
},
2529
{
2630
"tag": "Improve",
2731
"release": "patch"
@@ -46,6 +50,10 @@
4650
"tag": "Add",
4751
"release": "minor"
4852
},
53+
{
54+
"tag": "Break",
55+
"release": "major"
56+
},
4957
{
5058
"tag": "Improve",
5159
"release": "patch"

README.md

+2-2
Original file line numberDiff line numberDiff line change
@@ -141,8 +141,8 @@ decoded_text = processor.batch_decode(output[:, prompt_len:])[0]
141141

142142
The generative models can be used for chat-like experiences, where the user can provide both text and images as input.
143143
To use that feature, you can start with the following CLI command:
144-
145-
```bash
144+
image
145+
```bashimage
146146
uform-chat --model unum-cloud/uform-gen-chat --image_path=zebra.jpg
147147
uform-chat --model unum-cloud/uform-gen-chat --image_path=zebra.jpg --device="cuda:0" --fp16
148148
```

src/uform/chat.py

+5-5
Original file line numberDiff line numberDiff line change
@@ -14,9 +14,9 @@ def parse_args():
1414
parser = ArgumentParser(description="Chat with UForm generative model")
1515

1616
parser.add_argument("--model", type=str, default="unum-cloud/uform-gen-chat")
17-
parser.add_argument("--image_path", type=str, help="", required=True)
17+
parser.add_argument("--image", type=str, help="", required=True)
1818
parser.add_argument("--device", type=str, required=True)
19-
parser.add_argument("--fp16", action="store_true")
19+
parser.add_argument("--fp16", action="store_true")
2020

2121
return parser.parse_args()
2222

@@ -30,18 +30,18 @@ def run_chat(opts, model, processor):
3030

3131
messages = [{"role": "system", "content": "You are a helpful assistant."}]
3232
is_first_message = True
33-
if opts.image_path.startswith("http"):
33+
if opts.image.startswith("http"):
3434
image = (
3535
processor.image_processor(
36-
Image.open(requests.get(opts.image_path, stream=True).raw)
36+
Image.open(requests.get(opts.image, stream=True).raw)
3737
)
3838
.unsqueeze(0)
3939
.to(torch.bfloat16 if opts.fp16 else torch.float32)
4040
.to(opts.device)
4141
)
4242
else:
4343
image = (
44-
processor.image_processor(Image.open(opts.image_path))
44+
processor.image_processor(Image.open(opts.image))
4545
.unsqueeze(0)
4646
.to(torch.bfloat16 if opts.fp16 else torch.float32)
4747
.to(opts.device)

0 commit comments

Comments (0)