Update README.md
README.md (changed): completes a truncated `prompt` string in both the Transformers and vLLM usage examples.
```diff
@@ -182,7 +182,7 @@ model = AutoModelForCausalLM.from_pretrained(
     device_map="auto",
 )
 
-prompt = "Who would
+prompt = "Who would win in a fight - a dinosaur or a cow named MooMoo?"
 inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
 
 outputs = model.generate(
```
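The hunk only shows fragments of the README's Transformers example, so here is a minimal self-contained sketch of how the corrected line fits in. The checkpoint name and generation settings are placeholders (the diff does not show them), not necessarily the README's actual values:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Placeholder checkpoint: the diff does not show which model the README loads.
model_name = "Qwen/Qwen2.5-7B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto",  # context line from the hunk above
)

# The corrected prompt string from this commit.
prompt = "Who would win in a fight - a dinosaur or a cow named MooMoo?"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

outputs = model.generate(
    **inputs,
    max_new_tokens=256,  # assumed value; the hunk ends before generate()'s arguments
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```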
````diff
@@ -208,8 +208,7 @@ sampling_params = SamplingParams(
     max_tokens=32768,
 )
 
-prompt = "Who would
-
+prompt = "Who would win in a fight - a dinosaur or a cow named MooMoo?"
 outputs = llm.generate(prompt, sampling_params)
 print(outputs[0].outputs[0].text)
 ```
````
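Likewise, a minimal sketch of the vLLM example this second hunk touches. Only `max_tokens=32768` and the generate/print lines appear in the diff; the model name and temperature below are assumptions:

```python
from vllm import LLM, SamplingParams

# Placeholder checkpoint: the diff does not show which model the README loads.
llm = LLM(model="Qwen/Qwen2.5-7B-Instruct")

sampling_params = SamplingParams(
    temperature=0.7,   # assumed; only max_tokens appears in the hunk
    max_tokens=32768,  # context line from the hunk above
)

# The corrected prompt string from this commit.
prompt = "Who would win in a fight - a dinosaur or a cow named MooMoo?"

outputs = llm.generate(prompt, sampling_params)
print(outputs[0].outputs[0].text)
```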