Commit: minor
app.py CHANGED
@@ -11,15 +11,6 @@ login(token=token)
 
 # Load your model and tokenizer
 
-model, tokenizer = load("Rafii/f1llama")
-
-prompt="hello"
-
-if hasattr(tokenizer, "apply_chat_template") and tokenizer.chat_template is not None:
-    messages = [{"role": "user", "content": prompt}]
-    prompt = tokenizer.apply_chat_template(
-        messages, tokenize=False, add_generation_prompt=True
-    )
 
 # response = generate(model, tokenizer, prompt=prompt, verbose=True)
 
@@ -30,12 +21,22 @@ user_input = st.text_input("Enter text:")
 
 
 if st.button("Submit"):
-    print("
+    print("Habibi mereko dhundti kya")
     st.write("mai print toh ho hi raha hu na bantai")
 
     # Tokenize input and make predictions
     # inputs = tokenizer(user_input, return_tensors="pt")
     # outputs = model(**inputs)
-
+    model, tokenizer = load("Rafii/f1llama")
+
+    prompt="hello"
+
+    if hasattr(tokenizer, "apply_chat_template") and tokenizer.chat_template is not None:
+        messages = [{"role": "user", "content": prompt}]
+        prompt = tokenizer.apply_chat_template(
+            messages, tokenize=False, add_generation_prompt=True
+        )
+
+    response = generate(model, tokenizer, prompt=user_input, verbose=True)
 
     st.write(response)
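
In effect, the commit moves model loading, chat-template handling, and generation out of module scope and into the Submit button handler, and replaces the unterminated print(" with a complete print call. For comparison only, below is a minimal sketch of a handler that keeps that behaviour but caches the model so load() does not rerun on every click. It assumes load and generate come from mlx_lm and the UI is Streamlit (both consistent with the diff), that @st.cache_resource is available (Streamlit 1.18+), and that the intent is to generate from the user's text; the helper name get_model is hypothetical and not part of the commit.

import streamlit as st
from mlx_lm import load, generate  # assumed source of load()/generate(), matching the diff's usage


@st.cache_resource  # cache across Streamlit reruns so the checkpoint loads once per process
def get_model():
    # "Rafii/f1llama" is the checkpoint referenced in the diff
    return load("Rafii/f1llama")


user_input = st.text_input("Enter text:")

if st.button("Submit"):
    model, tokenizer = get_model()

    # Assumption: generate from the user's text, so the chat template is applied
    # to user_input rather than to the hard-coded "hello" used in the commit.
    prompt = user_input
    if hasattr(tokenizer, "apply_chat_template") and tokenizer.chat_template is not None:
        messages = [{"role": "user", "content": user_input}]
        prompt = tokenizer.apply_chat_template(
            messages, tokenize=False, add_generation_prompt=True
        )

    response = generate(model, tokenizer, prompt=prompt, verbose=True)
    st.write(response)

As committed, the handler builds a templated prompt from the literal "hello" but then calls generate with prompt=user_input, so the templated prompt is never used; it also reloads the checkpoint on every click, which is why the sketch above hoists load() into a cached helper.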