Update chain_app.py

chain_app.py CHANGED (+12 -21)

The commit repairs the broken response handling for several chat profiles: the zephyr-7B call gets a named `system_message` in place of leftover positional `param_N` arguments, and the buffered or orphaned completion code is replaced with token-by-token streaming through Chainlit's `msg.stream_token`.
```diff
@@ -2503,10 +2503,7 @@ async def main(message: cl.Message):
     elif chat_profile == "zephyr-7B":
         result = hf_text_client.predict(
             message=message.content,
-
-            param_3=512,
-            param_4=0.7,
-            param_5=0.95,
+            system_message=f"You are neural nexus official chatbot, you are made by Artin Daneshvar and Sadra Noadoust and you are here to help people",
             api_name="/chat"
         )
         for token in result:
```
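The zephyr-7B fix replaces the auto-generated positional names (`param_3`, `param_4`, `param_5`) with a keyword argument the endpoint actually declares. A minimal sketch of the same `gradio_client` call pattern, with a placeholder Space name and values (only `message`, `system_message`, and `api_name` mirror the diff; everything else is illustrative):

```python
from gradio_client import Client

# Placeholder Space; any Gradio chat Space exposing a /chat endpoint works alike.
client = Client("user/space-name")

# Keyword names must match the parameters the endpoint declares;
# client.view_api() prints the endpoint signatures if unsure.
result = client.predict(
    message="Hello!",
    system_message="You are a helpful assistant.",
    api_name="/chat",
)
print(result)
```

Gradio falls back to `param_N` placeholders when an endpoint's parameters are unnamed, which is why the removed arguments look anonymous; passing named keywords makes call sites like this self-documenting.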
```diff
@@ -2528,15 +2525,12 @@ async def main(message: cl.Message):
             stop=None,
         )
 
-        complete_content = ""
-
         for chunk in completion:
-            content
-
-            if
-
+            # Retrieve the content from the current chunk and
+            # check that the chunk is not None before streaming it
+            if chunk is not None:
+                await msg.stream_token(chunk.choices[0].delta.content)
 
-        await cl.Message(content=complete_content).send()
 
     elif chat_profile == 'mistral-nemo-12B':
         client = Client("0x7o/Mistral-Nemo-Instruct", hf_token=hf_token)
```
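The rewritten loop streams each delta into the open Chainlit message as it arrives rather than accumulating everything in `complete_content` and sending a single message at the end. A minimal sketch of that pattern as a reusable helper (the helper and its name are ours, not part of the app); it guards `delta.content` itself, since the first and last chunks of an OpenAI-style stream typically carry `None` instead of text:

```python
import chainlit as cl


async def stream_completion(msg: cl.Message, completion) -> None:
    """Forward an OpenAI-style streaming completion into an already-sent message."""
    for chunk in completion:
        token = chunk.choices[0].delta.content
        if token is not None:  # role/finish chunks carry no text
            await msg.stream_token(token)
    await msg.update()  # finalize the streamed content
```

Inside the handler this would be used as `msg = cl.Message(content="")`, `await msg.send()`, then `await stream_completion(msg, completion)`.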
```diff
@@ -2547,24 +2541,21 @@ async def main(message: cl.Message):
             top_p=0.95,
             api_name="/chat"
         )
-
-
-
-        ).send()
+        for i in list(result[1][0][1]):
+            await msg.stream_token(i)
 
     elif chat_profile == 'mistral-7B-v2':
         client = InferenceClient(
             "mistralai/Mistral-7B-Instruct-v0.2",
             token=f"{hf_token_llama_3_1}",
         )
-
+        for res in client.chat_completion(
             messages=[{"role": "user", "content": message.content}],
             max_tokens=500,
-
-
-
-
-        ).send()
+            stream=True,
+        ):
+            if res.choices[0].delta.content is not None:
+                await msg.stream_token(res.choices[0].delta.content)
 
     elif chat_profile == 'Yi-1.5-34B':
         final_answer = await cl.Message(content="").send()
```
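Two different streaming strategies land in this hunk. The `mistral-nemo-12B` branch only simulates streaming: `result[1][0][1]` appears to pluck the assistant's reply out of the nested history structure the Space returns, and the loop then feeds that finished string to `stream_token` one character at a time. The `mistral-7B-v2` branch streams for real via `huggingface_hub`; a minimal standalone sketch of that call (the prompt and token are placeholders):

```python
from huggingface_hub import InferenceClient

client = InferenceClient(
    "mistralai/Mistral-7B-Instruct-v0.2",
    token="hf_...",  # substitute a real Hugging Face token
)

# stream=True yields partial chunks instead of one final response object.
for chunk in client.chat_completion(
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
    max_tokens=500,
    stream=True,
):
    delta = chunk.choices[0].delta.content
    if delta is not None:  # the final chunk may carry no text
        print(delta, end="", flush=True)
```

In the app, the `print` becomes `await msg.stream_token(delta)`, exactly as the new branch does.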