Update chain_app.py

chain_app.py (CHANGED, +63 -6)
```diff
@@ -104,9 +104,13 @@ async def chat_profile():
             name="Llama-3-8B",
             markdown_description="Meta Open Source model Llama-2 with 7B parameters",
         ),
+        cl.ChatProfile(
+            name = "gemma2-9B",
+            markdown_description = 'Google Generation 2 Open Source LLM with 9B parameters'
+        ),
         cl.ChatProfile(
             name = "gemma-7B",
-            markdown_description = 'Google Open Source LLM'
+            markdown_description = 'Google Generation 1 Open Source LLM with 7B parameters'
         ),
         cl.ChatProfile(
             name="zephyr-7B",
```
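This first hunk registers the new gemma2-9B profile alongside the existing ones (note the untouched Llama-3-8B entry still describes itself as "Llama-2 with 7B parameters", a leftover worth a follow-up fix). For reference, a minimal sketch of the Chainlit pattern the file is using; the `@cl.set_chat_profiles` decorator, `cl.ChatProfile`, and the `chat_profile` session key are standard Chainlit API, and the profile data mirrors this diff:

```python
import chainlit as cl

@cl.set_chat_profiles
async def chat_profile():
    # Each profile appears in the UI's profile picker; the chosen name
    # is later available as cl.user_session.get("chat_profile").
    return [
        cl.ChatProfile(
            name="gemma2-9B",
            markdown_description="Google Generation 2 Open Source LLM with 9B parameters",
        ),
        cl.ChatProfile(
            name="gemma-7B",
            markdown_description="Google Generation 1 Open Source LLM with 7B parameters",
        ),
    ]
```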
```diff
@@ -187,7 +191,7 @@ async def on_chat_start():
             Select(
                 id="OpenAI-Model",
                 label="OpenAI - Model",
-                values=["
+                values=["gpt4-o-mini"],
                 initial_index=0,
             ),
             Slider(
```
```diff
@@ -334,7 +338,7 @@ async def on_chat_start():
             Select(
                 id="Meta-Model",
                 label="Meta - Model",
-                values=["Llama-3-
+                values=["Llama-3.1-405B"],
                 initial_index=0,
             ),
             Slider(
```
```diff
@@ -357,7 +361,7 @@ async def on_chat_start():
             Select(
                 id="Meta-Model",
                 label="Meta - Model",
-                values=["Llama-3-70B"],
+                values=["Llama-3.1-70B"],
                 initial_index=0,
             ),
             Slider(
```
```diff
@@ -379,7 +383,7 @@ async def on_chat_start():
             Select(
                 id="Meta-Model",
                 label="Meta - Model",
-                values=["Llama-3-
+                values=["Llama-3.1-8B"],
                 initial_index=0,
             ),
             Slider(
```
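The four hunks above only swap the single option offered by each model Select: gpt4-o-mini for the OpenAI profile (which also looks like a transposition of the usual gpt-4o-mini spelling, worth double-checking) and the Llama 3.1 sizes for the three Meta profiles. Every one of these dropdowns pins a one-element `values` list, so it can never show anything but the model the profile already implies; a single multi-option Select would be the obvious consolidation. A sketch of that alternative, offered as a suggestion rather than something this commit does:

```python
from chainlit.input_widget import Select

# One dropdown listing every Llama 3.1 size, instead of three
# near-identical settings panels that each pin a single value.
meta_model = Select(
    id="Meta-Model",
    label="Meta - Model",
    values=["Llama-3.1-8B", "Llama-3.1-70B", "Llama-3.1-405B"],
    initial_index=0,
)
```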
```diff
@@ -440,6 +444,29 @@ async def on_chat_start():
         await cl.Message(
             content="Im The small Llama!. one of the best open source models released by Meta! i am the small version of meta's open source LLMs. i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? "
         ).send()
+    if chat_profile == 'gemma-9B':
+        await cl.ChatSettings(
+            [
+                Select(
+                    id="Google-Model",
+                    label="Google - Model",
+                    values=["Gemma-9B"],
+                    initial_index=0,
+                ),
+                Slider(
+                    id="Temperature",
+                    label="Model Temperature",
+                    initial=0.7,
+                    min=0,
+                    max=1,
+                    step=0.1,
+                ),
+            ]
+        ).send()
+        await cl.Message(
+            content="Im Gemma2. the 9B version of google second generation open source LLMs. i was configured by Artin Daneshvar and Sadra Noadoust, 2 iranian students to help you, how can i assist you today ? "
+        ).send()
+
     if chat_profile == 'gemma-7B':
         await cl.ChatSettings(
             [
```
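This hunk clones the gemma-7B branch of `on_chat_start` for the new model: a settings panel (model Select plus temperature Slider) and a greeting message. One caveat: the branch tests `chat_profile == 'gemma-9B'`, while the profile registered in the first hunk is named `gemma2-9B`, so as written this block (and the matching completion branch below) would never fire for the new profile. The panel's values are delivered to Chainlit's `@cl.on_settings_update` hook; a sketch of the receiving side, where the session-storage pattern is an assumption rather than something this diff adds:

```python
import chainlit as cl

@cl.on_settings_update
async def on_settings_update(settings: dict):
    # Keys match the widget ids declared in on_chat_start
    # ("Google-Model", "Temperature"); stash them for later turns.
    cl.user_session.set("google_model", settings.get("Google-Model"))
    cl.user_session.set("temperature", settings.get("Temperature"))
```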
```diff
@@ -807,6 +834,36 @@ async def main(message: cl.Message):
         # Send the concatenated content as a message
         await cl.Message(content=complete_content).send()
 
+    elif chat_profile == 'gemma-9B':
+        completion = groq_client.chat.completions.create(
+            model="gemma-9b-it",
+            messages=[
+                {
+                    "role": "user",
+                    "content": message.content
+                }
+            ],
+            temperature=1,
+            max_tokens=1024,
+            top_p=1,
+            stream=True,
+            stop=None,
+        )
+
+        complete_content = ""
+
+        # Iterate over each chunk
+        for chunk in completion:
+            # Retrieve the content from the current chunk
+            content = chunk.choices[0].delta.content
+
+            # Check if the content is not None before concatenating it
+            if content is not None:
+                complete_content += content
+
+        # Send the concatenated content as a message
+        await cl.Message(content=complete_content).send()
+
     elif chat_profile == 'gemma-7B':
         completion = groq_client.chat.completions.create(
             model="gemma-7b-it",
```
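The completion branch follows the file's established pattern: request a streamed Groq completion, concatenate the chunks, then send one message at the end. Two things stand out. First, the model id `gemma-9b-it` is worth verifying against the Groq catalog, where the hosted Gemma 2 9B model is usually listed as `gemma2-9b-it`. Second, `stream=True` buys nothing user-visible when the reply is only sent after the loop finishes. A sketch of the incremental alternative using Chainlit's token streaming, with the model id resting on the assumption above:

```python
import chainlit as cl
from groq import Groq

groq_client = Groq()  # reads GROQ_API_KEY from the environment

@cl.on_message
async def main(message: cl.Message):
    completion = groq_client.chat.completions.create(
        model="gemma2-9b-it",  # assumed Groq id; the diff passes "gemma-9b-it"
        messages=[{"role": "user", "content": message.content}],
        stream=True,
    )
    msg = cl.Message(content="")
    for chunk in completion:
        token = chunk.choices[0].delta.content
        if token is not None:
            # Push each token to the UI as it arrives
            # instead of buffering the whole reply.
            await msg.stream_token(token)
    await msg.send()
```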
```diff
@@ -836,7 +893,7 @@ async def main(message: cl.Message):
 
         # Send the concatenated content as a message
         await cl.Message(content=complete_content).send()
-
+
     elif chat_profile == "zephyr-7B":
         result = hf_text_client.predict(
             message=message.content,
```