mrolando committed on
Commit
9ed763d
·
1 Parent(s): 2078c11

first commit

Browse files
Files changed (2) hide show
  1. .gitignore +2 -0
  2. app.py +84 -49
.gitignore ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ .env
2
+ env
app.py CHANGED
@@ -1,31 +1,47 @@
1
  from transformers import pipeline, Conversation
2
  import gradio as gr
3
- chatbot = pipeline(model="microsoft/DialoGPT-medium")
4
- conversation = Conversation("Hi")
5
- response = chatbot(conversation)
6
- #conversation.mark_processed()
7
- #conversation.append_response(response)
8
- conversation.add_user_input("How old are you?")
9
-
10
- conversation2 = chatbot(conversation)
11
- print(conversation2)
12
-
13
- def respond(text, conversation):
14
- chatbot = pipeline(model="microsoft/DialoGPT-medium")
15
-
16
- if len(conversation)==0:
17
- conversation = Conversation(text)
18
- conversation = chatbot(conversation)
19
- print(conversation.iter_texts())
20
- # test = []
21
- # for user,text in conversation.iter_texts():
 
 
 
 
 
22
 
23
 
24
- return text, conversation.iter_texts()
25
- else:
26
- conversation.add_user_input(text)
27
- conversation = chatbot(conversation)
28
- return text, conversation.iter_texts()
 
 
 
 
 
 
 
 
 
 
 
29
  # def format_chat_prompt(message, chat_history, instruction):
30
  # prompt = f"System:{instruction}"
31
  # for turn in chat_history:
@@ -34,31 +50,50 @@ def respond(text, conversation):
34
  # prompt = f"{prompt}\nUser: {message}\nAssistant:"
35
  # return prompt
36
 
37
- # def respond(message, chat_history, instruction, temperature=0.7):
38
- # prompt = format_chat_prompt(message, chat_history, instruction)
39
- # chat_history = chat_history + [[message, ""]]
40
- # stream = client.generate_stream(prompt,
41
- # max_new_tokens=1024,
42
- # stop_sequences=["\nUser:", "<|endoftext|>"],
43
- # temperature=temperature)
44
- # #stop_sequences to not generate the user answer
45
- # acc_text = ""
46
- # #Streaming the tokens
47
- # for idx, response in enumerate(stream):
48
- # text_token = response.token.text
49
-
50
- # if response.details:
51
- # return
52
-
53
- # if idx == 0 and text_token.startswith(" "):
54
- # text_token = text_token[1:]
55
-
56
- # acc_text += text_token
57
- # last_turn = list(chat_history.pop(-1))
58
- # last_turn[-1] += acc_text
59
- # chat_history = chat_history + [last_turn]
60
- # yield "", chat_history
61
- # acc_text = ""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
62
 
63
  with gr.Blocks() as demo:
64
  chatbot = gr.Chatbot(height=240) #just to fit the notebook
 
1
  from transformers import pipeline, Conversation
2
  import gradio as gr
3
+
4
+ from dotenv import load_dotenv
5
+
6
+ # Load environment variables from the .env file
7
+ load_dotenv()
8
+ # chatbot = pipeline(model="microsoft/DialoGPT-medium")
9
+ # conversation = Conversation("Hi")
10
+ # response = chatbot(conversation)
11
+ # #conversation.mark_processed()
12
+ # #conversation.append_response(response)
13
+ # conversation.add_user_input("How old are you?")
14
+
15
+ # conversation2 = chatbot(conversation)
16
+ # print(conversation2)
17
+
18
+ # def respond(text, conversation):
19
+ # chatbot = pipeline(model="microsoft/DialoGPT-medium")
20
+
21
+ # if len(conversation)==0:
22
+ # conversation = Conversation(text)
23
+ # conversation = chatbot(conversation)
24
+ # print(conversation.iter_texts())
25
+ # # test = []
26
+ # # for user,text in conversation.iter_texts():
27
 
28
 
29
+ # return text, conversation.iter_texts()
30
+ # else:
31
+ # conversation.add_user_input(text)
32
+ # conversation = chatbot(conversation)
33
+ # return text, conversation.iter_texts()
34
+
35
+ import os
36
+ import openai
37
+ openai.api_key = os.getenv("OPENAI_API_KEY")
38
+
39
+ # response = openai.ChatCompletion.create(
40
+ # model="gpt-3.5-turbo",
41
+ # messages=[],
42
+ # temperature=0.5,
43
+ # max_tokens=256
44
+ # )
45
  # def format_chat_prompt(message, chat_history, instruction):
46
  # prompt = f"System:{instruction}"
47
  # for turn in chat_history:
 
50
  # prompt = f"{prompt}\nUser: {message}\nAssistant:"
51
  # return prompt
52
 
53
+
54
def add_new_message(message, chat_history):
    """Convert Gradio chat history into OpenAI-style message dicts.

    chat_history is a list of (user_text, bot_text) pairs; the new user
    message is appended last so it becomes the prompt to answer.
    """
    messages = []
    for user_text, bot_text in chat_history:
        messages.append({"role": "user", "content": user_text})
        messages.append({"role": "assistant", "content": bot_text})
    messages.append({"role": "user", "content": message})
    return messages
62
+
63
+
64
+
65
def respond(message, chat_history):
    """Send the conversation to the OpenAI chat API and record the reply.

    Builds the message list via add_new_message, calls gpt-3.5-turbo, then
    appends the (message, reply) pair to chat_history. Returns
    ("", chat_history) so the Gradio textbox is cleared on submit.

    NOTE(review): there is no error handling around the API call — a
    network or quota failure surfaces as an unhandled exception.
    """
    messages = add_new_message(message, chat_history)
    completion = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=messages,
        temperature=0.5,
        max_tokens=120,
    )
    reply = completion.choices[0].message.content
    chat_history.append((message, reply))
    return "", chat_history
97
 
98
  with gr.Blocks() as demo:
99
  chatbot = gr.Chatbot(height=240) #just to fit the notebook