mrolando committed
Commit 2078c11 · 1 Parent(s): 85472b8

so as not to lose the code

Files changed (2)
  1. app.py +72 -0
  2. requirements.txt +3 -0
app.py ADDED
@@ -0,0 +1,72 @@
+ from transformers import pipeline, Conversation
+ import gradio as gr
+
+ # Conversational pipeline, loaded once at startup and reused for every turn.
+ chat_pipeline = pipeline(model="microsoft/DialoGPT-medium")
+
+ # Quick smoke test at import time: run two turns against the model.
+ conversation = Conversation("Hi")
+ conversation = chat_pipeline(conversation)
+ conversation.add_user_input("How old are you?")
+ conversation = chat_pipeline(conversation)
+ print(conversation)
+
+ def respond(text, chat_history):
+     # Rebuild a Conversation from the Gradio history (a list of [user, bot] pairs).
+     conversation = Conversation()
+     for user_msg, bot_msg in chat_history:
+         conversation.add_user_input(user_msg)
+         conversation.mark_processed()
+         conversation.append_response(bot_msg)
+     conversation.add_user_input(text)
+     conversation = chat_pipeline(conversation)
+     bot_reply = conversation.generated_responses[-1]
+     chat_history = chat_history + [(text, bot_reply)]
+     # Clear the textbox and hand the updated history back to gr.Chatbot.
+     return "", chat_history
+
+ # Kept for later: prompt formatting and streaming against a text-generation client.
+ # def format_chat_prompt(message, chat_history, instruction):
+ #     prompt = f"System:{instruction}"
+ #     for turn in chat_history:
+ #         user_message, bot_message = turn
+ #         prompt = f"{prompt}\nUser: {user_message}\nAssistant: {bot_message}"
+ #     prompt = f"{prompt}\nUser: {message}\nAssistant:"
+ #     return prompt
+
+ # def respond(message, chat_history, instruction, temperature=0.7):
+ #     prompt = format_chat_prompt(message, chat_history, instruction)
+ #     chat_history = chat_history + [[message, ""]]
+ #     stream = client.generate_stream(prompt,
+ #                                     max_new_tokens=1024,
+ #                                     stop_sequences=["\nUser:", "<|endoftext|>"],
+ #                                     temperature=temperature)
+ #     # stop_sequences so the model does not generate the user's answer
+ #     acc_text = ""
+ #     # Stream the tokens
+ #     for idx, response in enumerate(stream):
+ #         text_token = response.token.text
+
+ #         if response.details:
+ #             return
+
+ #         if idx == 0 and text_token.startswith(" "):
+ #             text_token = text_token[1:]
+
+ #         acc_text += text_token
+ #         last_turn = list(chat_history.pop(-1))
+ #         last_turn[-1] += acc_text
+ #         chat_history = chat_history + [last_turn]
+ #         yield "", chat_history
+ #         acc_text = ""
+
+ with gr.Blocks() as demo:
+     chatbot = gr.Chatbot(height=240)  # just to fit the notebook
+     msg = gr.Textbox(label="Prompt")
+     btn = gr.Button("Submit")
+     clear = gr.ClearButton(components=[msg, chatbot], value="Clear console")
+
+     btn.click(respond, inputs=[msg, chatbot], outputs=[msg, chatbot])
+     msg.submit(respond, inputs=[msg, chatbot], outputs=[msg, chatbot])  # press Enter to submit
+
+ demo.launch()
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ einops
+ transformers
+ torch