"""Minimal Gradio chat UI backed by the OpenAI chat-completion API."""

# Standard library
import os

# Third-party
from dotenv import load_dotenv
from transformers import pipeline, Conversation  # NOTE(review): unused in the
# live code path (only referenced by removed experiments) — kept because the
# rest of the file may not be visible; safe to drop after a full-file check.
import gradio as gr
import openai

# Load environment variables from the .env file (expects OPENAI_API_KEY).
load_dotenv()

# Authenticate the OpenAI SDK. NOTE(review): module-level `openai.api_key` is
# the pre-1.0 SDK style; openai>=1.0 uses `client = OpenAI()` instead — confirm
# the pinned openai version before upgrading.
openai.api_key = os.getenv("OPENAI_API_KEY")
def add_new_message(message, chat_history):
    """Convert Gradio-style (user, bot) history into OpenAI chat messages.

    Args:
        message: The newest user utterance; appended as the final turn.
        chat_history: Iterable of (user_text, assistant_text) pairs.

    Returns:
        A list of {"role", "content"} dicts — alternating user/assistant
        turns from the history, ending with the new user message.
    """
    messages = [
        {"role": role, "content": text}
        for user_text, bot_text in chat_history
        for role, text in (("user", user_text), ("assistant", bot_text))
    ]
    messages.append({"role": "user", "content": message})
    return messages
def respond(message, chat_history):
    """Send the conversation to OpenAI and append the reply to the history.

    Args:
        message: The user's new prompt from the textbox.
        chat_history: Gradio chatbot history — a list of (user, bot) pairs.

    Returns:
        A ("", updated_history) pair: the empty string clears the input
        textbox, and the history now includes the (message, response) turn.
    """
    # Rebuild the full message list so the model sees the whole conversation.
    messages = add_new_message(message, chat_history)

    # NOTE(review): openai.ChatCompletion is the pre-1.0 SDK interface and was
    # removed in openai>=1.0 — pin the dependency or migrate to
    # OpenAI().chat.completions.create before upgrading.
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=messages,
        temperature=0.5,
        max_tokens=120,
    ).choices[0].message.content

    # Mutates the history list in place; Gradio re-renders it via the output.
    chat_history.append((message, response))
    return "", chat_history
# --- Gradio UI wiring: chat window, prompt box, submit/clear controls. ---
with gr.Blocks() as demo:
    chatbot = gr.Chatbot(height=240) #just to fit the notebook
    msg = gr.Textbox(label="Prompt")
    btn = gr.Button("Submit")
    clear = gr.ClearButton(components=[msg, chatbot], value="Clear console")
    # Both triggers route through respond(); its ("", history) return clears
    # the textbox and refreshes the chat window.
    btn.click(respond, inputs=[msg, chatbot], outputs=[msg, chatbot])
    msg.submit(respond, inputs=[msg, chatbot], outputs=[msg, chatbot]) #Press enter to submit
demo.launch()