from transformers import pipeline, Conversation  # only used by the commented-out DialoGPT example below
import gradio as gr
from dotenv import load_dotenv

# Load environment variables from the local .env file
load_dotenv()

import base64

# Read the Ceibal logo and base64-encode it so it can be embedded in the Markdown header.
with open("Iso_Logotipo_Ceibal.png", "rb") as image_file:
    encoded_image = base64.b64encode(image_file.read()).decode()

# chatbot = pipeline(model="microsoft/DialoGPT-medium")
# conversation = Conversation("Hi")
# response = chatbot(conversation)
# # conversation.mark_processed()
# # conversation.append_response(response)
# conversation.add_user_input("How old are you?")
# conversation2 = chatbot(conversation)
# print(conversation2)

# def respond(text, conversation):
#     chatbot = pipeline(model="microsoft/DialoGPT-medium")
#     if len(conversation) == 0:
#         conversation = Conversation(text)
#         conversation = chatbot(conversation)
#         print(conversation.iter_texts())
#         # test = []
#         # for user, text in conversation.iter_texts():
#         return text, conversation.iter_texts()
#     else:
#         conversation.add_user_input(text)
#         conversation = chatbot(conversation)
#         return text, conversation.iter_texts()

import os
import openai

openai.api_key = os.environ['OPENAI_API_KEY']


def clear_chat(message, chat_history):
    return "", []


def add_new_message(message, chat_history):
    # Convert the Gradio chat history (list of (user, bot) pairs) into the
    # list of role/content messages expected by the Chat Completions API.
    new_chat = []
    for turn in chat_history:
        user, bot = turn
        new_chat.append({"role": "user", "content": user})
        new_chat.append({"role": "assistant", "content": bot})
    new_chat.append({"role": "user", "content": message})
    return new_chat


def respond(message, chat_history):
    prompt = add_new_message(message, chat_history)
    # stream = client.generate_stream(prompt,
    #                                 max_new_tokens=1024,
    #                                 stop_sequences=["\nUser:", "<|endoftext|>"],
    #                                 temperature=temperature)
    # # stop_sequences to not generate the user answer
    # acc_text = ""

    # Legacy openai-python (<1.0) Chat Completions call.
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=prompt,
        temperature=0.5,
        max_tokens=120,
    ).choices[0].message.content
    chat_history.append((message, response))
    return "", chat_history

    # Streaming the tokens
    # for idx, response in enumerate(stream):
    #     text_token = response.token.text
    #     if response.details:
    #         return
    #     if idx == 0 and text_token.startswith(" "):
    #         text_token = text_token[1:]
    #     acc_text += text_token
    #     last_turn = list(chat_history.pop(-1))
    #     last_turn[-1] += acc_text
    #     chat_history = chat_history + [last_turn]
    #     yield "", chat_history
    #     acc_text = ""


with gr.Blocks() as demo:
    gr.Markdown("""

<center><img src='data:image/png;base64,{}' width=200px></center>

Uso de AI para un chatbot.

Con este espacio podrás hablar en formato conversación con ChatGPT!

""".format(encoded_image)) with gr.Row(): chatbot = gr.Chatbot() #just to fit the notebook with gr.Row(): with gr.Row(): with gr.Column(scale=4): msg = gr.Textbox(label="Texto de entrada") with gr.Column(scale=1): btn = gr.Button("Enviar") clear = gr.ClearButton(components=[msg, chatbot], value="Borrar chat") btn.click(respond, inputs=[msg, chatbot], outputs=[msg, chatbot]) msg.submit(respond, inputs=[msg, chatbot], outputs=[msg, chatbot]) #Press enter to submit clear.click(clear_chat,inputs=[msg, chatbot], outputs=[msg, chatbot]) demo.launch()