Search

Create LLM Chatbot using Gradio, response in same screen from multiple models

Python code that takes input from the user and, from the backend, connects to the OpenAI ChatGPT and Google Gemini models.

The responses from both models are displayed on the same screen.


files:
.env

Content of .env file:

OPEN_API_KEY="<Add your Key Here>"
GOOGLE_API_KEY="Add your Key Here"

Content of AImalio.ipynb

Refer the code here...
import os
import requests
from bs4 import BeautifulSoup
from typing import List
from dotenv import load_dotenv
from openai import OpenAI

import gradio as gr



# Load environment variables for OPEN_API_KEY & GOOGLE_API_KEY from .env

load_dotenv()  # Load environment variables from .env file

# Fail fast with a clear message if the key is missing: assigning None to
# os.environ would otherwise raise a confusing TypeError later.
_openai_key = os.getenv("OPEN_API_KEY")
if not _openai_key:
    raise RuntimeError("OPEN_API_KEY is not set — add it to your .env file")
os.environ["OPENAI_API_KEY"] = _openai_key
# Never print secrets to stdout; confirm with a masked suffix only.
print(f"OPENAI_API_KEY loaded (ends with ...{_openai_key[-4:]})")

# Create instance of the AI MODEL client
openai = OpenAI(api_key=_openai_key)

system_message = "You are a helpful assistant."

def dual_response(prompt):
    """Stream a ChatGPT answer, then append a Gemini answer, for *prompt*.

    Generator yielding (chatgpt_text, gemini_text) tuples so Gradio can
    update both output boxes: the ChatGPT box fills token-by-token while
    streaming, and the Gemini answer appears in one final yield.

    Args:
        prompt: the user's input text from the Gradio textbox.

    Yields:
        tuple[str, str]: (partial-or-full ChatGPT response, Gemini response).
    """
    # Imported lazily so the rest of the app loads without the package.
    import google.generativeai as genai
    genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
    # NOTE(review): this is an image-preview model; a text model such as
    # "gemini-1.5-flash" may be intended here — confirm.
    gemini_model = genai.GenerativeModel("gemini-2.5-flash-image-preview")

    # --- Streaming ChatGPT response ---
    messages = [
        {"role": "system", "content": system_message},
        {"role": "user", "content": prompt},
    ]
    chatgpt_stream = openai.chat.completions.create(
        model="gpt-4",
        messages=messages,
        stream=True,
    )
    chatgpt_response = ""
    for chunk in chatgpt_stream:
        # Guard against keep-alive chunks: `choices` may be empty and
        # `delta.content` may be None on role/final chunks.
        if chunk.choices and getattr(chunk.choices[0].delta, "content", None):
            chatgpt_response += chunk.choices[0].delta.content
            yield chatgpt_response, ""  # Stream ChatGPT response as it arrives

    # --- Gemini response (handle non-text parts) ---
    gemini_result = gemini_model.generate_content(prompt)
    gemini_response = ""
    for part in gemini_result.candidates[0].content.parts:
        # getattr with a default skips parts whose `text` is absent or None;
        # the original hasattr check let a None slip into `+=` (TypeError).
        text = getattr(part, "text", None)
        if text:
            gemini_response += text
    if not gemini_response:
        gemini_response = "[No text response from Gemini]"

    yield chatgpt_response, gemini_response

# Add the Gradio UI

# Custom CSS for the red submit button. It must be passed to gr.Blocks()
# at construction time: assigning `demo.css = ...` after the Blocks object
# exists is not applied by current Gradio versions.
CUSTOM_CSS = """
#submit-btn.red-btn button {
    background-color: #e53935 !important;
    color: white !important;
    border: none !important;
}
#submit-btn.red-btn button:hover {
    background-color: #b71c1c !important;
}
"""

with gr.Blocks(theme=gr.themes.Glass(), css=CUSTOM_CSS) as demo:
    # Side-by-side, read-only output boxes for the two models.
    with gr.Row(elem_id="response-row", elem_classes="dynamic-height"):
        output_text1 = gr.Textbox(label="ChatGPT Response", show_copy_button=True, interactive=False)
        output_text2 = gr.Textbox(label="Google Gemini Response", show_copy_button=True, interactive=False)
    # Input area with the styled submit button underneath.
    with gr.Column(scale=1):
        input_text = gr.Textbox(label="Enter your text here", placeholder="Type something...", lines=3)
        submit_btn = gr.Button("Submit", elem_id="submit-btn", elem_classes="red-btn")
    # dual_response is a generator, so Gradio streams its yields into both
    # output textboxes as they arrive.
    submit_btn.click(fn=dual_response, inputs=input_text, outputs=[output_text1, output_text2])

demo.launch()











No comments: