# InviteAI / query_handler.py
# Author: dhanvanth183 — "Added Local code for Invitation Generator" (commit 07bd23e)
import os
from langchain_groq import ChatGroq
from dotenv import load_dotenv
load_dotenv()
class LLMHandler:
    """Generates and validates personalized invitation texts via Groq-hosted LLMs.

    Requires the GROQ_API_KEY environment variable (loaded from a .env file by
    the module-level load_dotenv() call, or set directly in the environment).
    """

    # Secondary model used to double-check the primary model's output.
    VALIDATOR_MODEL_NAME = "gemma2-9b-it"

    def __init__(self, model_name="llama-3.3-70b-versatile"):
        """Initialize the handler with the specified Groq chat model.

        Args:
            model_name: Groq model identifier for the primary generator.

        Raises:
            ValueError: If GROQ_API_KEY is not set in the environment.
        """
        self.groq_api_key = os.getenv("GROQ_API_KEY")
        if not self.groq_api_key:
            raise ValueError("GROQ_API_KEY environment variable not set.")
        # Primary generator client.
        self.llm = ChatGroq(groq_api_key=self.groq_api_key, model_name=model_name)
        # Validator client is created lazily on first use (see _get_validator),
        # instead of being rebuilt on every validate_and_correct_response call.
        self._validator = None

    @staticmethod
    def _format_details(data):
        """Render the individual's fields as a bullet list for prompt text.

        Shared by generate_response and validate_and_correct_response, which
        previously duplicated this formatting inline.

        Args:
            data: Mapping with keys 'Name', 'Job Title', 'Organisation',
                'Area of Interest', and 'Category'.

        Returns:
            A newline-joined bullet list (no trailing newline).

        Raises:
            KeyError: If any expected key is missing from data.
        """
        return (
            f"- Name: {data['Name']}\n"
            f"- Job Title: {data['Job Title']}\n"
            f"- Organisation: {data['Organisation']}\n"
            f"- Area of Interest: {data['Area of Interest']}\n"
            f"- Category: {data['Category']}"
        )

    def _get_validator(self):
        """Return the cached validator LLM client, creating it on first use."""
        if self._validator is None:
            self._validator = ChatGroq(
                groq_api_key=self.groq_api_key,
                model_name=self.VALIDATOR_MODEL_NAME,
            )
        return self._validator

    def generate_response(self, user_prompt, data):
        """Generate a concise, personalized invite text for one individual.

        Args:
            user_prompt: Free-form instructions describing the desired invite.
            data: Mapping with the keys listed in _format_details.

        Returns:
            The LLM's response text, stripped of surrounding whitespace.

        Raises:
            KeyError: If data lacks an expected key.
        """
        # BUG FIX: the original concatenated sentence fragments with no
        # separating whitespace ("...user's instructions.STRICTLY give..."),
        # garbling the instructions sent to the model.
        prompt = (
            "You are a professional AI model tasked with writing personalized invite texts "
            "that are concise (less than 40 words), brochure-suitable, and tailored as per "
            "the category in the given sample.\n\n"
            f"Consider the user prompt: {user_prompt}\n\n"
            "Details of the individual:\n"
            f"{self._format_details(data)}\n\n"
            f"The response can start with 'Hello {data['Name']}'. "
            "Write a personalized invitation text for this individual, ensuring the tone "
            "and purpose align with the user's instructions. "
            "STRICTLY give only one response for the category the sample belongs to. "
            "Do NOT mention the category in the response. "
            "NO PREAMBLE."
        )
        response = self.llm.invoke(prompt)
        return response.content.strip()

    def validate_and_correct_response(self, user_prompt, original_response, data):
        """Validate original_response with a secondary LLM and return its verdict.

        Args:
            user_prompt: The instructions the original response had to follow.
            original_response: The text produced by generate_response.
            data: Mapping with the keys listed in _format_details.

        Returns:
            The literal string 'Valid Response' when the validator accepts the
            original, otherwise the validator's corrected text. Callers must
            handle the sentinel themselves (contract preserved from the
            original implementation).

        Raises:
            KeyError: If data lacks an expected key.
        """
        # BUG FIX: the original ran the instruction bullets together with no
        # separators ("...corrected version.- The corrected version...");
        # each bullet is now on its own line.
        validation_prompt = (
            "You are a professional AI model tasked with validating and correcting "
            "AI-generated texts. The original response must align strictly with the "
            "provided user prompt and input details. If the response fails to meet "
            "the requirements, generate a corrected response.\n\n"
            f"User prompt: {user_prompt}\n\n"
            "Details of the individual:\n"
            f"{self._format_details(data)}\n\n"
            f"Original response: {original_response}\n\n"
            "Instructions:\n"
            "- If the original response is correct, reply with 'Valid Response'.\n"
            "- Otherwise, provide a corrected version.\n"
            f"- The corrected version should start with 'Hello {data['Name']}'.\n"
            "- The corrected version is concise (less than 40 words), "
            "brochure-suitable, and tailored as per the Category\n"
            "- NO PREAMBLE"
        )
        validation_response = self._get_validator().invoke(validation_prompt)
        return validation_response.content.strip()