from langchain_community.llms import OpenAI
from langchain_google_genai import ChatGoogleGenerativeAI
import streamlit as st
def get_answers(questions, model):
    # Ask the selected LLM to answer the previously generated questions.
    answer_prompt = (
        f"I want you to become a teacher and answer this specific question: {questions}. "
        f"Give me a straightforward and concise explanation and answer for each one."
    )
    if model == "Open AI":
        llm = OpenAI(temperature=0.7, openai_api_key=st.secrets["OPENAI_API_KEY"])
        answers = llm.invoke(answer_prompt)
    elif model == "Gemini":
        llm = ChatGoogleGenerativeAI(model="gemini-pro", google_api_key=st.secrets["GOOGLE_API_KEY"])
        answers = llm.invoke(answer_prompt)
        answers = answers.content  # chat models return a message object; keep only the text
    else:
        answers = ""  # unsupported model selection
    return answers
def GetLLMResponse(selected_topic_level, selected_topic, num_quizzes, selected_Question_Difficulty, selected_level, model):
    # Generate interview questions with the selected LLM, then fetch answers for them.
    question_prompt = (
        f"You are an AI interview assistant that helps generate customized interview questions "
        f"for various technical and non-technical roles. Your task is to create a set of interview "
        f"questions based on the {selected_topic_level} and topic: {selected_topic}. Ensure the "
        f"questions match the indicated level of understanding: {selected_level} and difficulty: "
        f"{selected_Question_Difficulty}. Generate only {num_quizzes} questions and give them in a "
        f"Python list with the variable name question_list."
    )
    if model == "Open AI":
        llm = OpenAI(temperature=0.7, openai_api_key=st.secrets["OPENAI_API_KEY"])
        questions = llm.invoke(question_prompt)
    elif model == "Gemini":
        llm = ChatGoogleGenerativeAI(model="gemini-pro", google_api_key=st.secrets["GOOGLE_API_KEY"])
        questions = llm.invoke(question_prompt)
        questions = questions.content  # keep only the text of the chat response
    else:
        questions = ""  # unsupported model selection
    answers = get_answers(questions, model)
    return questions, answers
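
# A minimal usage sketch (an assumption, not part of the original app): a simple Streamlit
# front end that collects the inputs GetLLMResponse expects and displays the results.
# The widget labels and option values below are illustrative, not taken from the source.
if __name__ == "__main__":
    st.title("Interview Question Generator")
    model = st.selectbox("Model", ["Open AI", "Gemini"])
    selected_topic_level = st.selectbox("Topic level", ["Beginner", "Intermediate", "Advanced"])
    selected_topic = st.text_input("Topic", "Python")
    selected_level = st.selectbox("Level of understanding", ["Basic", "Working knowledge", "Expert"])
    selected_Question_Difficulty = st.selectbox("Question difficulty", ["Easy", "Medium", "Hard"])
    num_quizzes = st.number_input("Number of questions", min_value=1, max_value=10, value=5)
    if st.button("Generate"):
        questions, answers = GetLLMResponse(
            selected_topic_level, selected_topic, num_quizzes,
            selected_Question_Difficulty, selected_level, model,
        )
        st.subheader("Questions")
        st.write(questions)
        st.subheader("Answers")
        st.write(answers)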