Spaces:
Sleeping
Sleeping
Update function.py
Browse files- function.py +7 -5
function.py
CHANGED
@@ -7,17 +7,18 @@ import streamlit as st
|
|
7 |
def get_answers(questions, model):
    """Ask the selected LLM to answer the given quiz questions.

    Parameters
    ----------
    questions : str
        The question text to be answered (produced by GetLLMResponse).
    model : str
        Which backend to use: "Open AI" or "Gemini".

    Returns
    -------
    str
        The model's answer/explanation text.

    Raises
    ------
    ValueError
        If *model* is not one of the supported backends.
    """
    # NOTE(review): the original left both `answers =` assignments without a
    # right-hand side and truncated the prompt literal; reconstructed here.
    # The {questions} placeholder must be interpolated explicitly — without
    # .format() the literal text "{questions}" would be sent to the model.
    answer_prompt = (
        "I want you to become a teacher answer this specific Question: "
        "{questions}. You should gave me a straightforward and consise "
        "explanation and answer to each one of them"
    ).format(questions=questions)

    if model == "Open AI":
        # Completion-style LLM: calling it returns a plain string.
        llm = OpenAI(temperature=0.7, openai_api_key=st.secrets["OPENAI_API_KEY"])
        answers = llm(answer_prompt)
    elif model == "Gemini":
        # Chat model: .invoke() returns a message object; the text lives in
        # its .content attribute.
        llm = ChatGoogleGenerativeAI(model="gemini-pro", google_api_key=st.secrets["GOOGLE_API_KEY"])
        answers = llm.invoke(answer_prompt)
        answers = answers.content
    else:
        # Previously an unknown model fell through and raised
        # UnboundLocalError at the return; fail loudly instead.
        raise ValueError("Unsupported model: " + repr(model))

    return answers
|
@@ -31,12 +32,13 @@ def GetLLMResponse(selected_topic_level, selected_topic,num_quizzes, model):
|
|
31 |
|
32 |
if model == "Open AI":
|
33 |
llm = OpenAI(temperature=0.7, openai_api_key=st.secrets["OPENAI_API_KEY"])
|
34 |
-
questions =
|
35 |
|
36 |
|
37 |
elif model == "Gemini":
|
38 |
llm = ChatGoogleGenerativeAI(model="gemini-pro", google_api_key=st.secrets["GOOGLE_API_KEY"])
|
39 |
-
questions =
|
|
|
40 |
# return questions.content
|
41 |
|
42 |
answers = get_answers(questions,model)
|
|
|
7 |
def get_answers(questions, model):
    """Ask the selected LLM to answer the given quiz questions.

    Parameters
    ----------
    questions : str
        The question text to be answered (produced by GetLLMResponse).
    model : str
        Which backend to use: "Open AI" or "Gemini".

    Returns
    -------
    str
        The model's answer/explanation text.

    Raises
    ------
    ValueError
        If *model* is not one of the supported backends.
    """
    # BUG FIX: the prompt contained "{questions}" but was never formatted, so
    # the literal placeholder — not the actual questions — was sent to the
    # model. Interpolate it with str.format.
    # NOTE(review): the prompt wording has typos ("gave", "consise"); left
    # byte-identical since prompt text is runtime behavior — confirm with the
    # author before rewording.
    answer_prompt = (
        "I want you to become a teacher answer this specific Question: "
        "{questions}. You should gave me a straightforward and consise "
        "explanation and answer to each one of them"
    ).format(questions=questions)

    if model == "Open AI":
        # Completion-style LLM: calling it returns a plain string.
        llm = OpenAI(temperature=0.7, openai_api_key=st.secrets["OPENAI_API_KEY"])
        answers = llm(answer_prompt)
    elif model == "Gemini":
        # Chat model: .invoke() returns a message object; the text lives in
        # its .content attribute.
        llm = ChatGoogleGenerativeAI(model="gemini-pro", google_api_key=st.secrets["GOOGLE_API_KEY"])
        answers = llm.invoke(answer_prompt)
        answers = answers.content
    else:
        # Previously an unknown model fell through and raised
        # UnboundLocalError at the return; fail loudly instead.
        raise ValueError("Unsupported model: " + repr(model))

    return answers
|
|
|
32 |
|
33 |
if model == "Open AI":
|
34 |
llm = OpenAI(temperature=0.7, openai_api_key=st.secrets["OPENAI_API_KEY"])
|
35 |
+
questions = llm(question_prompt)
|
36 |
|
37 |
|
38 |
elif model == "Gemini":
|
39 |
llm = ChatGoogleGenerativeAI(model="gemini-pro", google_api_key=st.secrets["GOOGLE_API_KEY"])
|
40 |
+
questions = llm.invoke(question_prompt)
|
41 |
+
questions = questions.content
|
42 |
# return questions.content
|
43 |
|
44 |
answers = get_answers(questions,model)
|