File size: 2,947 Bytes
d8fbbc5
585b37a
 
 
 
5b7ed98
297523b
585b37a
 
 
a1c3cc8
585b37a
 
 
860d47f
585b37a
 
 
 
 
cf1b72d
585b37a
 
 
 
 
 
 
 
 
 
 
 
 
cf1b72d
585b37a
5b7ed98
585b37a
 
 
 
af1d962
 
585b37a
860d47f
 
d4db789
860d47f
3ca613c
860d47f
04f9406
4a9cff4
 
860d47f
585b37a
860d47f
585b37a
 
 
 
 
 
 
 
 
 
262f111
 
 
 
2bb6554
585b37a
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
# Module setup: OpenAI client, Streamlit, the local sentence-transformer
# model, and scipy for cosine distance.
import openai
from openai import OpenAI
import streamlit as st
from streamlit import session_state
import numpy as np
import json
import os
# NOTE(review): the client is created BEFORE the API key is assigned below.
# OpenAI() reads OPENAI_API_KEY from the environment itself, so this likely
# works, but the `openai.api_key = ...` line is probably redundant — confirm.
client = OpenAI()
openai.api_key = os.getenv("OPENAI_API_KEY")
# Only SentenceTransformer is used below; the other names appear unused here
# (they may be used elsewhere in the file — do not remove without checking).
from sentence_transformers import SentenceTransformer, LoggingHandler, losses, util, InputExample
# Loads a locally fine-tuned model from disk; path is relative to the CWD.
model = SentenceTransformer('models/transformer_model/')
from scipy import spatial
def cosine_similarity(x, y):
    """Return the cosine similarity between vectors *x* and *y*.

    ``scipy.spatial.distance.cosine`` yields the cosine *distance*
    (1 - similarity), so subtracting from 1 recovers the similarity.
    """
    distance = spatial.distance.cosine(x, y)
    return 1 - distance
def gpt_scorer(m,s):
    """Ask GPT-4 to grade a student answer against a model answer.

    m: the model (reference) answer text.
    s: the student answer text.
    Returns the parsed JSON dict from the model's reply; the prompt asks
    for the shape {"total": <marks out of 10, in 0.5 steps>}.
    Raises json.JSONDecodeError if the reply is not valid JSON — gpt-4
    has no enforced JSON mode here, so this is best-effort.
    Uses the module-level `client` (OpenAI) created at import time.
    """
    response = client.chat.completions.create(
      model="gpt-4",
      messages=[
        {
          "role": "system",
          # Grading rubric: 10-mark question, half-mark granularity,
          # generous marking, 0 for irrelevant answers, JSON output.
          "content": "You are UPSC answers evaluater. You will be given model answer and student answer. Evaluate it by comparing with the model answer. \n<<REMEMBER>>\nIt is 10 marks question. Give marks in the range of 0.5. (ex. 0,0.5,1...)\nPlease give marks generously. If the student answer body matches more than 70% with the model answer then give full marks for body. \nIf the student answer and model answer is not relevant then give 0 marks.\ngive output in json format. Give output in this format {\"total\":}\n<<OUTPUT>>"
        },
        {
          "role": "user",
          "content": f"Model answer: {m}"},
        {
          "role": "user",
          "content": f"Student answer: {s}"
        }
      ],
      # temperature=0 + fixed seed: make grading as deterministic as the
      # API allows so repeated submissions score consistently.
      temperature=0,
      max_tokens=256,
      top_p=1,
      frequency_penalty=0,
      presence_penalty=0,seed = 10
    )
    # Parse the assistant's reply as JSON (expected: {"total": <number>}).
    return json.loads(response.choices[0].message.content)
def embeddings_cosine(s1, s2, model):
    """Score the similarity of two texts on a 0-10 scale in 0.5 steps.

    Encodes both texts with *model*, takes their cosine similarity
    (1 - cosine distance), scales to a 10-mark range, and rounds the
    result to the nearest half mark.
    """
    vec_a = model.encode(s1)
    vec_b = model.encode(s2)
    similarity = 1 - spatial.distance.cosine(vec_a, vec_b)
    scaled = similarity * 10
    # Doubling, rounding, and halving snaps the score to 0.5 increments.
    return np.abs(np.round(scaled * 2) / 2)
def main_score(m, s):
    """Combine the embedding score and the GPT score into one final mark.

    Uses the GPT score when the two scorers disagree by more than 30%
    of the 10-mark scale, or when GPT judged the answer irrelevant
    (0 marks); otherwise trusts the local embedding score.
    """
    embed_score = embeddings_cosine(m, s, model)
    gpt_result = gpt_scorer(m, s)
    print(gpt_result)
    gpt_score = gpt_result['total']
    print(embed_score, "our model")
    # Disagreement expressed as a percentage of the 10-mark scale.
    gap_pct = (abs(embed_score - gpt_score) / 10) * 100
    if gap_pct > 30 or gpt_score == 0:
        return gpt_score
    return embed_score
# --- Streamlit UI ----------------------------------------------------------
from st_pages import Page, Section, show_pages, add_page_title, add_indentation

st.set_page_config(page_title="Auto score Openai", page_icon="📈")

st.markdown("<h1 style='text-align: center; color: black;'> Welcome to Our App! 👋</h1>", unsafe_allow_html=True)

# Persist the last computed score across Streamlit reruns.
if 'result' not in session_state:
    session_state['result'] = ""

st.title("Auto score")
# Fixed user-facing typos: "bellow" -> "below"; repaired mojibake emoji above.
text1 = st.text_area(label="Please write the model answer below",
                     placeholder="What does the teacher say?")
text2 = st.text_area(label="Please write the student answer below",
                     placeholder="What does the student say?")

def classify(text1, text2):
    """Button callback: score the student answer and stash it in session state."""
    session_state['result'] = main_score(text1, text2)

st.text_area("result", value=session_state['result'])

st.button("Classify", on_click=classify, args=[text1, text2])