yasserrmd committed on
Commit
3a09107
·
verified ·
1 Parent(s): e1825f0

Upload 4 files

Browse files
Files changed (4) hide show
  1. groq_client.py +78 -0
  2. main.py +43 -0
  3. models.py +9 -0
  4. requirements.txt +10 -0
groq_client.py ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from groq import Groq
2
+ from typing import List, Dict
3
+
4
+ client = Groq()
5
+
6
async def generate_questions(career: str, qualifications: str) -> str:
    """Ask the Groq LLM for 5 psychological multiple-choice questions.

    Args:
        career: The career the user wants to be assessed for.
        qualifications: Free-text description of the user's qualifications;
            used to pitch the questions at an understandable level.

    Returns:
        The raw JSON string emitted by the model (callers parse it with
        ``json.loads``). JSON mode plus the schema embedded in the system
        prompt constrain it to ``{"questions": [{"question": ...,
        "choices": {"A": ..., "B": ..., "C": ..., "D": ...}}, ...]}``.

    Note:
        Although declared ``async``, the Groq SDK call below is synchronous
        and blocks the event loop while the request is in flight; under real
        load consider wrapping it in ``asyncio.to_thread``.
    """
    # JSON mode alone does not pin the shape, so the full JSON Schema is
    # spelled out in the system prompt as well.
    system_prompt = (
        "You are a helpful assistant that generates a JSON object containing 5 psychological questions as an array of objects."
        "Each object has a question string and four choices labeled A, B, C, D."
        "The questions should assess capability for the career as per the qualification"
        "and should be psychological not anyhting related to the career . Respond ONLY with valid JSON."
        " The question should be understandable by the qualifications."
        " It should always follow the json schema."
        "{ \"$schema\": \"http://json-schema.org/draft-07/schema#\", \"$id\": \"http://example.com/schemas/questions.schema.json\", \"title\": \"Questions Schema\", \"description\": \"A schema describing a list of questions and their choices.\", \"type\": \"object\", \"properties\": { \"questions\": { \"type\": \"array\", \"description\": \"An array of questions.\", \"items\": { \"type\": \"object\", \"properties\": { \"question\": { \"type\": \"string\", \"description\": \"The text of the question.\" }, \"choices\": { \"type\": \"object\", \"description\": \"An object containing multiple choice options.\", \"properties\": { \"A\": { \"type\": \"string\", \"description\": \"Option A answer text.\" }, \"B\": { \"type\": \"string\", \"description\": \"Option B answer text.\" }, \"C\": { \"type\": \"string\", \"description\": \"Option C answer text.\" }, \"D\": { \"type\": \"string\", \"description\": \"Option D answer text.\" } }, \"required\": [\"A\", \"B\", \"C\", \"D\"] } }, \"required\": [\"question\", \"choices\"] } } }, \"required\": [\"questions\"] } "
    )

    response = client.chat.completions.create(
        messages=[
            {"role": "system", "content": system_prompt},
            {
                "role": "user",
                "content": f"Career: {career}\nQualifications: {qualifications}\n"
                           f"Generate 5 psychological questions to determine if capable. make the question understable as per the qualification"
            }
        ],
        model="llama-3.3-70b-versatile",
        temperature=0.5,
        max_tokens=1024,
        top_p=1,
        stop=None,
        stream=False,
        response_format={"type": "json_object"}  # Ensures JSON response
    )
    # Return the model's message body untouched; main.py parses it.
    return response.choices[0].message.content
37
+
38
async def evaluate_answers(career: str, qualifications: str, answers: Dict[str, str]) -> str:
    """Ask the Groq LLM to judge psychological readiness from the user's answers.

    Args:
        career: The career being assessed.
        qualifications: The user's qualifications, used to tailor the roadmap.
        answers: Mapping of question index to chosen option letter,
            e.g. ``{"0": "A", "1": "C", ...}``.

    Returns:
        The raw JSON string emitted by the model (callers parse it with
        ``json.loads``). The schema in the system prompt constrains it to
        contain ``capable``, ``alternatives``, ``roadmap``,
        ``evaluation_report`` and ``motivation`` keys.

    Note:
        As with :func:`generate_questions`, the SDK call is synchronous
        inside an ``async`` function and blocks the event loop.
    """
    system_prompt = (
        "You are a helpful assistant. You receive a career, qualifications, and "
        "user answers to previously generated psychological questions. Your primary goal is to assess "
        "the user's psychological readiness for the career based on their answers. If psychologically capable, "
        "provide a roadmap tailored to their current qualifications and stage in life to help them achieve the career. "
        "If they are not psychologically ready, explain the challenges they might face and suggest alternative careers "
        "aligned with their strengths, along with motivational guidance."
        "career options and provide motivation."
        "in a structured JSON: { 'capable': true, 'roadmap': ['Step 1: Gain experience', 'Step 2: Apply for jobs'], }. If not capable, "
        "suggest alternative careers: { 'capable': false, 'alternatives': [list_of_careers...] }."
        "and also details evaluation report as statement and also provide some motivation even if capability is false"
        "Respond ONLY with valid JSON.with following json schema"
        '{"$schema": "http://json-schema.org/draft-04/schema#", "type": "object", "properties": {"capable": {"type": "boolean"}, "alternatives": {"type": "array", "items": [{"type": "string"}, {"type": "string"}, {"type": "string"}]}, "roadmap": {"type": "array", "items": [{"type": "string"}, {"type": "string"}]}, "evaluation_report": {"type": "string"}, "motivation": {"type": "string"}}, "required": ["capable", "alternatives", "roadmap", "evaluation_report", "motivation"]}'
    )

    # NOTE: the original built a user_input dict here that was never sent to
    # the model (the f-string below carries the data instead); it has been
    # removed as dead code, along with debug prints of user answers.
    response = client.chat.completions.create(
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": f"evaluation should be based on answers {answers} not by qualifications {qualifications} and guide them to achive that as per the qualifations {qualifications} to achieve {career}"}
        ],
        model="llama-3.3-70b-versatile",
        temperature=0.5,
        max_tokens=1024,
        top_p=1,
        stop=None,
        stream=False,
        response_format={"type": "json_object"}  # Ensures JSON response
    )
    return response.choices[0].message.content
main.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import json
import os

import uvicorn
from fastapi import FastAPI, Form, Request
from fastapi.responses import JSONResponse, FileResponse
from fastapi.staticfiles import StaticFiles
from starlette.middleware.sessions import SessionMiddleware

from groq_client import generate_questions, evaluate_answers

app = FastAPI()

# Serve static files (HTML, CSS, JS)
app.mount("/static", StaticFiles(directory="static"), name="static")

# Session-cookie signing key. Prefer the environment so the real secret is
# never committed to source control; the literal below is only a development
# fallback (it matches the previously hard-coded value for compatibility).
app.add_middleware(
    SessionMiddleware,
    secret_key=os.environ.get("SESSION_SECRET_KEY", "vhjh64@$t4#$%yhgxt45ljo##"),
)
18
@app.get("/", response_class=FileResponse)
async def index():
    """Serve the single-page front end from the static directory."""
    page_path = "static/index.html"
    return FileResponse(page_path)
21
+
22
+
23
@app.post("/start", response_class=JSONResponse)
async def start(career: str = Form(...), qualifications: str = Form(...)):
    """Generate the assessment questions for a career/qualifications pair.

    Returns ``{"questions": [...]}`` on success, or a 502 JSON error when
    the LLM output cannot be parsed into the expected shape.
    """
    questions_str = await generate_questions(career, qualifications)
    try:
        questions_data = json.loads(questions_str)
    except json.JSONDecodeError:
        # JSON mode is not a hard guarantee; surface a gateway error rather
        # than letting the handler crash with a 500.
        return JSONResponse(
            status_code=502,
            content={"error": "Question generation returned invalid JSON; please retry."},
        )
    questions = questions_data.get("questions")
    if questions is None:
        return JSONResponse(
            status_code=502,
            content={"error": "Question generation response was missing 'questions'."},
        )
    return {"questions": questions}
29
+
30
+
31
@app.post("/evaluate", response_class=JSONResponse)
async def evaluate(payload: dict):
    """Evaluate the user's answers and return the model's verdict.

    Expects a JSON body with ``career``, ``qualifications`` and ``answers``
    keys; returns ``{"result": {...}}`` on success, a 422 JSON error when a
    key is missing, or a 502 JSON error when the LLM output is unparseable.
    """
    career = payload.get("career")
    qualifications = payload.get("qualifications")
    answers = payload.get("answers")
    # Previously missing keys flowed through as None into the LLM prompt;
    # reject them up front instead.
    if career is None or qualifications is None or answers is None:
        return JSONResponse(
            status_code=422,
            content={"error": "payload must include 'career', 'qualifications' and 'answers'."},
        )
    result = await evaluate_answers(career, qualifications, answers)
    try:
        result_data = json.loads(result)
    except json.JSONDecodeError:
        return JSONResponse(
            status_code=502,
            content={"error": "Evaluation returned invalid JSON; please retry."},
        )
    return {"result": result_data}
40
+
41
+
42
if __name__ == "__main__":
    # Development entry point: bind on all interfaces, port 8000, with
    # auto-reload on code changes (disable reload=True for production).
    uvicorn.run("main:app", host="0.0.0.0", port=8000, reload=True)
models.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ from pydantic import BaseModel
2
+ from typing import Dict, List
3
+
4
class UserInput(BaseModel):
    """Request body for starting an assessment: target career plus background."""

    career: str  # career the user wants to be assessed for
    qualifications: str  # free-text description of current qualifications
7
+
8
class UserAnswers(BaseModel):
    """The user's chosen options, keyed by question index.

    NOTE(review): keys are declared ``int`` here, but the evaluation client's
    example uses string keys (``{"0": "A"}``) — confirm which the front end
    actually sends.
    """

    answers: Dict[int, str]  # question index -> chosen option letter (A–D)
requirements.txt ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ fastapi==0.103.2
2
+ uvicorn==0.22.0
3
+ jinja2==3.1.2
4
+ groq==0.13.0
5
+ python-dotenv==1.0.0
6
+ requests==2.31.0
7
+ itsdangerous==2.1.2
8
+ aiofiles==23.1.0
9
python-multipart==0.0.19