Behathenimro committed on
Commit
f855cbe
·
verified ·
1 Parent(s): f4d66e7

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -25
app.py CHANGED
@@ -1,31 +1,25 @@
 
 
1
  import gradio as gr
2
- from transformers import AutoTokenizer, AutoModelForCausalLM
3
- import torch
4
 
5
- # Load your model and tokenizer
6
- tokenizer = AutoTokenizer.from_pretrained("gpt2")
7
- model = AutoModelForCausalLM.from_pretrained("gpt2")
8
- model.eval()
9
 
10
- # Define chat function
11
- def mediq_chat(message, history=[]):
12
- # You can optionally join past conversation to provide more context
13
- input_ids = tokenizer.encode(message, return_tensors="pt")
14
- with torch.no_grad():
15
- output = model.generate(
16
- input_ids,
17
- max_new_tokens=200,
18
- do_sample=True,
19
- temperature=0.7,
20
- top_p=0.9
21
- )
22
- response = tokenizer.decode(output[0], skip_special_tokens=True)
23
- return response
24
 
25
- # Create the Gradio chat interface
26
  gr.ChatInterface(
27
- fn=mediq_chat,
28
- title="🧠 MediQ - Clinical Reasoning Chatbot",
29
- description="Ask clinical questions. The model will respond based on its training.",
30
- theme="default"
31
  ).launch()
 
import openai
from keys import mykey
import gradio as gr

# Authenticate the OpenAI SDK with the "mediQ" secret from the local
# keys module — keeps the raw API key out of this source file.
# NOTE(review): assumes keys.py defines a dict-like `mykey` with a
# "mediQ" entry; verify it is excluded from version control.
openai.api_key = mykey["mediQ"]
 
 
 
6
 
def gpt_response(message, history=None):
    """Answer a chat message with GPT-4, using prior turns as context.

    Parameters
    ----------
    message : str
        The latest user message.
    history : list | None
        Prior conversation. Gradio's ChatInterface passes a list of
        [user, assistant] pairs; plain-string entries are also accepted.
        Defaults to no history.

    Returns
    -------
    str
        The assistant's reply text.
    """
    # Avoid the mutable-default-argument pitfall (shared list across calls).
    history = history or []

    # Flatten the history into plain text lines. The original code did
    # "\n".join(history + [message]), which raises TypeError on the
    # [user, assistant] pair lists that gr.ChatInterface actually sends.
    lines = []
    for turn in history:
        if isinstance(turn, (list, tuple)):
            lines.extend(str(part) for part in turn if part)
        else:
            lines.append(str(turn))
    lines.append(message)
    prompt = "\n".join(lines)

    # NOTE(review): openai.ChatCompletion is the legacy (<1.0) SDK API;
    # confirm the pinned openai version, or migrate to client.chat.completions.
    response = openai.ChatCompletion.create(
        model="gpt-4",
        messages=[
            {"role": "system", "content": "You are a careful and experienced clinical reasoning expert."},
            {"role": "user", "content": prompt}
        ],
        temperature=0.3,   # low temperature: favor consistent clinical answers
        max_tokens=300
    )
    reply = response["choices"][0]["message"]["content"]
    return reply
 
20
 
 
# Build the Gradio chat UI around the GPT-4 handler, then start the
# local web server (blocking call).
chat_ui = gr.ChatInterface(
    fn=gpt_response,
    title="🧠 MediQ GPT-4 Clinical Reasoning",
    description="This private app uses GPT-4 to simulate adaptive question-asking in clinical reasoning."
)
chat_ui.launch()