import random

import openai

import expert_functions
from keys import mykey
class Expert:
"""
Expert system skeleton
"""
def __init__(self, args, inquiry, options):
# Initialize the expert with necessary parameters and the initial context or inquiry
self.args = args
self.inquiry = inquiry
self.options = options
def respond(self, patient_state):
# Decision-making based on the initial information, history of interactions, current inquiry, and options
raise NotImplementedError
def ask_question(self, patient_state, prev_messages):
# Generate a question based on the current patient state
kwargs = {
"patient_state": patient_state,
"inquiry": self.inquiry,
"options_dict": self.options,
"messages": prev_messages,
"independent_modules": self.args.independent_modules,
"model_name": self.args.expert_model_question_generator,
"use_vllm": self.args.use_vllm,
"use_api": self.args.use_api,
"temperature": self.args.temperature,
"max_tokens": self.args.max_tokens,
"top_p": self.args.top_p,
"top_logprobs": self.args.top_logprobs,
"api_account": self.args.api_account
}
return expert_functions.question_generation(**kwargs)
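    # Assumption, inferred from how the result is consumed by the experts below:
    # expert_functions.question_generation is expected to return a dict with at least
    # {"atomic_question": str, "usage": {"input_tokens": int, "output_tokens": int}}.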
def get_abstain_kwargs(self, patient_state):
kwargs = {
"max_depth": self.args.max_questions,
"patient_state": patient_state,
"rationale_generation": self.args.rationale_generation,
"inquiry": self.inquiry,
"options_dict": self.options,
"abstain_threshold": self.args.abstain_threshold,
"self_consistency": self.args.self_consistency,
"model_name": self.args.expert_model,
"use_vllm": self.args.use_vllm,
"use_api": self.args.use_api,
"temperature": self.args.temperature,
"max_tokens": self.args.max_tokens,
"top_p": self.args.top_p,
"top_logprobs": self.args.top_logprobs,
"api_account": self.args.api_account
}
return kwargs
class RandomExpert(Expert):
"""
Below is an example Expert system that randomly asks a question or makes a choice based on the current patient state.
This should be replaced with a more sophisticated expert system that can make informed decisions based on the patient state.
"""
def respond(self, patient_state):
# Decision-making based on the initial information, history of interactions, current inquiry, and options
        initial_info = patient_state['initial_info']  # unused here: the decision below is random
        history = patient_state['interaction_history']  # unused here: the decision below is random
# randomly decide to ask a question or make a choice
abstain = random.random() < 0.5
toy_question = "Can you describe your symptoms more?"
toy_decision = self.choice(patient_state)
conf_score = random.random()/2 if abstain else random.random()
return {
"type": "question" if abstain else "choice",
"question": toy_question,
"letter_choice": toy_decision,
"confidence": conf_score, # Optional confidence score
"urgent": True, # Example of another optional flag
"additional_info": "Check for any recent changes." # Any other optional data
}
def choice(self, patient_state):
# Generate a choice or intermediate decision based on the current patient state
# randomly choose an option
return random.choice(list(self.options.keys()))
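# Illustrative usage of RandomExpert (a sketch; the inquiry/options values and the
# patient_state contents are made up, but the keys match how this file accesses them):
#
#   expert = RandomExpert(args, inquiry="What is the most likely diagnosis?",
#                         options={"A": "...", "B": "...", "C": "...", "D": "..."})
#   patient_state = {"initial_info": "45-year-old presenting with chest pain.",
#                    "interaction_history": []}
#   response = expert.respond(patient_state)
#   # -> {"type": "question" or "choice", "question": ..., "letter_choice": ..., "confidence": ...}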
class BasicExpert(Expert):
def respond(self, patient_state):
kwargs = self.get_abstain_kwargs(patient_state)
abstain_response_dict = expert_functions.implicit_abstention_decision(**kwargs)
return {
"type": "question" if abstain_response_dict["abstain"] else "choice",
"question": abstain_response_dict["atomic_question"],
"letter_choice": abstain_response_dict["letter_choice"],
"confidence": abstain_response_dict["confidence"],
"usage": abstain_response_dict["usage"]
}
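# Assumed shape of the dict returned by expert_functions.implicit_abstention_decision,
# inferred from the keys accessed above:
#   {"abstain": bool, "atomic_question": str, "letter_choice": str,
#    "confidence": float, "usage": {"input_tokens": int, "output_tokens": int}}
# The fixed/binary/numerical/numcutoff/scale variants below are additionally expected
# to return a "messages" list, which is passed back into ask_question.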
class FixedExpert(Expert):
def respond(self, patient_state):
# Decision-making based on the initial information, history of interactions, current inquiry, and options
kwargs = self.get_abstain_kwargs(patient_state)
abstain_response_dict = expert_functions.fixed_abstention_decision(**kwargs)
        if not abstain_response_dict["abstain"]:
return {
"type": "choice",
"letter_choice": abstain_response_dict["letter_choice"],
"confidence": abstain_response_dict["confidence"],
"usage": abstain_response_dict["usage"]
}
question_response_dict = self.ask_question(patient_state, abstain_response_dict["messages"])
abstain_response_dict["usage"]["input_tokens"] += question_response_dict["usage"]["input_tokens"]
abstain_response_dict["usage"]["output_tokens"] += question_response_dict["usage"]["output_tokens"]
return {
"type": "question",
"question": question_response_dict["atomic_question"],
"letter_choice": abstain_response_dict["letter_choice"],
"confidence": abstain_response_dict["confidence"],
"usage": abstain_response_dict["usage"]
}
class BinaryExpert(Expert):
def respond(self, patient_state):
# Decision-making based on the initial information, history of interactions, current inquiry, and options
kwargs = self.get_abstain_kwargs(patient_state)
abstain_response_dict = expert_functions.binary_abstention_decision(**kwargs)
        if not abstain_response_dict["abstain"]:
return {
"type": "choice",
"letter_choice": abstain_response_dict["letter_choice"],
"confidence": abstain_response_dict["confidence"],
"usage": abstain_response_dict["usage"]
}
question_response_dict = self.ask_question(patient_state, abstain_response_dict["messages"])
abstain_response_dict["usage"]["input_tokens"] += question_response_dict["usage"]["input_tokens"]
abstain_response_dict["usage"]["output_tokens"] += question_response_dict["usage"]["output_tokens"]
return {
"type": "question",
"question": question_response_dict["atomic_question"],
"letter_choice": abstain_response_dict["letter_choice"],
"confidence": abstain_response_dict["confidence"],
"usage": abstain_response_dict["usage"]
}
class NumericalExpert(Expert):
def respond(self, patient_state):
# Decision-making based on the initial information, history of interactions, current inquiry, and options
kwargs = self.get_abstain_kwargs(patient_state)
abstain_response_dict = expert_functions.numerical_abstention_decision(**kwargs)
        if not abstain_response_dict["abstain"]:
return {
"type": "choice",
"letter_choice": abstain_response_dict["letter_choice"],
"confidence": abstain_response_dict["confidence"],
"usage": abstain_response_dict["usage"]
}
question_response_dict = self.ask_question(patient_state, abstain_response_dict["messages"])
abstain_response_dict["usage"]["input_tokens"] += question_response_dict["usage"]["input_tokens"]
abstain_response_dict["usage"]["output_tokens"] += question_response_dict["usage"]["output_tokens"]
return {
"type": "question",
"question": question_response_dict["atomic_question"],
"letter_choice": abstain_response_dict["letter_choice"],
"confidence": abstain_response_dict["confidence"],
"usage": abstain_response_dict["usage"]
}
class NumericalCutOffExpert(Expert):
def respond(self, patient_state):
# Decision-making based on the initial information, history of interactions, current inquiry, and options
kwargs = self.get_abstain_kwargs(patient_state)
abstain_response_dict = expert_functions.numcutoff_abstention_decision(**kwargs)
        if not abstain_response_dict["abstain"]:
return {
"type": "choice",
"letter_choice": abstain_response_dict["letter_choice"],
"confidence": abstain_response_dict["confidence"],
"usage": abstain_response_dict["usage"]
}
question_response_dict = self.ask_question(patient_state, abstain_response_dict["messages"])
abstain_response_dict["usage"]["input_tokens"] += question_response_dict["usage"]["input_tokens"]
abstain_response_dict["usage"]["output_tokens"] += question_response_dict["usage"]["output_tokens"]
return {
"type": "question",
"question": question_response_dict["atomic_question"],
"letter_choice": abstain_response_dict["letter_choice"],
"confidence": abstain_response_dict["confidence"],
"usage": abstain_response_dict["usage"]
}
class ScaleExpert(Expert):
def respond(self, patient_state):
# Decision-making based on the initial information, history of interactions, current inquiry, and options
kwargs = self.get_abstain_kwargs(patient_state)
abstain_response_dict = expert_functions.scale_abstention_decision(**kwargs)
        if not abstain_response_dict["abstain"]:
return {
"type": "choice",
"letter_choice": abstain_response_dict["letter_choice"],
"confidence": abstain_response_dict["confidence"],
"usage": abstain_response_dict["usage"]
}
question_response_dict = self.ask_question(patient_state, abstain_response_dict["messages"])
abstain_response_dict["usage"]["input_tokens"] += question_response_dict["usage"]["input_tokens"]
abstain_response_dict["usage"]["output_tokens"] += question_response_dict["usage"]["output_tokens"]
return {
"type": "question",
"question": question_response_dict["atomic_question"],
"letter_choice": abstain_response_dict["letter_choice"],
"confidence": abstain_response_dict["confidence"],
"usage": abstain_response_dict["usage"]
}
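# FixedExpert, BinaryExpert, NumericalExpert, NumericalCutOffExpert, and ScaleExpert
# above share the same control flow and differ only in which expert_functions
# abstention routine they call. A minimal sketch of how that flow could be factored
# out (illustrative only; not used elsewhere in this file):
class SharedAbstentionExpert(Expert):
    # Name of the expert_functions routine to call; subclasses would override this,
    # e.g. with "scale_abstention_decision".
    abstention_fn_name = "fixed_abstention_decision"
    def respond(self, patient_state):
        kwargs = self.get_abstain_kwargs(patient_state)
        abstain_fn = getattr(expert_functions, self.abstention_fn_name)
        abstain_response_dict = abstain_fn(**kwargs)
        if not abstain_response_dict["abstain"]:
            return {
                "type": "choice",
                "letter_choice": abstain_response_dict["letter_choice"],
                "confidence": abstain_response_dict["confidence"],
                "usage": abstain_response_dict["usage"]
            }
        question_response_dict = self.ask_question(patient_state, abstain_response_dict["messages"])
        abstain_response_dict["usage"]["input_tokens"] += question_response_dict["usage"]["input_tokens"]
        abstain_response_dict["usage"]["output_tokens"] += question_response_dict["usage"]["output_tokens"]
        return {
            "type": "question",
            "question": question_response_dict["atomic_question"],
            "letter_choice": abstain_response_dict["letter_choice"],
            "confidence": abstain_response_dict["confidence"],
            "usage": abstain_response_dict["usage"]
        }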
class GPTExpert:
    """
    Standalone expert that queries the OpenAI chat API directly using the legacy
    openai<1.0 interface. Unlike the Expert subclasses above, it returns the raw
    model reply as a string rather than a response dict.
    """
    def __init__(self):
        openai.api_key = mykey["mediQ"]
    def respond(self, patient_state):
        # Build a prompt from the conversation history; expects a list of message
        # strings under "message_history".
        history = patient_state.get("message_history", [])
        prompt = "\n".join(history) + "\nWhat is your next question or final diagnosis?"
        response = openai.ChatCompletion.create(
            model="gpt-4",  # or "gpt-3.5-turbo" for a cheaper run
            messages=[
                {"role": "system", "content": "You are a careful and thorough clinical expert."},
                {"role": "user", "content": prompt}
            ],
            temperature=0.3,
            max_tokens=200
        )
        return response["choices"][0]["message"]["content"]
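# Note: GPTExpert uses the legacy openai<1.0 ChatCompletion interface. With openai>=1.0
# that interface is removed; a roughly equivalent call (a sketch, assuming the same key
# lookup and prompt construction) would be:
#
#   from openai import OpenAI
#   client = OpenAI(api_key=mykey["mediQ"])
#   response = client.chat.completions.create(
#       model="gpt-4",
#       messages=[
#           {"role": "system", "content": "You are a careful and thorough clinical expert."},
#           {"role": "user", "content": prompt},
#       ],
#       temperature=0.3,
#       max_tokens=200,
#   )
#   answer = response.choices[0].message.content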