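"""Gradio app for the TxAgent human-evaluation portal.

Evaluators log in with an assigned ID, compare paired model responses
(TxAgent vs. another model) on five criteria, rate each response on a
1-5 scale, and submit; results are appended to a per-evaluator results
sheet via utils.append_to_sheet.
"""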
import gradio as gr
from gradio_modal import Modal
from huggingface_hub import hf_hub_download, list_repo_files
import os, csv, datetime, sys
import json
from utils import format_chat, append_to_sheet, read_sheet_to_df
import random
import base64
import re

def encode_image_to_base64(image_path):
    """Encodes an image file to a base64 string."""
    try:
        with open(image_path, "rb") as image_file:
            encoded_string = base64.b64encode(image_file.read()).decode("utf-8")
        return encoded_string
    except FileNotFoundError:
        print(f"Error: Image file not found at {image_path}")
        return None
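# Example usage (illustrative file name; any image under static/images works):
#   encoded = encode_image_to_base64("static/images/overview.jpg")
#   data_url = f"data:image/jpeg;base64,{encoded}" if encoded else None
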
# HTML file for the first page
html_file_path = "index.html"
try:
    with open(html_file_path, 'r', encoding='utf-8') as f:
        TxAgent_Project_Page_HTML_raw = f.read()
    TxAgent_Project_Page_HTML = TxAgent_Project_Page_HTML_raw
    # Find all image paths matching the pattern
    image_path_pattern = r'static/images/([^"]*\.jpg)'
    image_paths = re.findall(image_path_pattern, TxAgent_Project_Page_HTML_raw)
    unique_image_paths = set(image_paths)
    # Encode each unique image and replace the paths with inline data URLs
    for img_file in unique_image_paths:
        full_image_path = os.path.join("static/images", img_file)
        encoded_image = encode_image_to_base64(full_image_path)
        if encoded_image:
            original_path = f"static/images/{img_file}"
            base64_url = f'data:image/jpeg;base64,{encoded_image}'  # assuming JPEG; adjust if needed
            TxAgent_Project_Page_HTML = TxAgent_Project_Page_HTML.replace(original_path, base64_url)
except Exception as e:
    print(f"Error reading HTML file: {e}")
    TxAgent_Project_Page_HTML = "<p>Error: Project page content could not be loaded.</p>"
# Load tool lists
fda_drug_labeling_tools_path = "fda_drug_labeling_tools.json"
monarch_tools_path = "monarch_tools.json"
opentarget_tools_path = "opentarget_tools.json"
try:
    with open(fda_drug_labeling_tools_path, 'r') as f:
        fda_data = json.load(f)
    fda_drug_labeling_tools_list = [item['name'] for item in fda_data if 'name' in item]
except Exception as e:
    print(f"Error processing {fda_drug_labeling_tools_path}: {e}")
    fda_drug_labeling_tools_list = ["Error loading FDA tools"]
try:
    with open(monarch_tools_path, 'r') as f:
        monarch_data = json.load(f)
    monarch_tools_list = [item['name'] for item in monarch_data if 'name' in item]
except Exception as e:
    print(f"Error processing {monarch_tools_path}: {e}")
    monarch_tools_list = ["Error loading Monarch tools"]
try:
    with open(opentarget_tools_path, 'r') as f:
        opentarget_data = json.load(f)
    opentarget_tools_list = [item['name'] for item in opentarget_data if 'name' in item]
except Exception as e:
    print(f"Error processing {opentarget_tools_path}: {e}")
    opentarget_tools_list = ["Error loading OpenTarget tools"]
# For labeling the different tool calls in format_chat
tool_database_labels = {
    "**from approved FDA drug labels**": fda_drug_labeling_tools_list,
    "**from the Monarch Initiative databases**": monarch_tools_list,
    "**from the Open Targets database**": opentarget_tools_list,
}
# Define the five evaluation criteria as a list of dictionaries.
criteria = [
    {
        "label": "Problem Resolution",
        "text": (
            "Problem Resolution: Did the model effectively solve the problem?",
            "1️⃣ Did Not Solve the Problem at All. 2️⃣ Attempted to Solve but Largely Incorrect or Incomplete. 3️⃣ Partially Solved the Problem, but with Limitations. 4️⃣ Mostly Solved the Problem, with Minor Issues. 5️⃣ Completely and Effectively Solved the Problem."
        )
    },
    {
        "label": "Helpfulness",
        "text": (
            "Helpfulness: Were the answer and reasoning provided helpful in addressing the question?",
            "1️⃣ Not Helpful at All. 2️⃣ Slightly Helpful, but Largely Insufficient. 3️⃣ Moderately Helpful, but Needs Improvement. 4️⃣ Helpful and Mostly Clear, with Minor Issues. 5️⃣ Extremely Helpful and Comprehensive."
        )
    },
    {
        "label": "Scientific Consensus",
        "text": (
            "Scientific and Clinical Consensus: Does the answer align with established scientific and clinical consensus?",
            "1️⃣ Completely Misaligned with Clinical Consensus. 2️⃣ Partially Aligned but Contains Significant Inaccuracies or Misinterpretations. 3️⃣ Generally Aligned but Lacks Rigor or Clarity. 4️⃣ Mostly Aligned with Clinical Consensus, with Minor Omissions or Uncertainties. 5️⃣ Fully Aligned with Established Clinical Consensus."
        )
    },
    {
        "label": "Accuracy",
        "text": (
            "Accuracy of Content: Is there any incorrect or irrelevant content in the answer and the reasoning content?",
            "1️⃣ Completely Inaccurate or Irrelevant. 2️⃣ Mostly Inaccurate, with Some Relevant Elements. 3️⃣ Partially Accurate, but Includes Some Errors or Omissions. 4️⃣ Mostly Accurate, with Minor Issues or Unverified Claims. 5️⃣ Completely Accurate and Relevant."
        )
    },
    {
        "label": "Completeness",
        "text": (
            "Completeness: Did the answer omit any essential content necessary for a comprehensive response?",
            "1️⃣ Severely Incomplete – Major Content Omissions. 2️⃣ Largely Incomplete – Missing Key Elements. 3️⃣ Somewhat Complete – Covers Basics but Lacks Depth. 4️⃣ Mostly Complete – Minor Omissions or Gaps. 5️⃣ Fully Complete – No Important Omissions."
        )
    },
]
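# Note: each criterion's "text" above is a (question, rating-scale) tuple; the rating
# page renders the two parts as separate Markdown blocks via crit["text"][0] and
# crit["text"][1] further below.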
criteria_for_comparison = [
    {
        "label": "Problem Resolution",
        "text": (
            "Problem Resolution: Did the model effectively solve the problem?<br>"
        )
    },
    {
        "label": "Helpfulness",
        "text": (
            "Helpfulness: Were the answer and reasoning provided helpful in addressing the question?<br>"
        )
    },
    {
        "label": "Scientific Consensus",
        "text": (
            "Scientific and Clinical Consensus: Does the answer align with established scientific and clinical consensus?<br>"
        )
    },
    {
        "label": "Accuracy",
        "text": (
            "Accuracy of Content: Is there any incorrect or irrelevant content in the answer and the reasoning content?<br>"
        )
    },
    {
        "label": "Completeness",
        "text": (
            "Completeness: Did the answer omit any essential content necessary for a comprehensive response?<br>"
        )
    },
]
mapping = {  # for pairwise mapping between model comparison selections
    "👈 Model A": "A",
    "👉 Model B": "B",
    "🤝 Tie": "tie",
    "👎 Neither model did well": "neither"
}

# Prepare data
REPO_ID = "RichardZhu52/TxAgent_human_eval"
CROWDSOURCING_DATA_DIRECTORY = "crowdsourcing_eval_data_0430"
TXAGENT_RESULTS_SHEET_BASE_NAME = "TxAgent_Human_Eval_Results"
DISEASE_SPECIALTY_MAP_FILENAME = "disease_specialty_map.json"
QUESTION_MAP_FILENAME = "question_map.json"
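# Each evaluator writes to a dedicated results sheet named
# f"{TXAGENT_RESULTS_SHEET_BASE_NAME}_{evaluator_id}", e.g.
# "TxAgent_Human_Eval_Results_1234" for a hypothetical evaluator ID 1234.
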
def get_evaluator_questions(evaluator_id, all_files, evaluator_directory, question_map):
    # Filter to only the files in the evaluator's directory
    evaluator_files = [f for f in all_files if f.startswith(f"{evaluator_directory}/")]
    data_by_filename = {}
    for remote_path in evaluator_files:
        local_path = hf_hub_download(
            repo_id=REPO_ID,
            repo_type="dataset",
            revision="main",  # fetches the most recent version of the dataset each time this is called
            filename=remote_path,
            # force_download=True,
        )
        with open(local_path, "r") as f:
            model_name_key = os.path.basename(remote_path).replace('.json', '')
            data_by_filename[model_name_key] = json.load(f)
    # Finally, make sure the evaluator didn't already fill it out: check every tuple of
    # (question_ID, TxAgent, other model), where the other model could be any of the
    # other files in data_by_filename.
    model_names = [key for key in data_by_filename.keys() if key != 'txagent']
    evaluator_question_ids = question_map.get(evaluator_id).get('question_ids')
    full_question_ids_list = []
    for other_model_name in model_names:
        for q_id in evaluator_question_ids:
            full_question_ids_list.append((q_id, other_model_name))
    results_df = read_sheet_to_df(custom_sheet_name=str(TXAGENT_RESULTS_SHEET_BASE_NAME + f"_{str(evaluator_id)}"))
    if (results_df is not None) and (not results_df.empty):
        # Collect all (question_ID, other_model) pairs already seen
        matched_pairs = set()
        for _, row in results_df.iterrows():
            q = row["Question ID"]
            # Pick whichever response isn't 'txagent'
            a, b = row["ResponseA_Model"], row["ResponseB_Model"]
            if a == "txagent" and b != "txagent":
                matched_pairs.add((q, b))
            elif b == "txagent" and a != "txagent":
                matched_pairs.add((q, a))
        # Filter out any tuple whose (q_id, other_model) was already matched
        full_question_ids_list = [
            (q_id, other_model)
            for (q_id, other_model) in full_question_ids_list
            if (q_id, other_model) not in matched_pairs
        ]
    print(f"Filtered question IDs: {full_question_ids_list}")
    print(f"Length of filtered question IDs: {len(full_question_ids_list)}")
    return full_question_ids_list, data_by_filename
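
# Page-navigation callbacks. Each returns a tuple of gr.update() values whose order must
# match the outputs list of the event handler that invokes it; for example, the ten values
# returned by go_to_eval_progress_modal map onto [page0, page1, user_info_state,
# page0_error_box, chat_a, chat_b, page1_prompt, data_subset_state, eval_progress_modal,
# eval_progress_text] in next_btn_0.click further below.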
def go_to_page0_from_minus1():
    return gr.update(visible=False), gr.update(visible=True)

def go_to_eval_progress_modal(name, email, evaluator_id, specialty_dd, subspecialty_dd, years_exp_radio, exp_explanation_tb, npi_id):
    # Validate that the required fields are non-empty before proceeding
    if not name or not email or not evaluator_id or not specialty_dd or not years_exp_radio:
        return gr.update(visible=True), gr.update(visible=False), None, "Please fill out all the required fields (name, email, evaluator ID, specialty, years of experience). If you are not a licensed physician with a specific specialty, please choose the specialty that most closely aligns with your biomedical expertise.", gr.Chatbot(), gr.Chatbot(), gr.HTML(), gr.State(), gr.update(visible=False), ""
    question_map_path = hf_hub_download(
        repo_id=REPO_ID,
        filename=QUESTION_MAP_FILENAME,
        repo_type="dataset",  # or omit if it's a Model/Space
        # force_download=True,  # always fetch a new copy
        revision="main"  # branch/tag/commit; fetches the most recent version of the dataset each time this is called
    )
    # Load the question map from the downloaded file
    with open(question_map_path, 'r') as f:
        question_map = json.load(f)
    # Retrieve data from HF
    evaluator_directory = question_map.get(evaluator_id, {}).get('evaluator_name', None)
    if evaluator_directory is None:
        return gr.update(visible=True), gr.update(visible=False), None, "Invalid Evaluator ID, please try again.", gr.Chatbot(), gr.Chatbot(), gr.HTML(), gr.State(), gr.update(visible=False), ""
    all_files = list_repo_files(
        repo_id=REPO_ID,
        repo_type="dataset",
        revision="main",
    )
    full_question_ids_list, data_by_filename = get_evaluator_questions(evaluator_id, all_files, evaluator_directory, question_map)
    if len(full_question_ids_list) == 0:
        return gr.update(visible=True), gr.update(visible=False), None, "Based on your submitted data, you have no more questions to evaluate. You may exit the page; we will follow up if we require anything else from you. Thank you!", gr.Chatbot(), gr.Chatbot(), gr.HTML(), gr.State(), gr.update(visible=False), ""
    full_question_ids_list = sorted(full_question_ids_list, key=lambda x: str(x[0]) + str(x[1]))
    # The selected question is the first element
    q_id, other_model_name = full_question_ids_list[0]
    # Construct question_for_eval, the question to evaluate this round
    txagent_matched_entry = next(
        (entry for entry in data_by_filename['txagent'] if entry.get("question_ID") == q_id),
        None
    )
    other_model_matched_entry = next(
        (entry for entry in data_by_filename[other_model_name] if entry.get("question_ID") == q_id),
        None
    )
    models_list = [
        {
            "model": "txagent",
            "reasoning_trace": txagent_matched_entry.get("solution")
        },
        {
            "model": other_model_name,
            "reasoning_trace": other_model_matched_entry.get("solution")
        }
    ]
    random.shuffle(models_list)
    question_for_eval = {
        "question": txagent_matched_entry.get("question"),
        "question_ID": q_id,
        "models": models_list,
    }
    # Update user_info
    user_info = (name, email, specialty_dd, subspecialty_dd, years_exp_radio, exp_explanation_tb, npi_id, q_id, evaluator_id)
    chat_A_value = format_chat(question_for_eval['models'][0]['reasoning_trace'], tool_database_labels)
    chat_B_value = format_chat(question_for_eval['models'][1]['reasoning_trace'], tool_database_labels)
    prompt_text = question_for_eval['question']
    # Construct the question-specific elements of the pairwise rating page (page 1)
    page1_prompt = gr.HTML(f'<div style="background-color: #FFEFD5; border: 2px solid #FF8C00; padding: 10px; border-radius: 5px; color: black;"><strong style="color: black;">Prompt:</strong> {prompt_text}</div>')
    chat_a = gr.Chatbot(
        value=chat_A_value,
        type="messages",
        height=400,
        label="Model A Response",
        show_copy_button=False,
        show_label=True,
        render_markdown=True,  # required for markdown/HTML support in messages
        avatar_images=None,  # optional: omit user/assistant icons
        rtl=False
    )
    chat_b = gr.Chatbot(
        value=chat_B_value,
        type="messages",
        height=400,
        label="Model B Response",
        show_copy_button=False,
        show_label=True,
        render_markdown=True,  # required for markdown/HTML support in messages
        avatar_images=None,  # optional: omit user/assistant icons
        rtl=False
    )
    return gr.update(visible=True), gr.update(visible=False), user_info, "", chat_a, chat_b, page1_prompt, question_for_eval, gr.update(visible=True), f"You are about to evaluate the next question. You have {len(full_question_ids_list)} question(s) remaining to evaluate."

# Goes to page 1 from the confirmation modal that tells users how many questions they have left to evaluate
def go_to_page1():
    """
    Shows page 1.
    """
    # Return updates to hide the modal, hide page 0, and show page 1
    updates = [
        gr.update(visible=False),
        gr.update(visible=False),
        gr.update(visible=True),
    ]
    return updates

# Callback to transition from Page 1 to Page 2.
def go_to_page2(data_subset_state, *pairwise_values):
    # pairwise_values is a tuple of values from each radio input.
    criteria_count = len(criteria_for_comparison)
    pairwise_list = list(pairwise_values[:criteria_count])
    comparison_reasons_list = list(pairwise_values[criteria_count:])
    # Gradio components to display the previous page's results on the next page
    pairwise_results_for_display = [gr.Markdown(f"***As a reminder, your pairwise comparison answer for this criterion was: {pairwise_list[i]}. Your answer choices will be restricted based on your comparison answer, but you may go back and change the comparison answer if you wish.***") for i in range(len(criteria))]
    if any(answer is None for answer in pairwise_list):
        return gr.update(visible=True), gr.update(visible=False), None, None, "Error: Please select an option for every pairwise comparison.", gr.Chatbot(), gr.Chatbot(), gr.HTML(), *pairwise_results_for_display
    chat_A_value = format_chat(data_subset_state['models'][0]['reasoning_trace'], tool_database_labels)
    chat_B_value = format_chat(data_subset_state['models'][1]['reasoning_trace'], tool_database_labels)
    prompt_text = data_subset_state['question']
    # Construct the question-specific elements of the rating page (page 2)
    chat_A_rating = gr.Chatbot(
        value=chat_A_value,
        type="messages",
        height=400,
        label="Model A Response",
        show_copy_button=False,
        render_markdown=True
    )
    chat_B_rating = gr.Chatbot(
        value=chat_B_value,
        type="messages",
        height=400,
        label="Model B Response",
        show_copy_button=False,
        render_markdown=True
    )
    page2_prompt = gr.HTML(f'<div style="background-color: #FFEFD5; border: 2px solid #FF8C00; padding: 10px; border-radius: 5px; color: black;"><strong style="color: black;">Prompt:</strong> {prompt_text}</div>')
    return gr.update(visible=False), gr.update(visible=True), pairwise_list, comparison_reasons_list, "", chat_A_rating, chat_B_rating, page2_prompt, *pairwise_results_for_display

# Callback to store scores for Response A.
def store_A_scores(*args):
    # Unpack the arguments: the first half are scores, the second half are checkboxes.
    num = len(args) // 2
    scores = list(args[:num])
    unquals = list(args[num:])
    return scores, unquals

# Callback to transition from Page 2 to Page 3.
def go_to_page3():
    return gr.update(visible=False), gr.update(visible=True)

# Validation callback that skips criteria rated 'Unable to Judge'
def validate_ratings(pairwise_choices, *args):
    num_criteria = len(criteria)
    ratings_A_list = list(args[:num_criteria])
    ratings_B_list = list(args[num_criteria:])
    if any(r is None for r in ratings_A_list) or any(r is None for r in ratings_B_list):
        return "Error: Please provide ratings for both responses for every criterion.", "Error: Please provide ratings for both responses for every criterion."
    error_msgs = []
    for i, choice in enumerate(pairwise_choices):
        score_a = ratings_A_list[i]
        score_b = ratings_B_list[i]
        # Skip criteria where either rating is "Unable to Judge"
        if score_a == "Unable to Judge" or score_b == "Unable to Judge":
            continue
        # Convert string scores to integers for comparison.
        score_a = int(score_a)
        score_b = int(score_b)
        if choice == "👈 Model A" and score_a < score_b:
            error_msgs.append(f"Criterion {i+1} ({criteria[i]['label']}): You selected A as better but scored A lower than B.")
        elif choice == "👉 Model B" and score_b < score_a:
            error_msgs.append(f"Criterion {i+1} ({criteria[i]['label']}): You selected B as better but scored B lower than A.")
        elif choice == "🤝 Tie" and score_a != score_b:
            error_msgs.append(f"Criterion {i+1} ({criteria[i]['label']}): You selected Tie but scored A and B differently.")
    if error_msgs:
        err_str = "\n".join(error_msgs)
        return err_str, err_str
    else:
        return "No errors in responses; feel free to submit!", "No errors in responses; feel free to submit!"

def toggle_slider(is_unqualified):
    # When the checkbox is checked (True), set interactive to False to disable the slider.
    return gr.update(interactive=not is_unqualified)
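# Note: toggle_slider is not wired to any event handler in this file; by its signature it
# appears intended to disable a score input when an "unable to judge" checkbox is checked.
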
centered_col_css = """
#centered-column {
    margin-left: auto;
    margin-right: auto;
    max-width: 800px; /* Adjust this width as desired */
    width: 100%;
}
#participate-btn {
    background-color: purple !important;
    color: white !important;
    border-color: purple !important;
}
#clear_btn {
    background-color: #F08080 !important;
    color: white !important;
    border-color: #F08080 !important;
}
"""

with gr.Blocks(css=centered_col_css) as demo:
    # States to save information between pages.
    user_info_state = gr.State()
    pairwise_state = gr.State()
    scores_A_state = gr.State()
    comparison_reasons = gr.State()
    unqualified_A_state = gr.State()
    data_subset_state = gr.State()

    # Load specialty data
    specialties_path = "specialties.json"
    subspecialties_path = "subspecialties.json"
    try:
        with open(specialties_path, 'r') as f:
            specialties_list = json.load(f)
        with open(subspecialties_path, 'r') as f:
            subspecialties_list = json.load(f)
    except FileNotFoundError:
        print(f"Error: Could not find specialty files at {specialties_path} or {subspecialties_path}. Please ensure these files exist.")
        # Provide placeholder lists so the dropdowns still render
        specialties_list = ["Error loading specialties"]
        subspecialties_list = ["Error loading subspecialties"]
    except json.JSONDecodeError:
        print("Error: Could not parse JSON from specialty files.")
        specialties_list = ["Error parsing specialties"]
        subspecialties_list = ["Error parsing subspecialties"]
    # Page -1: Links users to the question submission form or the evaluation portal
    with gr.Column(visible=True, elem_id="page-1") as page_minus1:
        gr.HTML("""
        <div>
            <h1>TxAgent Evaluation Portal</h1>
            <p>Welcome to the TxAgent Evaluation Portal.</p>
        </div>
        """)
        with gr.Row():
            participate_eval_btn = gr.Button(
                value="🌟 Participate in TxAgent Evaluation 🌟",
                variant="primary",
                size="lg",
                elem_id="participate-btn"
            )
        gr.HTML(TxAgent_Project_Page_HTML)
    # Page 0: Welcome / informational page.
    with gr.Column(visible=False, elem_id="page0") as page0:
        gr.Markdown("## Welcome to the TxAgent Evaluation Study!")
        gr.Markdown("Please read the following instructions and then enter your information to begin:")
        gr.Markdown("""
- Each session requires a commitment of roughly 5-10 minutes to complete one question.
- If you wish to evaluate multiple questions, you may do so; you will never be asked to re-evaluate questions you have already seen.
- When evaluating a question, you will be asked to compare the responses of two different models to the question and then rate each model's response on a scale of 1-5.
- You may use the Back and Next buttons at the bottom of each page to edit any of your responses before submitting.
- You may use the Instruction Page and Home Page buttons at the bottom of each page to return to this page or the home page. Your progress will be saved but not submitted.
- You must submit your answers to the current question before moving on to evaluate the next question.
- You may stop in between questions and return at a later time; however, you must submit your answers to the current question if you would like them saved.

By clicking 'Next' below, you will start the study, with your progress saved after submitting each question. If you have any other questions or concerns, please contact us directly. Thank you for your participation!
        """)
        gr.Markdown("## Please enter your information to get a question to evaluate. Use the same email every time you log onto this evaluation portal, as we use your email to prevent showing repeat questions.")
        name = gr.Textbox(label="Name (required)")
        email = gr.Textbox(label="Email (required). Please use the same email every time you log onto this evaluation portal, as we use your email to prevent showing repeat questions.")
        evaluator_id = gr.Textbox(label="Evaluator ID (required). This is the four-digit ID you received from us for the evaluation study. If you do not have an Evaluator ID or are unsure about your Evaluator ID, please contact us.")
        specialty_dd = gr.Dropdown(choices=specialties_list, label="Primary Medical Specialty (required). See https://www.abms.org/member-boards/specialty-subspecialty-certificates/ for categorization.", multiselect=True)
        subspecialty_dd = gr.Dropdown(choices=subspecialties_list, label="Subspecialty (if applicable). See https://www.abms.org/member-boards/specialty-subspecialty-certificates/ for categorization.", multiselect=True)
        npi_id = gr.Textbox(label="National Provider Identifier ID (optional). Go to https://npiregistry.cms.hhs.gov/search to search for your NPI ID. If you do not have an NPI ID, please leave this blank.")
        years_exp_radio = gr.Radio(
            choices=["0-2 years", "3-5 years", "6-10 years", "11-20 years", "20+ years", "Not Applicable"],
            label="How many years have you been involved in clinical and/or research activities related to your biomedical area of expertise? (required)"
        )
        exp_explanation_tb = gr.Textbox(label="Please briefly explain your expertise/experience relevant to evaluating AI for clinical decision support (optional)")
        page0_error_box = gr.Markdown("")
        with gr.Row():
            next_btn_0 = gr.Button("Next")
        with gr.Row():
            home_btn_0 = gr.Button("Home (your registration info will be saved)")

    with Modal(visible=False) as eval_progress_modal:
        eval_progress_text = gr.Markdown("You have X questions remaining.")
        eval_progress_proceed_btn = gr.Button("OK, proceed to question evaluation")
    # Page 1: Pairwise comparison.
    with gr.Column(visible=False) as page1:
        gr.Markdown("## Part 1/2: Pairwise Comparison")  # TODO: make the part number controlled by question indexing
        page1_prompt = gr.HTML()
        with gr.Row():
            # Scrollable chat window for Response A
            with gr.Column():
                gr.Markdown("**Model A Response:**")
                chat_a = gr.Chatbot(
                    value=[],  # placeholder for chat history
                    type="messages",
                    height=400,
                    label="Model A Response",
                    show_copy_button=False,
                    show_label=True,
                    render_markdown=True,  # required for markdown/HTML support in messages
                    avatar_images=None,  # optional: omit user/assistant icons
                    rtl=False
                )
            # Scrollable chat window for Response B
            with gr.Column():
                gr.Markdown("**Model B Response:**")
                chat_b = gr.Chatbot(
                    value=[],
                    type="messages",
                    height=400,
                    label="Model B Response",
                    show_copy_button=False,
                    show_label=True,
                    render_markdown=True,  # required for markdown/HTML support in messages
                    avatar_images=None,  # optional: omit user/assistant icons
                    rtl=False
                )
        gr.Markdown("<br><br>")
        gr.Markdown("### For each criterion, select which response did better:")
        comparison_reasons_inputs = []  # free-text inputs, one per criterion
        pairwise_inputs = []
        for crit in criteria_for_comparison:
            with gr.Row():
                gr.Markdown(crit['text'])
            radio = gr.Radio(
                choices=[
                    "👈 Model A",  # A
                    "👉 Model B",  # B
                    "🤝 Tie",  # tie
                    "👎 Neither model did well"  # neither
                ],
                label="Which is better?"
            )
            pairwise_inputs.append(radio)
            # Free-text input under each comparison
            text_input = gr.Textbox(label="Reasons for your selection (optional)")
            comparison_reasons_inputs.append(text_input)
        page1_error_box = gr.Markdown("")  # displays validation errors
        with gr.Row():
            back_btn_0 = gr.Button("Back")
            next_btn_1 = gr.Button("Next: Rate Responses")
        with gr.Row():
            home_btn_1 = gr.Button("Home Page (your progress on this question will be saved but not submitted)")
    # Page 2: Combined rating page for both responses.
    with gr.Column(visible=False) as page2:
        gr.Markdown("## Part 2/2: Rate Model Responses")
        # Show a highlighted prompt as on previous pages.
        page2_prompt = gr.HTML()
        # Display both responses side by side using Chatbot windows.
        with gr.Row():
            with gr.Column():
                gr.Markdown("**Model A Response:**")
                chat_a_rating = gr.Chatbot(
                    value=[],
                    type="messages",
                    height=400,
                    label="Model A Response",
                    show_copy_button=False,
                    render_markdown=True
                )
            with gr.Column():
                gr.Markdown("**Model B Response:**")
                chat_b_rating = gr.Chatbot(
                    value=[],
                    type="messages",
                    height=400,
                    label="Model B Response",
                    show_copy_button=False,
                    render_markdown=True
                )
        gr.Markdown("<br><br>")
        gr.Markdown("### For each criterion, select your ratings for each model response:")
        # For each criterion, create a row with two radio groups (left: Response A, right: Response B).
        ratings_A = []  # radio components for response A
        ratings_B = []  # radio components for response B
        def restrict_choices(pairwise_list, index, score_a, score_b):
            """
            Returns (update_for_A, update_for_B).
            Enforces rating constraints based on the pairwise choice for the given criterion index.
            """
            # Get the specific pairwise choice for this criterion using the index,
            # guarding against the state/list not being ready or the index being out of range
            if not pairwise_list or index >= len(pairwise_list):
                pairwise_choice = None
            else:
                pairwise_choice = pairwise_list[index]
            base = ["1", "2", "3", "4", "5", "Unable to Judge"]
            # Default: no restrictions unless explicitly set
            upd_A = gr.update(choices=base)
            upd_B = gr.update(choices=base)
            # Keep it simple: if the pairwise choice is Neither/None, or neither score has
            # been set yet, don't restrict either side.
            if pairwise_choice is None or pairwise_choice == "👎 Neither model did well" or (score_a is None and score_b is None):
                return upd_A, upd_B
            # Helper to parse an int safely ("Unable to Judge" and None map to None)
            def to_int(x):
                try:
                    return int(x)
                except (ValueError, TypeError):
                    return None
            a_int = to_int(score_a)
            b_int = to_int(score_b)
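            # Worked example: with pairwise choice "👈 Model A" and score_a = "3", B's
            # choices collapse to ["1", "2", "3", "Unable to Judge"]; if score_b = "2",
            # A's choices become ["2", "3", "4", "5", "Unable to Judge"].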
            # --- Apply restrictions ---
            if pairwise_choice == "👈 Model A":
                if a_int is not None:  # a_int is None if "Unable to Judge"
                    # B must be <= A (if A is numeric)
                    allowed_b_choices = [str(i) for i in range(1, a_int + 1)] + ["Unable to Judge"]
                    current_b = score_b if score_b in allowed_b_choices else None  # keep the current choice if still valid
                    upd_B = gr.update(choices=allowed_b_choices, value=current_b)
                # If A is "Unable to Judge" or unset, B stays unrestricted
                if b_int is not None:
                    # A must be >= B (if B is numeric)
                    allowed_a_choices = [str(i) for i in range(b_int, 6)] + ["Unable to Judge"]
                    current_a = score_a if score_a in allowed_a_choices else None  # keep the current choice if still valid
                    upd_A = gr.update(choices=allowed_a_choices, value=current_a)
                # If B is "Unable to Judge" or unset, A stays unrestricted
            elif pairwise_choice == "👉 Model B":
                if b_int is not None:
                    # A must be <= B (if B is numeric)
                    allowed_a_choices = [str(i) for i in range(1, b_int + 1)] + ["Unable to Judge"]
                    current_a = score_a if score_a in allowed_a_choices else None  # keep the current choice if still valid
                    upd_A = gr.update(choices=allowed_a_choices, value=current_a)
                # If B is "Unable to Judge" or unset, A stays unrestricted
                if a_int is not None:
                    # B must be >= A (if A is numeric)
                    allowed_b_choices = [str(i) for i in range(a_int, 6)] + ["Unable to Judge"]
                    current_b = score_b if score_b in allowed_b_choices else None  # keep the current choice if still valid
                    upd_B = gr.update(choices=allowed_b_choices, value=current_b)
                # If A is "Unable to Judge" or unset, B stays unrestricted
            elif pairwise_choice == "🤝 Tie":
                # Ratings must match: if one side is numeric, pin the other side to it;
                # if one side is "Unable to Judge", the other must be as well.
                if a_int is not None:
                    upd_B = gr.update(choices=[score_a])
                elif score_a == "Unable to Judge":
                    upd_B = gr.update(choices=["Unable to Judge"])
                if b_int is not None:
                    upd_A = gr.update(choices=[score_b])
                elif score_b == "Unable to Judge":
                    upd_A = gr.update(choices=["Unable to Judge"])
            return upd_A, upd_B

        def clear_selection():
            return None, None
        pairwise_results_for_display = [gr.Markdown(render=False) for _ in range(len(criteria))]
        indices_for_change = []
        for i, crit in enumerate(criteria):
            index_component = gr.Number(value=i, visible=False, interactive=False)
            indices_for_change.append(index_component)
            with gr.Column(elem_id="centered-column"):
                gr.Markdown(f'<div style="text-align: left;">{crit["text"][0]}</div>')
                gr.Markdown(f'<div style="text-align: left;">{crit["text"][1]}</div>')
                pairwise_results_for_display[i].render()
                with gr.Row():
                    with gr.Column(scale=1):
                        rating_a = gr.Radio(choices=["1", "2", "3", "4", "5", "Unable to Judge"],
                                            label=f"Score for Response A - {crit['label']}",
                                            interactive=True)
                    with gr.Column(scale=1):
                        rating_b = gr.Radio(choices=["1", "2", "3", "4", "5", "Unable to Judge"],
                                            label=f"Score for Response B - {crit['label']}",
                                            interactive=True)
                with gr.Row():
                    clear_btn = gr.Button("Clear Selection", size="sm", elem_id="clear_btn")
                    clear_btn.click(fn=clear_selection, outputs=[rating_a, rating_b])
            # Wire each radio to re-restrict the other on change
            rating_a.change(
                fn=restrict_choices,
                inputs=[pairwise_state, index_component, rating_a, rating_b],
                outputs=[rating_a, rating_b]
            )
            rating_b.change(
                fn=restrict_choices,
                inputs=[pairwise_state, index_component, rating_a, rating_b],
                outputs=[rating_a, rating_b]
            )
            ratings_A.append(rating_a)
            ratings_B.append(rating_b)
        with gr.Row():
            back_btn_2 = gr.Button("Back")
            submit_btn = gr.Button("Submit (Note: Once submitted, you cannot edit your responses)", elem_id="submit_btn")
        with gr.Row():
            home_btn_2 = gr.Button("Home Page (your progress on this question will be saved but not submitted)")
        result_text = gr.Textbox(label="Validation Result")
    # Final page: thank-you message.
    with gr.Column(visible=False, elem_id="final_page") as final_page:
        gr.Markdown("## You have no questions left to evaluate. Thank you for your participation!")

    # Error modal: for displaying validation errors.
    with Modal("Error", visible=False, elem_id="error_modal") as error_modal:
        error_message_box = gr.Markdown()
        ok_btn = gr.Button("OK")
        # Clicking OK hides the modal.
        ok_btn.click(lambda: gr.update(visible=False), None, error_modal)

    # Confirmation modal: ask for final submission confirmation.
    with Modal("Confirm Submission", visible=False, elem_id="confirm_modal") as confirm_modal:
        gr.Markdown("Are you sure you want to submit? Once submitted, you cannot edit your responses.")
        with gr.Row():
            yes_btn = gr.Button("Yes, please submit")
            cancel_btn = gr.Button("Cancel")
    # --- Define callback functions for the confirmation flow ---
    def build_row_dict(data_subset_state, user_info, pairwise, comparisons_reasons, *args):
        num_criteria = len(criteria)
        ratings_A_vals = list(args[:num_criteria])
        ratings_B_vals = list(args[num_criteria:])
        prompt_text = data_subset_state['question']
        response_A_model = data_subset_state['models'][0]['model']
        response_B_model = data_subset_state['models'][1]['model']
        timestamp = datetime.datetime.now().isoformat()
        row = {
            "Timestamp": timestamp,
            "Name": user_info[0],
            "Email": user_info[1],
            "Evaluator ID": user_info[8],
            "Specialty": str(user_info[2]),
            "Subspecialty": str(user_info[3]),
            "Years of Experience": user_info[4],
            "Experience Explanation": user_info[5],
            "NPI ID": user_info[6],
            "Question ID": user_info[7],
            "Prompt": prompt_text,
            "ResponseA_Model": response_A_model,
            "ResponseB_Model": response_B_model,
        }
        pairwise = [mapping.get(val, val) for val in pairwise]
        for i, crit in enumerate(criteria):
            label = crit['label']
            row[f"Criterion_{label} Comparison: Which is Better?"] = pairwise[i]
            row[f"Criterion_{label} Comments"] = comparisons_reasons[i]
            row[f"ScoreA_{label}"] = ratings_A_vals[i]
            row[f"ScoreB_{label}"] = ratings_B_vals[i]
        return row
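    # Illustrative row (hypothetical values): {"Timestamp": "2025-04-30T12:00:00",
    # "ResponseA_Model": "txagent", "Criterion_Accuracy Comparison: Which is Better?": "A",
    # "ScoreA_Accuracy": "4", "ScoreB_Accuracy": "3", ...}; one flat dict per submitted question.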
    def final_submit(data_subset_state, user_info, pairwise, comparisons_reasons, *args):
        # --- Part 1: Submit the current results ---
        row_dict = build_row_dict(data_subset_state, user_info, pairwise, comparisons_reasons, *args)
        _, _, _, _, _, _, _, _, evaluator_id = user_info
        append_to_sheet(user_data=None, custom_row_dict=row_dict, custom_sheet_name=str(TXAGENT_RESULTS_SHEET_BASE_NAME + f"_{evaluator_id}"), add_header_when_create_sheet=True)
        # --- Part 2: Recalculate the remaining questions ---
        question_map_path = hf_hub_download(
            repo_id=REPO_ID,
            filename=QUESTION_MAP_FILENAME,
            repo_type="dataset",  # or omit if it's a Model/Space
            # force_download=True,  # always fetch a new copy
            revision="main"  # branch/tag/commit; fetches the most recent version of the dataset each time this is called
        )
        with open(question_map_path, 'r') as f:
            question_map = json.load(f)
        evaluator_directory = question_map.get(evaluator_id, {}).get('evaluator_name', None)
        all_files = list_repo_files(
            repo_id=REPO_ID,
            repo_type="dataset",
            revision="main",
        )
        full_question_ids_list, data_by_filename = get_evaluator_questions(evaluator_id, all_files, evaluator_directory, question_map)
        remaining_count = len(full_question_ids_list)
        # --- Part 3: Determine UI updates based on the remaining count ---
        if remaining_count == 0:
            # Success with no remaining questions
            return (
                gr.update(visible=False),  # page0 (hide)
                gr.update(visible=False),  # page2 (hide)
                gr.update(visible=False),  # confirm_modal (hide)
                gr.update(visible=False),  # eval_progress_modal (hide)
                "",                        # eval_progress_text (clear)
                gr.update(visible=True),   # final_page (show)
                "",                        # page0_error_box (clear)
                None,                      # chat_a
                None,                      # chat_b
                None,                      # page1_prompt
                None                       # data_subset_state
            )
        full_question_ids_list = sorted(full_question_ids_list, key=lambda x: str(x[0]) + str(x[1]))
        # The selected question is the first element
        q_id, other_model_name = full_question_ids_list[0]
        # Construct question_for_eval, the question to evaluate this round
        txagent_matched_entry = next(
            (entry for entry in data_by_filename['txagent'] if entry.get("question_ID") == q_id),
            None
        )
        other_model_matched_entry = next(
            (entry for entry in data_by_filename[other_model_name] if entry.get("question_ID") == q_id),
            None
        )
        models_list = [
            {
                "model": "txagent",
                "reasoning_trace": txagent_matched_entry.get("solution")
            },
            {
                "model": other_model_name,
                "reasoning_trace": other_model_matched_entry.get("solution")
            }
        ]
        random.shuffle(models_list)
        question_for_eval = {
            "question": txagent_matched_entry.get("question"),
            "question_ID": q_id,
            "models": models_list,
        }
        chat_A_value = format_chat(question_for_eval['models'][0]['reasoning_trace'], tool_database_labels)
        chat_B_value = format_chat(question_for_eval['models'][1]['reasoning_trace'], tool_database_labels)
        prompt_text = question_for_eval['question']
        # Construct the question-specific elements of the pairwise rating page (page 1)
        page1_prompt = gr.HTML(f'<div style="background-color: #FFEFD5; border: 2px solid #FF8C00; padding: 10px; border-radius: 5px; color: black;"><strong style="color: black;">Prompt:</strong> {prompt_text}</div>')
        chat_a = gr.Chatbot(
            value=chat_A_value,
            type="messages",
            height=400,
            label="Model A Response",
            show_copy_button=False,
            show_label=True,
            render_markdown=True,  # required for markdown/HTML support in messages
            avatar_images=None,  # optional: omit user/assistant icons
            rtl=False
        )
        chat_b = gr.Chatbot(
            value=chat_B_value,
            type="messages",
            height=400,
            label="Model B Response",
            show_copy_button=False,
            show_label=True,
            render_markdown=True,  # required for markdown/HTML support in messages
            avatar_images=None,  # optional: omit user/assistant icons
            rtl=False
        )
        # Success with remaining questions
        return (
            gr.update(visible=False),  # page0 (hide)
            gr.update(visible=False),  # page2 (hide)
            gr.update(visible=False),  # confirm_modal (hide)
            gr.update(visible=True),   # eval_progress_modal (show)
            f"Submission successful! You have {remaining_count} question(s) remaining to evaluate. You may exit the page and return later if you wish.",  # eval_progress_text
            gr.update(visible=False),  # final_page (hide)
            "",                        # page0_error_box (clear)
            chat_a,
            chat_b,
            page1_prompt,
            question_for_eval          # data_subset_state
        )
    def cancel_submission():
        # Cancel final submission: just hide the confirmation modal.
        return gr.update(visible=False)

    def reset_everything_except_user_info():
        # Reset all pairwise radios & free-text boxes
        reset_pairwise_radios = [gr.update(value=None) for i in range(len(criteria))]
        reset_pairwise_reasoning_texts = [gr.update(value=None) for i in range(len(criteria))]
        # Reset all rating radios
        reset_ratings_A = [gr.update(value=None) for i in range(len(criteria))]
        reset_ratings_B = [gr.update(value=None) for i in range(len(criteria))]
        return (
            # States
            # gr.update(value=None),  # user_info_state
            gr.update(value=None),  # pairwise_state
            gr.update(value=None),  # scores_A_state
            gr.update(value=None),  # comparison_reasons
            gr.update(value=None),  # unqualified_A_state
            # gr.update(value=None),  # data_subset_state
            # Page 0 elements that need to be reset
            gr.update(value=""),  # page0_error_box
            # Page 1 elements that need to be reset
            # gr.update(value=""),  # page1_prompt
            # gr.update(value=[]),  # chat_a
            # gr.update(value=[]),  # chat_b
            gr.update(value=""),  # page1_error_box
            # Page 2 elements that need to be reset
            gr.update(value=""),  # page2_prompt
            gr.update(value=[]),  # chat_a_rating
            gr.update(value=[]),  # chat_b_rating
            gr.update(value=""),  # result_text
            # Lists of Gradio updates that need to be unrolled
            *reset_pairwise_radios,
            *reset_pairwise_reasoning_texts,
            *reset_ratings_A,
            *reset_ratings_B
        )
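    # The order of the updates returned above must match the outputs list of
    # question_submission_event.then(...) at the bottom of this file: the unrolled
    # *reset_pairwise_radios, *reset_pairwise_reasoning_texts, *reset_ratings_A, and
    # *reset_ratings_B line up with *pairwise_inputs, *comparison_reasons_inputs,
    # *ratings_A, and *ratings_B.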
    # --- Define transitions between pages ---
    # The "Participate in Evaluation" button transitions to page 0
    participate_eval_btn.click(
        fn=go_to_page0_from_minus1,
        inputs=None,
        outputs=[page_minus1, page0]
    )
    # Transition from Page 0 (Welcome) to the progress modal, then Page 1.
    next_btn_0.click(
        fn=go_to_eval_progress_modal,
        inputs=[name, email, evaluator_id, specialty_dd, subspecialty_dd, years_exp_radio, exp_explanation_tb, npi_id],
        outputs=[page0, page1, user_info_state, page0_error_box, chat_a, chat_b, page1_prompt, data_subset_state, eval_progress_modal, eval_progress_text],
        scroll_to_output=True
    )
    eval_progress_proceed_btn.click(
        fn=go_to_page1,
        inputs=None,
        outputs=[eval_progress_modal, page0, page1],
        scroll_to_output=True
    )
    # Home-page buttons simply show page -1
    home_btn_0.click(lambda: (gr.update(visible=True), gr.update(visible=False)), None, [page_minus1, page0])
    home_btn_1.click(lambda: (gr.update(visible=True), gr.update(visible=False)), None, [page_minus1, page1])
    home_btn_2.click(lambda: (gr.update(visible=True), gr.update(visible=False)), None, [page_minus1, page2])
    # Transition from Page 1 back to Page 0 (Back button).
    back_btn_0.click(
        fn=lambda: (gr.update(visible=True), gr.update(visible=False)),
        inputs=None,
        outputs=[page0, page1]
    )
    # Transition from Page 1 (Pairwise) to the combined rating page (Page 2).
    next_btn_1.click(
        fn=go_to_page2,
        inputs=[data_subset_state, *pairwise_inputs, *comparison_reasons_inputs],
        outputs=[page1, page2, pairwise_state, comparison_reasons, page1_error_box, chat_a_rating, chat_b_rating, page2_prompt, *pairwise_results_for_display],
        scroll_to_output=True
    )
    # Transition from the rating page (Page 2) back to the pairwise page.
    back_btn_2.click(
        fn=lambda: (gr.update(visible=True), gr.update(visible=False)),
        inputs=None,
        outputs=[page1, page2],
        scroll_to_output=True
    )
    # --- Submission: validate the ratings and then process the result ---
    def process_result(result):
        # If validation passed, show the confirmation modal and proceed.
        if result == "No errors in responses; feel free to submit!":
            return (
                gr.update(),               # keep page2 as is
                gr.update(),               # keep final page unchanged
                gr.update(visible=True),   # show confirmation modal
                gr.update(visible=False),  # hide error modal
                gr.update(value="")        # clear the error_message_box
            )
        else:
            # If validation fails, show the error modal and display the error in error_message_box.
            return (
                gr.update(),               # keep page2 as is
                gr.update(),               # keep final page unchanged
                gr.update(visible=False),  # hide confirmation modal
                gr.update(visible=True),   # show error modal
                gr.update(value=result)    # update error_message_box with the validation error
            )
    submit_btn.click(
        fn=validate_ratings,
        inputs=[pairwise_state, *ratings_A, *ratings_B],
        outputs=[error_message_box, result_text]
    ).then(
        fn=process_result,
        inputs=error_message_box,
        outputs=[page2, final_page, confirm_modal, error_modal, error_message_box],
        scroll_to_output=True
    )
    # Finalize submission if the user confirms.
    question_submission_event = yes_btn.click(
        fn=final_submit,
        inputs=[data_subset_state, user_info_state, pairwise_state, comparison_reasons, *ratings_A, *ratings_B],
        outputs=[
            page0,                # final_submit return value 1
            page2,                # final_submit return value 2
            confirm_modal,        # final_submit return value 3
            eval_progress_modal,  # final_submit return value 4
            eval_progress_text,   # final_submit return value 5
            final_page,           # final_submit return value 6
            page0_error_box,
            chat_a,
            chat_b,
            page1_prompt,
            data_subset_state
        ],
        scroll_to_output=True
    )
    # Cancel final submission.
    cancel_btn.click(
        fn=cancel_submission,
        inputs=None,
        outputs=confirm_modal
    )
    # After a successful submission, reset everything (except user info) for the next question
    question_submission_event.then(
        fn=reset_everything_except_user_info,
        inputs=[],
        outputs=[
            # States
            # user_info_state,
            pairwise_state,
            scores_A_state,
            comparison_reasons,
            unqualified_A_state,
            # data_subset_state,
            # Page 0 elements that need to be reset
            page0_error_box,
            # Page 1 elements that need to be reset
            # page1_prompt,
            # chat_a,
            # chat_b,
            page1_error_box,
            # Page 2 elements that need to be reset
            page2_prompt,
            chat_a_rating,
            chat_b_rating,
            result_text,
            # Lists of Gradio components that need to be unrolled
            *pairwise_inputs,
            *comparison_reasons_inputs,
            *ratings_A,
            *ratings_B
        ]
    )

demo.launch(share=True, allowed_paths=["."])