import json
import os

import gradio as gr
from gradio_modal import Modal

from logger import logger
from model import get_guardian_response, get_prompt
from utils import (
    get_messages,
    get_result_description,
    load_command_line_args,
    to_snake_case,
    to_title_case,
)

load_command_line_args()

# catalog.json maps each sub-catalog name to a list of test cases with keys such as
# "name", "criteria", "context", "user_message", "assistant_message", and "tools".
catalog = {}
toy_json = '{"name": "John"}'

with open("catalog.json") as f:
    logger.debug("Loading catalog from json.")
    catalog = json.load(f)


def update_selected_test_case(button_name, state: gr.State, event: gr.EventData):
    # The clicked button's elem_id encodes "<sub_catalog>---<criteria_name>".
    target_sub_catalog_name, target_test_case_name = event.target.elem_id.split("---")
    state["selected_sub_catalog"] = target_sub_catalog_name
    state["selected_criteria_name"] = target_test_case_name
    state["selected_test_case"] = next(
        t
        for sub_catalog_name, sub_catalog in catalog.items()
        for t in sub_catalog
        if t["name"] == to_snake_case(button_name) and to_snake_case(sub_catalog_name) == target_sub_catalog_name
    )
    return state


def on_test_case_click(state: gr.State):
    selected_sub_catalog = state["selected_sub_catalog"]
    selected_criteria_name = state["selected_criteria_name"]
    selected_test_case = state["selected_test_case"]

    logger.debug(f'Changing to test case "{selected_criteria_name}" from catalog "{selected_sub_catalog}".')

    is_context_editable = selected_criteria_name == "context_relevance"
    is_user_message_editable = selected_sub_catalog == "harmful_content_in_user_prompt"
    is_assistant_message_editable = (
        selected_sub_catalog == "harmful_content_in_assistant_response"
        or selected_criteria_name == "groundedness"
        or selected_criteria_name == "answer_relevance"
    )
    is_tools_present = "tools" in selected_test_case and selected_test_case["tools"] is not None

    # NOTE: the component updates below are reconstructed assumptions inferred from the
    # flags above and the return signature; the original arguments were lost.
    test_case_name = f"<h2>{to_title_case(selected_criteria_name)}</h2>"
    criteria = gr.update(value=selected_test_case["criteria"])
    context = gr.update(
        value=selected_test_case.get("context"),
        visible=selected_test_case.get("context") is not None,
        interactive=is_context_editable,
    )
    user_message = gr.update(value=selected_test_case["user_message"], interactive=is_user_message_editable)
    assistant_message_text = gr.update(
        value=selected_test_case.get("assistant_message"),
        visible=selected_criteria_name != "function_calling_hallucination",
        interactive=is_assistant_message_editable,
    )
    assistant_message_json = gr.update(
        value=selected_test_case.get("assistant_message"),
        visible=selected_criteria_name == "function_calling_hallucination",
    )
    tools = gr.update(value=selected_test_case.get("tools"), visible=is_tools_present)
    result_text = gr.update(value="", visible=False)
    result_explanation = gr.update(
        value=f"<p>{get_result_description(selected_sub_catalog, selected_criteria_name)}</p>",
", ) return ( test_case_name, criteria, context, user_message, assistant_message_text, assistant_message_json, tools, result_text, result_explanation ) def change_button_color(event: gr.EventData): return [ ( gr.update(elem_classes=["catalog-button", "selected"]) if v.elem_id == event.target.elem_id else gr.update(elem_classes=["catalog-button"]) ) for c in catalog_buttons.values() for v in c.values() ] def on_submit(criteria, context, user_message, assistant_message_text, assistant_message_json, tools, state): criteria_name = state["selected_criteria_name"] if criteria_name == "function_calling_hallucination": assistant_message = assistant_message_json else: assistant_message = assistant_message_text test_case = { "name": criteria_name, "criteria": criteria, "context": context, "user_message": user_message, "assistant_message": assistant_message, "tools": tools, } messages = get_messages(test_case=test_case, sub_catalog_name=state["selected_sub_catalog"]) logger.debug( f"Starting evaluation for subcatelog {state['selected_sub_catalog']} and criteria name {state['selected_criteria_name']}" ) result = get_guardian_response(messages=messages, criteria_name=criteria_name) result_label = result["assessment"] # Yes or No result_confidence_score = round(result["certainty"], 3) html_str = f"{result_label} (Confidence Score: {result_confidence_score})
" # html_str = f"{get_result_description(state['selected_sub_catalog'], state['selected_criteria_name'])} {result_label}" return gr.update(value=html_str) def on_show_prompt_click(criteria, context, user_message, assistant_message_text, assistant_message_json, tools, state): criteria_name = state["selected_criteria_name"] if criteria_name == "function_calling_hallucination": assistant_message = assistant_message_json else: assistant_message = assistant_message_text test_case = { "name": criteria_name, "criteria": criteria, "context": context, "user_message": user_message, "assistant_message": assistant_message, "tools": tools, } messages = get_messages(test_case=test_case, sub_catalog_name=state["selected_sub_catalog"]) prompt = get_prompt(messages, criteria_name) prompt = prompt.replace("<", "<").replace(">", ">").replace("\\n", "Granite Guardian models are specialized language models in the Granite family that can detect harms and risks in generative AI systems. They can be used with any large language model to make interactions with generative AI systems safe. Select an example in the left panel to see how the Granite Guardian model evaluates harms and risks in user prompts, assistant responses, and for hallucinations in retrival-augmented generation and function calling. In this demo, we use granite-guardian-3.1-8b.
", ) with gr.Row(elem_classes="column-gap"): with gr.Column(scale=0, elem_classes="no-gap"): title_display_left = gr.HTML("{get_result_description(state.value['selected_sub_catalog'],state.value['selected_criteria_name'])}
", elem_classes="result-meaning" ) submit_button = gr.Button( "Evaluate", variant="primary", icon=os.path.join(os.path.dirname(os.path.abspath(__file__)), "send-white.png"), elem_classes="submit-button", ) # result_text = gr.HTML(label='Result', elem_classes=['result-text', 'read-only', 'input-box'], visible=False, value='') result_text = gr.HTML( label="Result", elem_classes=["result-root"], show_label=True, visible=False, value="" ) with Modal(visible=False, elem_classes="modal") as modal: prompt = gr.Markdown("") # events show_propt_button.click( on_show_prompt_click, inputs=[criteria, context, user_message, assistant_message_text, assistant_message_json, tools, state], outputs=prompt, ).then(lambda: gr.update(visible=True), None, modal) submit_button.click(lambda: gr.update(visible=True, value=""), None, result_text).then( on_submit, inputs=[criteria, context, user_message, assistant_message_text, assistant_message_json, tools, state], outputs=[result_text], scroll_to_output=True, ) for button in [ t for sub_catalog_name, sub_catalog_buttons in catalog_buttons.items() for t in sub_catalog_buttons.values() ]: button.click( change_button_color, inputs=None, outputs=[v for c in catalog_buttons.values() for v in c.values()] ).then(update_selected_test_case, inputs=[button, state], outputs=[state]).then( on_test_case_click, inputs=state, outputs=[ test_case_name, criteria, context, user_message, assistant_message_text, assistant_message_json, tools, result_text, result_description ], ) demo.launch(server_name="0.0.0.0")