{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "f56cc5ad",
   "metadata": {},
   "source": [
    "# NDIS Project - OpenAI - PBSP Scoring - Page 5 - Plan Implementation"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a8d844ea",
   "metadata": {
    "hide_input": false
   },
   "outputs": [],
   "source": [
    "import openai\n",
    "import re\n",
    "from ipywidgets import interact\n",
    "import ipywidgets as widgets\n",
    "from IPython.display import display, clear_output, Javascript, HTML, Markdown\n",
    "import matplotlib.pyplot as plt\n",
    "import matplotlib.ticker as mtick\n",
    "import json\n",
    "import spacy\n",
    "from spacy import displacy\n",
    "from dotenv import load_dotenv\n",
    "import pandas as pd\n",
    "import argilla as rg\n",
    "from argilla.metrics.text_classification import f1\n",
    "import warnings\n",
    "warnings.filterwarnings('ignore')\n",
    "%matplotlib inline\n",
    "pd.set_option('display.max_rows', 500)\n",
    "pd.set_option('display.max_colwidth', 10000)\n",
    "pd.set_option('display.width', 10000)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "96b83a1d",
   "metadata": {},
   "outputs": [],
   "source": [
    "#initializations\n",
    "openai.api_key = os.environ['API_KEY']\n",
    "openai.api_base = os.environ['API_BASE']\n",
    "openai.api_type = os.environ['API_TYPE']\n",
    "openai.api_version = os.environ['API_VERSION']\n",
    "deployment_name = os.environ['DEPLOYMENT_ID']\n",
    "\n",
    "#argilla\n",
    "rg.init(\n",
    "    api_url=os.environ[\"ARGILLA_API_URL\"],\n",
    "    api_key=os.environ[\"ARGILLA_API_KEY\"]\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "dee25d82",
   "metadata": {},
   "outputs": [],
   "source": [
    "#sentence extraction\n",
    "def extract_sentences(paragraph):\n",
    "    symbols = ['\\\\.', '!', '\\\\?', ';', ':', ',', '\\\\_', '\\n', '\\\\-']\n",
    "    pattern = '|'.join([f'{symbol}' for symbol in symbols])\n",
    "    sentences = re.split(pattern, paragraph)\n",
    "    sentences = [sentence.strip() for sentence in sentences if sentence.strip()]\n",
    "    return sentences"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "02fda761",
   "metadata": {},
   "outputs": [],
   "source": [
    "def process_response(response, query):\n",
    "    sentences = []\n",
    "    topics = []\n",
    "    scores = []\n",
    "    lines = response.strip().split(\"\\n\")\n",
    "    topic = None\n",
    "    for line in lines:\n",
    "        if \"Implementors & Roles:\" in line:\n",
    "            topic = \"IMPLEMENTORS & ROLES\"\n",
    "        elif \"Training Strategies:\" in line:\n",
    "            topic = \"TRAINING STRATEGIES\"\n",
    "        elif \"Implementation Support:\" in line:\n",
    "            topic = \"IMPLEMENTATION SUPPORT\"\n",
    "        elif \"Communication Strategies:\" in line:\n",
    "            topic = \"COMMUNICATION STRATEGIES\"\n",
    "        elif \"Treatment Fidelity:\" in line:\n",
    "            topic = \"TREATMENT FIDELITY\"\n",
    "        elif \"None:\" in line:\n",
    "            topic = \"NONE\"\n",
    "        else:\n",
    "            try:\n",
    "                parts = line.split(\"(Confidence Score:\")\n",
    "                if len(parts) == 2:\n",
    "                    phrase = parts[0].strip()\n",
    "                    score = float(parts[1].strip().replace(\")\", \"\"))\n",
    "                    sentences.append(phrase)\n",
    "                    topics.append(topic)\n",
    "                    scores.append(score)\n",
    "            except:\n",
    "                pass\n",
    "    result_df = pd.DataFrame({'Phrase': sentences, 'Topic': topics, 'Score': scores})\n",
    "    try:\n",
    "        result_df['Phrase'] = result_df['Phrase'].str.replace('\\d+\\.', '', regex=True)\n",
    "        result_df['Phrase'] = result_df['Phrase'].str.replace('^\\s', '', regex=True)\n",
    "        result_df['Phrase'] = result_df['Phrase'].str.strip('\"')\n",
    "    except:\n",
    "        sentences = extract_sentences(query)\n",
    "        topics = ['NONE'] * len(sentences)\n",
    "        scores = [0.9] * len(sentences)\n",
    "        result_df = pd.DataFrame({'Phrase': sentences, 'Topic': topics, 'Score': scores})\n",
    "    return result_df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "714fafb4",
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_prompt(query):\n",
    "    prompt = f\"\"\"\n",
    "    The paragraph below is written by a disability practitioner to outline the implementation of his positive behaviour support plan, where he is expected to describe the approaches and methodologies used to implement the plan. \n",
    "\n",
    "    Paragraph:\n",
    "    {query}\n",
    "\n",
    "    Requirement:\n",
    "    - Identify the phrases from the paragraph above that represent each of the following plan implementation topics: \"Implementors & Roles\", \"Training Strategies\", \"Implementation Support\", \"Communication Strategies\", \"Treatment Fidelity\".\n",
    "\n",
    "    Guidelines:\n",
    "    - \"Implementors & Roles\": refers to the individuals who will implement the plan and their specific roles. Example keywords to look for: \"implement\", \"roles\", \"responsibility\", \"support staff\", \"parents\", and \"caregivers\".\n",
    "    - \"Training Strategies\": refers to proposed strategies to train relevant plan implementers and who will deliver the training. Example keywords to look for: \"workshops\", \"training sessions\", \"online resources\", and \"coaching\".\n",
    "    - \"Implementation Support\": refers to whether the proposed strategies to support the implementation of the plan in relevant settings. Example keywords to look for: \"mentoring\", \"support\", \"assistance\", and \"ongoing\".\n",
    "    - \"Communication Strategies\": refers to proposed strategies for plan implementers to communicate relevant information about the plan and its implementation with one another. Example keywords to look for: \"meetings\", \"reports\", \"progress\", and \"sharing\".\n",
    "    - \"Treatment Fidelity\": refers to the proposed strategies to ensure the fidelity of the plan implementation and set a criterion level of achievement. Example keywords to look for: \"monitoring\", \"assessment\", \"outcomes\", and \"feedback\".\n",
    "\n",
    "    Specifications of a correct answer:\n",
    "    - Please provide a response that closely matches the information in the paragraph and does not deviate significantly from it.\n",
    "    - Provide your answer in numbered lists. \n",
    "    - All the phrases in your answer must be exact substrings in the original paragraph. without changing any characters.\n",
    "    - All the upper case and lower case characters in the phrases in your answer must match the upper case and lower case characters in the original paragraph.\n",
    "    - Start numbering the phrases under each implementation topic from number 1. \n",
    "    - Start each list of phrases with these titles: \"Implementors & Roles\", \"Training Strategies\", \"Implementation Support\", \"Communication Strategies\", \"Treatment Fidelity\".\n",
    "    - For each phrase that belongs to any of the above implementation topics, provide a confidence score that ranges between 0.50 and 1.00, where a score of 0.50 means you are very weakly confident that the phrase belongs to that specific implementation topic, whereas a score of 1.00 means you are very strongly confident that the phrase belongs to that specific implementation topic.\n",
    "    - Never include any phrase in your answer that does not exist in the paragraph above.\n",
    "    - If there are not any phrases that belong to one or more of the implementation topics, then do not include these strategies in your answer. \n",
    "    - Include a final numbered list titled \"None:\", which include all the remaining phrases from the paragraph above that do not belong to any of the implementation topics above. Provide a confidence score for each of these phrases as well.\n",
    "\n",
    "    Example answer:\n",
    "\n",
    "    Implementors & Roles:\n",
    "    1. <your phrase goes here>. (Confidence Score: <your score goes here>)\n",
    "    2. <your phrase goes here>. (Confidence Score: <your score goes here>)\n",
    "\n",
    "    Training Strategies:\n",
    "    1. <your phrase goes here>. (Confidence Score: <your score goes here>)\n",
    "    2. <your phrase goes here>. (Confidence Score: <your score goes here>)\n",
    "\n",
    "    Implementation Support:\n",
    "    1. <your phrase goes here>. (Confidence Score: <your score goes here>)\n",
    "    2. <your phrase goes here>. (Confidence Score: <your score goes here>)\n",
    "\n",
    "    Communication Strategies:\n",
    "    1. <your phrase goes here>. (Confidence Score: <your score goes here>)\n",
    "    2. <your phrase goes here>. (Confidence Score: <your score goes here>)\n",
    "\n",
    "    Treatment Fidelity:\n",
    "    1. <your phrase goes here>. (Confidence Score: <your score goes here>)\n",
    "    2. <your phrase goes here>. (Confidence Score: <your score goes here>)\n",
    "    \n",
    "    None:\n",
    "    1. <your phrase goes here>. (Confidence Score: <your score goes here>)\n",
    "    2. <your phrase goes here>. (Confidence Score: <your score goes here>)\n",
    "    \"\"\"\n",
    "    return prompt"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9e23821b",
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_response_chatgpt(prompt):\n",
    "    response=openai.ChatCompletion.create(   \n",
    "        engine=deployment_name,   \n",
    "        messages=[         \n",
    "        {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},                  \n",
    "        {\"role\": \"user\", \"content\": prompt}     \n",
    "        ],\n",
    "        temperature=0\n",
    "    )\n",
    "    reply = response[\"choices\"][0][\"message\"][\"content\"]\n",
    "    return reply"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "983765bc",
   "metadata": {},
   "outputs": [],
   "source": [
    "def convert_df(result_df):\n",
    "    new_df = pd.DataFrame(columns=['text', 'prediction'])\n",
    "    new_df['text'] = result_df['Phrase']\n",
    "    new_df['prediction'] = result_df.apply(lambda row: [[row['Topic'], row['Score']]], axis=1)\n",
    "    return new_df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bc69cc81",
   "metadata": {},
   "outputs": [],
   "source": [
    "#query = \"\"\"\n",
    "#As Eddie's caregiver, I understand the importance of developing a comprehensive positive behavior support plan that is tailored to his specific needs. In terms of implementation, I plan to involve myself, Eddie's parents, and his support staff as the primary implementers of the plan. To ensure that everyone is well-equipped to implement the plan, I plan to organize a series of training sessions that will be delivered by experienced behavior support professionals. These sessions will cover a range of topics, including identifying triggers for challenging behaviors, responding to these behaviors in a positive and effective manner, and tracking progress over time. Additionally, I plan to provide ongoing support and assistance to all implementers, particularly during the initial stages of implementation. To promote effective communication and collaboration, I plan to organize regular meetings where all implementers can share information about Eddie's progress, any challenges they have encountered, and strategies that have proven successful. Finally, to ensure treatment fidelity, I plan to monitor progress closely using a range of assessment tools, including behavior tracking forms and outcome measures.\n",
    "#\"\"\"\n",
    "#prompt = get_prompt(query)\n",
    "#response = get_response_chatgpt(prompt)\n",
    "#result_df = process_response(response, query)\n",
    "#result_df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "905eaf2a",
   "metadata": {},
   "outputs": [],
   "source": [
    "topic_color_dict = {\n",
    "        'IMPLEMENTORS & ROLES': '#FFCCCC',\n",
    "        'TRAINING STRATEGIES': '#CCFFFF',\n",
    "        'IMPLEMENTATION SUPPORT': '#FF69B4',\n",
    "        'COMMUNICATION STRATEGIES': '#FFFF00',\n",
    "        'TREATMENT FIDELITY': '#CCCCFF',\n",
    "        'NONE': '#ECECEC'\n",
    "    }\n",
    "\n",
    "def color(df, color):\n",
    "    return df.style.format({'Score': '{:,.2%}'.format}).bar(subset=['Score'], color=color)\n",
    "\n",
    "def annotate_query(highlights, query, topics):\n",
    "    ents = []\n",
    "    for h, t in zip(highlights, topics):\n",
    "        pattern = re.escape(h)\n",
    "        pattern = re.sub(r'\\\\(.)', r'[\\1\\\\W]*', pattern) # optional non-alphanumeric characters\n",
    "        for match in re.finditer(pattern, query, re.IGNORECASE):\n",
    "            ent_dict = {\"start\": match.start(), \"end\": match.end(), \"label\": t}\n",
    "            ents.append(ent_dict)\n",
    "    return ents\n",
    "\n",
    "def path_to_image_html(path):\n",
    "    return '<img src=\"'+ path + '\" width=\"30\" height=\"15\" />'\n",
    "\n",
    "passing_score = 0.7\n",
    "final_passing = 0.0\n",
    "def display_final_df(agg_df):\n",
    "    tags = []\n",
    "    crits = [\n",
    "            'IMPLEMENTORS & ROLES',\n",
    "            'TRAINING STRATEGIES',\n",
    "            'IMPLEMENTATION SUPPORT',\n",
    "            'COMMUNICATION STRATEGIES',\n",
    "            'TREATMENT FIDELITY'\n",
    "        ]\n",
    "    orig_crits = crits\n",
    "    crits = [x for x in crits if x in agg_df.index.tolist()]\n",
    "    bools = [agg_df.loc[crit, 'Final_Score'] > final_passing for crit in crits]\n",
    "    paths = ['./thumbs_up.png' if x else './thumbs_down.png' for x in bools]\n",
    "    df = pd.DataFrame({'Plan Implementation Topic': crits, 'USED': paths})\n",
    "    rem_crits = [x for x in orig_crits if x not in crits]\n",
    "    if len(rem_crits) > 0:\n",
    "        df2 = pd.DataFrame({'Plan Implementation Topic': rem_crits, 'USED': ['./thumbs_down.png'] * len(rem_crits)})\n",
    "        df = pd.concat([df, df2])\n",
    "    df = df.set_index('Plan Implementation Topic')\n",
    "    pd.set_option('display.max_colwidth', None)\n",
    "    display(HTML('<div style=\"text-align: center;\">' + df.to_html(classes=[\"align-center\"], index=True, escape=False ,formatters=dict(USED=path_to_image_html)) + '</div>'))\n",
    "    "
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2c6e9fe7",
   "metadata": {},
   "source": [
    "### Please outline your implementation plan, including training, specific strategies, treatment fidelity and communication with relevant people."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "76dd8cab",
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "#demo with Voila\n",
    "\n",
    "bhvr_label = widgets.Label(value='Please type your answer:')\n",
    "bhvr_text_input = widgets.Textarea(\n",
    "    value='',\n",
    "    placeholder='Type your answer',\n",
    "    description='',\n",
    "    disabled=False,\n",
    "    layout={'height': '300px', 'width': '90%'}\n",
    ")\n",
    "\n",
    "bhvr_nlp_btn = widgets.Button(\n",
    "    description='Score Answer',\n",
    "    disabled=False,\n",
    "    button_style='success', # 'success', 'info', 'warning', 'danger' or ''\n",
    "    tooltip='Score Answer',\n",
    "    icon='check',\n",
    "    layout={'height': '70px', 'width': '250px'}\n",
    ")\n",
    "bhvr_agr_btn = widgets.Button(\n",
    "    description='Validate Data',\n",
    "    disabled=False,\n",
    "    button_style='success', # 'success', 'info', 'warning', 'danger' or ''\n",
    "    tooltip='Validate Data',\n",
    "    icon='check',\n",
    "    layout={'height': '70px', 'width': '250px'}\n",
    ")\n",
    "bhvr_eval_btn = widgets.Button(\n",
    "    description='Evaluate Model',\n",
    "    disabled=False,\n",
    "    button_style='success', # 'success', 'info', 'warning', 'danger' or ''\n",
    "    tooltip='Evaluate Model',\n",
    "    icon='check',\n",
    "    layout={'height': '70px', 'width': '250px'}\n",
    ")\n",
    "btn_box = widgets.HBox([bhvr_nlp_btn, bhvr_agr_btn, bhvr_eval_btn], \n",
    "                       layout={'width': '100%', 'height': '160%'})\n",
    "bhvr_outt = widgets.Output()\n",
    "bhvr_outt.layout.height = '100%'\n",
    "bhvr_outt.layout.width = '100%'\n",
    "bhvr_box = widgets.VBox([bhvr_text_input, btn_box, bhvr_outt], \n",
    "                   layout={'width': '100%', 'height': '160%'})\n",
    "dataset_rg_name = 'pbsp-page5-plan-implementation-argilla-ds'\n",
    "agrilla_df = None\n",
    "annotated = False\n",
    "def on_bhvr_button_next(b):\n",
    "    global agrilla_df\n",
    "    with bhvr_outt:\n",
    "        clear_output()\n",
    "        query = bhvr_text_input.value\n",
    "        prompt = get_prompt(query)\n",
    "        response = get_response_chatgpt(prompt)\n",
    "        result_df = process_response(response, query)\n",
    "        sub_result_df = result_df[(result_df['Score'] >= passing_score) & (result_df['Topic'] != 'NONE')]\n",
    "        sub_2_result_df = result_df[result_df['Topic'] == 'NONE']\n",
    "        highlights = []\n",
    "        if len(sub_result_df) > 0:\n",
    "            highlights = sub_result_df['Phrase'].tolist()\n",
    "            highlight_topics = sub_result_df['Topic'].tolist()    \n",
    "            ents = annotate_query(highlights, query, highlight_topics)\n",
    "            colors = {}\n",
    "            for ent, ht in zip(ents, highlight_topics):\n",
    "                colors[ent['label']] = topic_color_dict[ht]\n",
    "\n",
    "            ex = [{\"text\": query,\n",
    "                   \"ents\": ents,\n",
    "                   \"title\": None}]\n",
    "            title = \"Plan Implementation Topic Highlights\"\n",
    "            display(HTML(f'<center><h1>{title}</h1></center>'))\n",
    "            html = displacy.render(ex, style=\"ent\", manual=True, jupyter=True, options={'colors': colors})\n",
    "            display(HTML(html))\n",
    "            title = \"Plan Implementation Topic Classifications\"\n",
    "            display(HTML(f'<center><h1>{title}</h1></center>'))\n",
    "            for top in topic_color_dict.keys():\n",
    "                top_result_df = sub_result_df[sub_result_df['Topic'] == top]\n",
    "                if len(top_result_df) > 0:\n",
    "                    top_result_df = top_result_df.sort_values(by='Score', ascending=False).reset_index(drop=True)\n",
    "                    top_result_df = top_result_df.set_index('Phrase')\n",
    "                    top_result_df = top_result_df[['Score']]\n",
    "                    display(HTML(\n",
    "                        f'<left><h2 style=\"text-decoration: underline; text-decoration-color:{topic_color_dict[top]};\">{top}</h2></left>'))\n",
    "                    display(color(top_result_df, topic_color_dict[top]))\n",
    "\n",
    "            agg_df = sub_result_df.groupby('Topic')['Score'].sum()\n",
    "            agg_df = agg_df.to_frame()\n",
    "            agg_df.index.name = 'Topic'\n",
    "            agg_df.columns = ['Total Score']\n",
    "            agg_df = agg_df.assign(\n",
    "                Final_Score=lambda x: x['Total Score'] / x['Total Score'].sum() * 100.00\n",
    "            )\n",
    "            agg_df = agg_df.sort_values(by='Final_Score', ascending=False)\n",
    "            title = \"Plan Implementation Topic Coverage\"\n",
    "            display(HTML(f'<center><h1>{title}</h1></center>'))\n",
    "            agg_df['Topic'] = agg_df.index\n",
    "            rem_topics= [x for x in list(topic_color_dict.keys()) if not x in agg_df.Topic.tolist()]\n",
    "            if len(rem_topics) > 0:\n",
    "                rem_agg_df = pd.DataFrame({'Topic': rem_topics, 'Final_Score': 0.0, 'Total Score': 0.0})\n",
    "                agg_df = pd.concat([agg_df, rem_agg_df])\n",
    "            labels = agg_df['Final_Score'].round(1).astype('str') + '%'\n",
    "            ax = agg_df.plot.bar(x='Topic', y='Final_Score', rot=0, figsize=(20, 5), align='center')\n",
    "            for container in ax.containers:\n",
    "                ax.bar_label(container, labels=labels)\n",
    "                ax.yaxis.set_major_formatter(mtick.PercentFormatter())\n",
    "                ax.legend([\"Final Score (%)\"])\n",
    "                ax.set_xlabel('')\n",
    "            plt.show()\n",
    "            title = \"Final Scores\"\n",
    "            display(HTML(f'<left><h1>{title}</h1></left>'))\n",
    "            display_final_df(agg_df)\n",
    "            if len(sub_2_result_df) > 0:\n",
    "                sub_result_df = pd.concat([sub_result_df, sub_2_result_df]).reset_index(drop=True)\n",
    "            agrilla_df = sub_result_df.copy()\n",
    "        else:\n",
    "            print(query)\n",
    "            \n",
    "def on_agr_button_next(b):\n",
    "    global agrilla_df, annotated\n",
    "    with bhvr_outt:\n",
    "        clear_output()\n",
    "        if agrilla_df is not None:\n",
    "            # convert the dataframe to the structure accepted by argilla\n",
    "            converted_df = convert_df(agrilla_df)\n",
    "            # convert pandas dataframe to DatasetForTextClassification\n",
    "            dataset_rg = rg.DatasetForTextClassification.from_pandas(converted_df)\n",
    "            # delete the old DatasetForTextClassification from the Argilla web app if exists\n",
    "            rg.delete(dataset_rg_name, workspace=\"admin\")\n",
    "            # load the new DatasetForTextClassification into the Argilla web app\n",
    "            rg.log(dataset_rg, name=dataset_rg_name, workspace=\"admin\")\n",
    "            # Make sure all classes are present for annotation\n",
    "            rg_settings = rg.TextClassificationSettings(label_schema=list(topic_color_dict.keys()))\n",
    "            rg.configure_dataset(name=dataset_rg_name, workspace=\"admin\", settings=rg_settings)\n",
    "            annotated = True\n",
    "        else:\n",
    "            display(Markdown(\"<h2 style='color:red; text-align:center;'>Please score the answer first!</h2>\"))\n",
    "            \n",
    "def on_eval_button_next(b):\n",
    "    global annotated\n",
    "    with bhvr_outt:\n",
    "        clear_output()\n",
    "        if annotated:\n",
    "            display(f1(dataset_rg_name).visualize())\n",
    "        else:\n",
    "            display(Markdown(\"<h2 style='color:red; text-align:center;'>Please score the answer and validate the data first!</h2>\"))\n",
    "\n",
    "bhvr_nlp_btn.on_click(on_bhvr_button_next)\n",
    "bhvr_agr_btn.on_click(on_agr_button_next)\n",
    "bhvr_eval_btn.on_click(on_eval_button_next)\n",
    "\n",
    "display(bhvr_label, bhvr_box)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ed551eba",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "hide_input": false,
  "kernelspec": {
   "display_name": "Python 3.9 (Argilla)",
   "language": "python",
   "name": "argilla"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.16"
  },
  "toc": {
   "base_numbering": 1,
   "nav_menu": {},
   "number_sections": false,
   "sideBar": true,
   "skip_h1_title": true,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {
    "height": "calc(100% - 180px)",
    "left": "10px",
    "top": "150px",
    "width": "258.097px"
   },
   "toc_section_display": true,
   "toc_window_display": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}