import os
import pandas as pd
from datetime import date
import gradio as gr
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import networkx as nx
import ast

# # Environment settings
# os.environ["CUDA_VISIBLE_DEVICES"] = "2"
# os.environ["HF_HUB_CACHE"] = "/eos/jeodpp/home/users/consose/cache/huggingface/hub"
# os.environ["HUGGINGFACE_HUB_CACHE"] = "/eos/jeodpp/home/users/consose/cache/huggingface/hub"
# os.environ["HF_HOME"] = "/eos/jeodpp/home/users/consose/cache/huggingface/hub"

# Load the CSV file
#df = pd.read_csv("emdat2.csv", sep=',', header=0, dtype=str, encoding='utf-8')
df = pd.read_csv("https://jeodpp.jrc.ec.europa.eu/ftp/jrc-opendata/ETOHA/storylines/emdat2.csv", sep=',', header=0, dtype=str, encoding='utf-8')
#df = pd.read_csv("/eos/jeodpp/home/users/roncmic/data/crisesStorylinesRAG/procem_graph.csv", sep=',', header=0, dtype=str, encoding='utf-8')
#df = pd.read_csv("https://jeodpp.jrc.ec.europa.eu/ftp/jrc-opendata/ETOHA/storylines/procem_graph.csv", sep=',', header=0, dtype=str, encoding='utf-8')
#df = df.drop_duplicates(subset='DisNo.', keep='first')  #I drop all duplicates for column "DisNo.", keeping the first occurrence
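# The CSV is expected to provide the EM-DAT fields used below (e.g. "DisNo.", "Country",
# the start/end date parts) together with the AI-generated columns: "key information",
# "severity", "key drivers", "main impacts, exposure, and vulnerability",
# "likelihood of multi-hazard risks", "best practices for managing this risk",
# "recommendations and supportive measures for recovery", and the "llama graph",
# "mixtral graph" and "ensemble graph" columns holding the causal triples.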


def try_parse_date(y, m, d):
    """Build a date from string-like parts, e.g. ('2021', '7.0', '15') -> date(2021, 7, 15).

    Returns None if any part is missing or cannot be parsed.
    """
    try:
        if not y or not m or not d:
            return None
        return date(int(float(y)), int(float(m)), int(float(d)))
    except (ValueError, TypeError):
        return None

def plot_cgraph(grp):
    """Draw the causal knowledge graph from a list of (source, relation, target) triples."""
    if not grp:
        return None
    source, relations, target = list(zip(*grp))
    kg_df = pd.DataFrame({'source': source, 'target': target, 'edge': relations})
    G = nx.from_pandas_edgelist(kg_df, "source", "target", edge_attr='edge', create_using=nx.MultiDiGraph())
    # Colour edges by relation type; unknown relations fall back to black.
    edge_colors_dict = {"causes": "red", "prevents": "green"}
    edge_color_list = [edge_colors_dict.get(G[u][v][key]['edge'], 'black') for u, v, key in G.edges(keys=True)]

    # Close figures from previous calls to avoid accumulating open figures.
    plt.close('all')
    fig = plt.figure(figsize=(12, 12))
    pos = nx.spring_layout(G, k=1.5, iterations=100)
    nx.draw_networkx_nodes(G, pos, node_color='skyblue', node_size=800, alpha=0.8)
    nx.draw_networkx_edges(G, pos, edge_color=edge_color_list, arrows=True, width=2)
    nx.draw_networkx_labels(G, pos)
    legend_elements = [Line2D([0], [0], color=color, label=edge_type, lw=2)
                       for edge_type, color in edge_colors_dict.items()]
    plt.legend(handles=legend_elements, loc='best')
    plt.axis('off')
    plt.tight_layout()
    return fig

def display_info(selected_row_str, country, year, month, day, graph_type):
    """Return the storyline texts, causal-graph figure, formatted dates, and EM-DAT fields for the selected event."""
    additional_fields = [
        "Country", "ISO", "Subregion", "Region", "Location", "Origin",
        "Disaster Group", "Disaster Subgroup", "Disaster Type", "Disaster Subtype", "External IDs",
        "Event Name", "Associated Types", "OFDA/BHA Response", "Appeal", "Declaration",
        "AID Contribution ('000 US$)", "Magnitude", "Magnitude Scale", "Latitude",
        "Longitude", "River Basin", "Total Deaths", "No. Injured",
        "No. Affected", "No. Homeless", "Total Affected",
        "Reconstruction Costs ('000 US$)", "Reconstruction Costs, Adjusted ('000 US$)",
        "Insured Damage ('000 US$)", "Insured Damage, Adjusted ('000 US$)",
        "Total Damage ('000 US$)", "Total Damage, Adjusted ('000 US$)", "CPI",
        "Admin Units",
        #"Entry Date", "Last Update"
    ]

    if selected_row_str is None or selected_row_str == '':
        print("No row selected.")
        return ('', '', '', '', '', '', '', None, '', '') + tuple([''] * len(additional_fields))

    print(f"Selected Country: {country}, Selected Row: {selected_row_str}, Date: {year}-{month}-{day}")

    filtered_df = df
    if country:
        filtered_df = filtered_df[filtered_df['Country'] == country]

    # Start/end date of an event row, defaulting missing month/day parts to "01".
    def row_start(row):
        return try_parse_date(row['Start Year'],
                              "01" if row['Start Month'] == "" else row['Start Month'],
                              "01" if row['Start Day'] == "" else row['Start Day'])

    def row_end(row):
        return try_parse_date(row['End Year'],
                              "01" if row['End Month'] == "" else row['End Month'],
                              "01" if row['End Day'] == "" else row['End Day'])

    selected_date = try_parse_date(year, month, day)

    if selected_date:
        # A full date is selected: keep events whose start-end window contains it.
        filtered_df = filtered_df[filtered_df.apply(
            lambda row: (row_start(row) is not None and row_end(row) is not None and
                         row_start(row) <= selected_date <= row_end(row)), axis=1)]
    elif year:
        if month:
            # Year and month selected: keep events starting within that month.
            sstart = try_parse_date(year, month, "01")
            # Roll over to January of the following year when December is selected.
            eend = (try_parse_date(int(float(year)) + 1, 1, 1) if int(float(month)) == 12
                    else try_parse_date(year, int(float(month)) + 1, 1))
            if sstart and eend:
                filtered_df = filtered_df[filtered_df.apply(
                    lambda row: (row_start(row) is not None and
                                 sstart <= row_start(row) < eend), axis=1)]
        else:
            # Only a year selected: keep events starting within that year.
            sstart = try_parse_date(year, "01", "01")
            eend = try_parse_date(year, "12", "31")
            if sstart and eend:
                filtered_df = filtered_df[filtered_df.apply(
                    lambda row: (row_start(row) is not None and
                                 sstart <= row_start(row) <= eend), axis=1)]
    else:
        print("Invalid selected date.")

    # Use the "DisNo." column for selecting the row
    row_data = filtered_df[filtered_df['DisNo.'] == selected_row_str].squeeze()

    if not row_data.empty:
        print(f"Row data: {row_data}")
        key_information = row_data.get('key information', '')
        severity = row_data.get('severity', '')
        key_drivers = row_data.get('key drivers', '')
        impacts_exposure_vulnerability = row_data.get('main impacts, exposure, and vulnerability', '')
        likelihood_multi_hazard = row_data.get('likelihood of multi-hazard risks', '')
        best_practices = row_data.get('best practices for managing this risk', '')
        recommendations = row_data.get('recommendations and supportive measures for recovery', '')
        if graph_type == "LLaMA Graph":
            causal_graph_caption = row_data.get('llama graph', '')
        elif graph_type == "Mixtral Graph":
            causal_graph_caption = row_data.get('mixtral graph', '')
        elif graph_type == "Ensemble Graph":
            causal_graph_caption = row_data.get('ensemble graph', '')
        else:
            causal_graph_caption = ''
        #causal_graph_caption = row_data.get('causal graph', '')
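        # Each graph column is expected to hold a Python-literal list of
        # (source, relation, target) triples, with relations such as "causes" or
        # "prevents" (see plot_cgraph). Hypothetical example value:
        # [('heavy rainfall', 'causes', 'river flooding'), ('early warning', 'prevents', 'casualties')]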
        grp = ast.literal_eval(causal_graph_caption) if isinstance(causal_graph_caption, str) and causal_graph_caption else []
        causal_graph_plot = plot_cgraph(grp)

        # Parse and format the start date
        start_date = try_parse_date(row_data['Start Year'], row_data['Start Month'], row_data['Start Day'])
        start_date_str = start_date.strftime('%Y-%m-%d') if start_date else str(row_data['Start Year'])+"-"+str(row_data['Start Month'])+"-"+str(row_data['Start Day']) #'N/A'

        # Parse and format the end date
        end_date = try_parse_date(row_data['End Year'], row_data['End Month'], row_data['End Day'])
        end_date_str = end_date.strftime('%Y-%m-%d') if end_date else str(row_data['End Year'])+"-"+str(row_data['End Month'])+"-"+str(row_data['End Day']) #'N/A'

        # Collect additional field data
        additional_data = [row_data.get(field, '') for field in additional_fields]

        return (
            key_information,
            severity,
            key_drivers,
            impacts_exposure_vulnerability,
            likelihood_multi_hazard,
            best_practices,
            recommendations,
            causal_graph_plot,
            start_date_str,
            end_date_str
        ) + tuple(additional_data)
    else:
        print("No valid data found for the selection.")
        return ('', '', '', '', '', '', '', None, '', '') + tuple([''] * len(additional_fields))

def update_row_dropdown(country, year, month, day):
    """Refresh the 'Select Disaster Event #' dropdown with the DisNo. values matching the current filters."""
    filtered_df = df
    if country:
        filtered_df = filtered_df[filtered_df['Country'] == country]

    # Start/end date of an event row, defaulting missing month/day parts to "01".
    def row_start(row):
        return try_parse_date(row['Start Year'],
                              "01" if row['Start Month'] == "" else row['Start Month'],
                              "01" if row['Start Day'] == "" else row['Start Day'])

    def row_end(row):
        return try_parse_date(row['End Year'],
                              "01" if row['End Month'] == "" else row['End Month'],
                              "01" if row['End Day'] == "" else row['End Day'])

    selected_date = try_parse_date(year, month, day)

    if selected_date:
        # A full date is selected: keep events whose start-end window contains it.
        filtered_df = filtered_df[filtered_df.apply(
            lambda row: (row_start(row) is not None and row_end(row) is not None and
                         row_start(row) <= selected_date <= row_end(row)), axis=1)]
    elif year:
        if month:
            # Year and month selected: keep events starting within that month.
            sstart = try_parse_date(year, month, "01")
            # Roll over to January of the following year when December is selected.
            eend = (try_parse_date(int(float(year)) + 1, 1, 1) if int(float(month)) == 12
                    else try_parse_date(year, int(float(month)) + 1, 1))
            if sstart and eend:
                filtered_df = filtered_df[filtered_df.apply(
                    lambda row: (row_start(row) is not None and
                                 sstart <= row_start(row) < eend), axis=1)]
        else:
            # Only a year selected: keep events starting within that year.
            sstart = try_parse_date(year, "01", "01")
            eend = try_parse_date(year, "12", "31")
            if sstart and eend:
                filtered_df = filtered_df[filtered_df.apply(
                    lambda row: (row_start(row) is not None and
                                 sstart <= row_start(row) <= eend), axis=1)]
    else:
        print("Invalid selected date.")

    # Use the "DisNo." column for choices
    choices = filtered_df['DisNo.'].tolist() if not filtered_df.empty else []
    print(f"Available rows for {country} on {year}-{month}-{day}: {choices}")
    return gr.update(choices=choices, value=choices[0] if choices else None)


def build_interface():
    """Assemble the Gradio Blocks interface: filter controls, storyline outputs, and EM-DAT record fields."""
    with gr.Blocks() as interface:
        # Add title and description using text elements
        gr.Markdown("## From Data to Narratives: AI-Enhanced Disaster and Health Threats Storylines")  # Title
        gr.Markdown("This Gradio app complements Health Threats and Disaster event data through generative AI techniques, including the use of Retrieval Augmented Generation (RAG) with the [Europe Media Monitoring (EMM)](https://emm.newsbrief.eu/overview.html) service, "
                    "and Large Language Models (LLMs) from the [GPT@JRC](https://gpt.jrc.ec.europa.eu/) portfolio. <br>"
                    "The app leverages the EMM RAG service to retrieve relevant news chunks for each event data, transforms the unstructured news chunks into structured narratives and causal knowledge graphs using LLMs and text-to-graph techniques, linking health threats and disaster events to their causes and impacts. "
                    "Drawing data from sources like the [EM-DAT](https://www.emdat.be/) database, it augments each event with news-derived information in a storytelling fashion. <br>"
                    "This tool enables decision-makers to better explore health threats and disaster dynamics, identify patterns, and simulate scenarios for improved response and readiness. <br><br>"
                    "Select an event data below. You can filter by country and date period. Below, you will see the AI-generated storyline and causal knowledge graph, while on the right you can see the related EM-DAT data record.  <br><br>")  # Description  -, and constructs disaster-specific ontologies. "

        # Extract and prepare unique years from "Start Year" and "End Year"
        if not df.empty:
            start_years = df["Start Year"].dropna().unique()
            end_years = df["End Year"].dropna().unique()

            # Convert to integers and merge to create a union set
            years = set(start_years.astype(int).tolist() + end_years.astype(int).tolist())
            year_choices = sorted(years)
        else:
            year_choices = []

        # Build the filter controls with render=False so they can be placed inside
        # the left-hand column below via .render().
        country_dropdown = gr.Dropdown(choices=[''] + sorted(df['Country'].dropna().unique().tolist()), label="Select Country", render=False)
        year_dropdown = gr.Dropdown(choices=[""] + [str(year) for year in year_choices], label="Select Year", render=False)
        month_dropdown = gr.Dropdown(choices=[""] + [f"{i:02d}" for i in range(1, 13)], label="Select Month", render=False)
        day_dropdown = gr.Dropdown(choices=[""] + [f"{i:02d}" for i in range(1, 32)], label="Select Day", render=False)
        row_dropdown = gr.Dropdown(choices=[], label="Select Disaster Event #", interactive=True, render=False)
        graph_type_dropdown = gr.Dropdown(
            choices=["LLaMA Graph", "Mixtral Graph", "Ensemble Graph"],
            label="Select Graph Type",
            render=False
        )
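        # The graph-type labels map to the 'llama graph', 'mixtral graph' and
        # 'ensemble graph' columns read in display_info.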

        # Define the additional fields once to use later in both position and function
        additional_fields = [
            "Country", "ISO", "Subregion", "Region", "Location", "Origin",
            "Disaster Group", "Disaster Subgroup", "Disaster Type", "Disaster Subtype", "External IDs",
            "Event Name", "Associated Types", "OFDA/BHA Response", "Appeal", "Declaration",
            "AID Contribution ('000 US$)", "Magnitude", "Magnitude Scale", "Latitude",
            "Longitude", "River Basin", "Total Deaths", "No. Injured",
            "No. Affected", "No. Homeless", "Total Affected",
            "Reconstruction Costs ('000 US$)", "Reconstruction Costs, Adjusted ('000 US$)",
            "Insured Damage ('000 US$)", "Insured Damage, Adjusted ('000 US$)",
            "Total Damage ('000 US$)", "Total Damage, Adjusted ('000 US$)", "CPI",
            "Admin Units",
            #"Entry Date", "Last Update"
        ]
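        # Keep this list aligned with the additional_fields list in display_info:
        # the outputs are matched to the returned values purely by position.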

        with gr.Row():
            with gr.Column():
                # Filter controls (created above with render=False)
                country_dropdown.render()
                year_dropdown.render()
                month_dropdown.render()
                day_dropdown.render()
                row_dropdown.render()
                graph_type_dropdown.render()

                outputs = [
                    gr.Textbox(label="Key Information", interactive=False),
                    gr.Textbox(label="Severity", interactive=False),
                    gr.Textbox(label="Key Drivers", interactive=False),
                    gr.Textbox(label="Main Impacts, Exposure, and Vulnerability", interactive=False),
                    gr.Textbox(label="Likelihood of Multi-Hazard Risks", interactive=False),
                    gr.Textbox(label="Best Practices for Managing This Risk", interactive=False),
                    gr.Textbox(label="Recommendations and Supportive Measures for Recovery", interactive=False),
                    gr.Plot(label="Causal Graph")
                ]
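                # The component order must match the tuple returned by display_info:
                # seven storyline textboxes and the causal-graph plot here, followed by
                # the start/end dates and the EM-DAT fields added in the column on the right.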

            with gr.Column():
                # Additional information on the right
                outputs.extend([
                    gr.Textbox(label="Start Date", interactive=False),
                    gr.Textbox(label="End Date", interactive=False)
                ])
                for field in additional_fields:
                    outputs.append(gr.Textbox(label=field, interactive=False))

        # Update the selectable rows when any of the filters change
        country_dropdown.change(
            fn=update_row_dropdown,
            inputs=[country_dropdown, year_dropdown, month_dropdown, day_dropdown],
            outputs=row_dropdown
        )
        year_dropdown.change(
            fn=update_row_dropdown,
            inputs=[country_dropdown, year_dropdown, month_dropdown, day_dropdown],
            outputs=row_dropdown
        )
        month_dropdown.change(
            fn=update_row_dropdown,
            inputs=[country_dropdown, year_dropdown, month_dropdown, day_dropdown],
            outputs=row_dropdown
        )
        day_dropdown.change(
            fn=update_row_dropdown,
            inputs=[country_dropdown, year_dropdown, month_dropdown, day_dropdown],
            outputs=row_dropdown
        )

        # Update the display information when a row is selected
        row_dropdown.change(
            fn=display_info,
            inputs=[row_dropdown, country_dropdown, year_dropdown, month_dropdown, day_dropdown, graph_type_dropdown],
            outputs=outputs
        )
        graph_type_dropdown.change(
            fn=display_info,
            inputs=[row_dropdown, country_dropdown, year_dropdown, month_dropdown, day_dropdown, graph_type_dropdown],
            outputs=outputs
        )

    return interface


# Build and launch the Gradio app; share=True additionally creates a temporary public link.
app = build_interface()
app.launch(share=True)