JerLag committed on
Commit 7619418 · verified · 1 Parent(s): bedc4b5

Delete app.py

Files changed (1)
  1. app.py +0 -430
app.py DELETED
@@ -1,430 +0,0 @@
- # -*- coding: utf-8 -*-
- """
- Gradio UI — NPS semantic analysis (Insurance)
- - Sentiment: OpenAI -> HuggingFace -> rules (OpenAI disabled by default)
- - Themes: regex (+ optional OpenAI) + occurrence counts
- - Summary: numbers-based synthesis (+ optional OpenAI synthesis)
- - UI: browsable tables + 3 panels (Delighters/Irritants/Recommendations)
- - Charts: NPS (gauge), emotions (bars), top themes (bars), Pos/Neg balance (stacked)
- Run: python app_gradio_nps.py
- """
-
- import os, re, json, collections, tempfile, zipfile
- from typing import List, Dict, Optional
- import pandas as pd
- import gradio as gr
- import plotly.express as px
- import plotly.graph_objects as go
-
- # unidecode is optional; an unconditional top-level import would crash before
- # this fallback could run, so the import lives only inside the try block.
- try:
-     from unidecode import unidecode
- except Exception:
-     import unicodedata
-     def unidecode(x):
-         return unicodedata.normalize('NFKD', str(x)).encode('ascii', 'ignore').decode('ascii')
-
- # ---------- INSURANCE thesaurus ----------
- THEMES = {
-     "Remboursements santé": [r"\bremboursement[s]?\b", r"\bt[eé]l[eé]transmission\b", r"\bno[eé]mie\b",
-         r"\bprise\s*en\s*charge[s]?\b", r"\btaux\s+de\s+remboursement[s]?\b", r"\b(ameli|cpam)\b",
-         r"\bcompl[eé]mentaire\s+sant[eé]\b", r"\bmutuelle\b", r"\battestation[s]?\b", r"\bcarte\s+(mutuelle|tiers\s*payant)\b"],
-     "Tiers payant / Réseau de soins": [r"\btiers\s*payant\b", r"\br[ée]seau[x]?\s+de\s+soins\b",
-         r"\b(optique|dentaire|hospitalisation|pharmacie)\b", r"\bitelis\b", r"\bsant[eé]clair\b", r"\bkalixia\b"],
-     "Sinistres / Indemnisation": [r"\bsinistre[s]?\b", r"\bindemni(sation|ser)\b", r"\bexpertis[ea]\b",
-         r"\bd[eé]claration\s+de\s+sinistre\b", r"\bconstat\b", r"\bbris\s+de\s+glace\b", r"\bassistance\b", r"\bd[ée]pannage\b"],
-     "Adhésion / Contrat": [r"\badh[eé]sion[s]?\b", r"\bsouscription[s]?\b", r"\baffiliation[s]?\b", r"\bcontrat[s]?\b",
-         r"\bavenant[s]?\b", r"\bcarence[s]?\b", r"\brenouvellement[s]?\b", r"\br[eé]siliation[s]?\b"],
-     "Garanties / Exclusions / Franchise": [r"\bgarantie[s]?\b", r"\bexclusion[s]?\b", r"\bplafond[s]?\b",
-         r"\bfranchise[s]?\b", r"\bconditions\s+g[eé]n[eé]rales\b", r"\bnotice\b"],
-     "Cotisations / Facturation": [r"\bcotisation[s]?\b", r"\bpr[eé]l[eè]vement[s]?\b", r"\bech[eé]ancier[s]?\b",
-         r"\bfacture[s]?\b", r"\berreur[s]?\s+de\s+facturation\b", r"\bremboursement[s]?\b", r"\bRIB\b", r"\bIBAN\b"],
-     "Délais & Suivi dossier": [r"\bd[eé]lai[s]?\b", r"\btraitement[s]?\b", r"\bsuivi[s]?\b", r"\brelance[s]?\b", r"\bretard[s]?\b"],
-     "Espace client / App / Connexion": [r"\bespace\s+client\b", r"\bapplication\b", r"\bapp\b", r"\bsite\b",
-         r"\bconnexion\b", r"\bidentifiant[s]?\b", r"\bmot\s+de\s+passe\b", r"\bpaiement\s+en\s+ligne\b",
-         r"\bbogue[s]?\b", r"\bbug[s]?\b", r"\bnavigation\b", r"\binterface\b", r"\bUX\b"],
-     "Support / Conseiller": [r"\bSAV\b", r"\bservice[s]?\s+client[s]?\b", r"\bconseiller[s]?\b",
-         r"\b[rR][eé]ponse[s]?\b", r"\bjoignable[s]?\b", r"\brapp?el\b"],
-     "Communication / Transparence": [r"\binformation[s]?\b", r"\bcommunication\b", r"\btransparence\b",
-         r"\bclart[eé]\b", r"\bcourrier[s]?\b", r"\bmail[s]?\b", r"\bnotification[s]?\b"],
-     "Prix": [r"\bprix\b", r"\bcher[s]?\b", r"\bco[uû]t[s]?\b", r"\btarif[s]?\b",
-         r"\bcomp[eé]titif[s]?\b", r"\babusif[s]?\b", r"\bbon\s+rapport\s+qualit[eé]\s*prix\b"],
-     "Offre / Gamme": [r"\boffre[s]?\b", r"\bgamme[s]?\b", r"\bdisponibilit[eé][s]?\b", r"\bdevis\b", r"\bchoix\b", r"\bcatalogue[s]?\b"],
-     "Produit/Qualité": [r"\bqualit[eé]s?\b", r"\bfiable[s]?\b", r"\bconforme[s]?\b", r"\bnon\s+conforme[s]?\b",
-         r"\bd[eé]fectueux?[es]?\b", r"\bperformant[e]?[s]?\b"],
-     "Agence / Accueil": [r"\bagence[s]?\b", r"\bboutique[s]?\b", r"\baccueil\b", r"\bconseil[s]?\b", r"\battente\b", r"\bcaisse[s]?\b"],
- }
-
- # ---------- Fallback sentiment ----------
- POS_WORDS = {"bien":1.0,"super":1.2,"parfait":1.4,"excellent":1.5,"ravi":1.2,"ravis":1.2,"satisfait":1.0,"satisfaite":1.0,
-              "rapide":0.8,"efficace":1.0,"fiable":1.0,"simple":0.8,"facile":0.8,"clair":0.8,"conforme":0.8,"sympa":0.8,
-              "professionnel":1.0,"réactif":1.0,"reactif":1.0,"compétent":1.0,"competent":1.0,"top":1.2,"recommande":1.2,
-              "recommandé":1.2,"bon":0.8,"fiers":1.0}
- NEG_WORDS = {"mauvais":-1.2,"horrible":-1.5,"naze":-1.0,"nul":-1.2,"lente":-0.8,"lent":-0.8,"cher":-0.9,"arnaque":-1.5,
-              "déçu":-1.2,"decu":-1.2,"incompétent":-1.3,"incompetent":-1.3,"bug":-0.9,"bogue":-0.9,"problème":-1.0,
-              "probleme":-1.0,"attente":-0.6,"retard":-0.9,"erreur":-1.0,"mensonge":-1.4,"complexe":-0.7,"compliqué":-0.8,
-              "complique":-0.8,"défectueux":-1.3,"defectueux":-1.3,"non conforme":-1.2,"impossible":-1.0,"difficile":-0.7}
- # Lookups happen on unidecoded tokens (see to_analyzable), so normalize the
- # accented keys as well; otherwise entries such as "déçu" or "réactif" never match.
- POS_WORDS = {unidecode(k): v for k, v in POS_WORDS.items()}
- NEG_WORDS = {unidecode(k): v for k, v in NEG_WORDS.items()}
- NEGATIONS = [r"\bpas\b", r"\bjamais\b", r"\bplus\b", r"\baucun[e]?\b", r"\brien\b", r"\bni\b", r"\bgu[eè]re\b"]
- INTENSIFIERS = [r"\btr[eè]s\b", r"\bvraiment\b", r"\btellement\b", r"\bextr[eê]mement\b", r"\bhyper\b"]
- DIMINISHERS = [r"\bun[e]?\s+peu\b", r"\bassez\b", r"\bplut[oô]t\b", r"\bl[eé]g[eè]rement\b"]
- INTENSIFIER_W, DIMINISHER_W = 1.5, 0.7
-
- # ---------- OpenAI (optional) ----------
- OPENAI_AVAILABLE = False
- try:
-     from openai import OpenAI
-     _client = OpenAI()  # API key read from the OPENAI_API_KEY env var
-     OPENAI_AVAILABLE = True
- except Exception:
-     OPENAI_AVAILABLE = False
-
- # ---------- Utils ----------
- def normalize(t: str) -> str:
-     if not isinstance(t, str): return ""
-     return re.sub(r"\s+", " ", t.strip())
-
- def to_analyzable(t: str) -> str:
-     return unidecode(normalize(t.lower()))
-
- def window_has(patterns: List[str], toks: List[str], i: int, w: int = 3) -> bool:
-     s = max(0, i - w); e = min(len(toks), i + w + 1); win = " ".join(toks[s:e])
-     return any(re.search(p, win) for p in patterns)
-
- def lexical_sentiment_score(text: str) -> float:
-     toks = to_analyzable(text).split(); score = 0.0
-     for i, t in enumerate(toks):
-         base = POS_WORDS.get(t, 0.0) or NEG_WORDS.get(t, 0.0)
-         if not base and i < len(toks) - 1:
-             bi = f"{t} {toks[i+1]}"; base = NEG_WORDS.get(bi, 0.0)
-         if base:
-             w = 1.0
-             if window_has(INTENSIFIERS, toks, i): w *= INTENSIFIER_W
-             if window_has(DIMINISHERS, toks, i): w *= DIMINISHER_W
-             if window_has(NEGATIONS, toks, i): base *= -1
-             score += base * w
-     return max(min(score, 4.0), -4.0)
-
- def lexical_sentiment_label(s: float) -> str:
-     return "positive" if s >= 0.3 else ("negatif" if s <= -0.3 else "neutre")
-
- def detect_themes_regex(text: str):
-     t = to_analyzable(text); found = []; counts = {}
-     for th, pats in THEMES.items():
-         # The analyzable text is lowercased, so match case-insensitively
-         # (otherwise all-caps patterns such as \bRIB\b or \bSAV\b never fire).
-         c = sum(len(re.findall(p, t, flags=re.IGNORECASE)) for p in pats)
-         if c > 0: found.append(th); counts[th] = c
-     return found, counts
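- # Illustrative example on a hypothetical verbatim:
- # detect_themes_regex("Le tiers payant ne marche pas en pharmacie")
- # -> (["Tiers payant / Réseau de soins"], {"Tiers payant / Réseau de soins": 2})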
-
- def nps_bucket(s):
-     try: s = int(s)
-     except (TypeError, ValueError): return "inconnu"
-     return "promoter" if s >= 9 else ("passive" if s >= 7 else ("detractor" if s >= 0 else "inconnu"))
-
- def compute_nps(series):
-     vals = []
-     for x in series.dropna().tolist():
-         try:
-             v = int(x)
-             if 0 <= v <= 10: vals.append(v)
-         except (TypeError, ValueError): pass
-     if not vals: return None
-     tot = len(vals); pro = sum(1 for v in vals if v >= 9); det = sum(1 for v in vals if v <= 6)
-     return 100.0 * (pro / tot - det / tot)
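- # Worked example: scores [10, 9, 7, 3] give 2 promoters (>= 9), 1 passive (7-8)
- # and 1 detractor (<= 6), so NPS = 100 * (2/4 - 1/4) = 25.0.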
-
- def anonymize(t: str) -> str:
-     if not isinstance(t, str): return ""
-     t = re.sub(r"[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,}", "[email]", t)
-     t = re.sub(r"\b(?:\+?\d[\s.-]?){7,}\b", "[tel]", t)
-     return t
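- # Example: anonymize("Contact : jean@mail.fr / 06 12 34 56 78")
- # -> "Contact : [email] / [tel]"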
-
- # ---------- OpenAI helpers ----------
- def openai_json(model: str, system: str, user: str, temperature: float = 0.0) -> Optional[dict]:
-     if not OPENAI_AVAILABLE: return None
-     try:
-         resp = _client.chat.completions.create(
-             model=model, temperature=temperature,
-             messages=[{"role": "system", "content": system}, {"role": "user", "content": user}],
-         )
-         txt = resp.choices[0].message.content.strip()
-         m = re.search(r"\{.*\}", txt, re.S)  # extract the first JSON object if extra text surrounds it
-         return json.loads(m.group(0) if m else txt)
-     except Exception:
-         return None
-
- def oa_sentiment(comment: str, model: str, temperature: float = 0.0) -> Optional[dict]:
-     system = "Tu es un classifieur FR. Réponds strictement en JSON."
-     user = f'Texte: {comment}\nDonne "label" parmi ["positive","neutre","negatif"] et "score" entre -4 et 4. JSON.'
-     return openai_json(model, system, user, temperature)
-
- def oa_themes(comment: str, model: str, temperature: float = 0.0) -> Optional[dict]:
-     system = "Tu maps le texte client vers un thésaurus assurance. Réponds strictement en JSON."
-     user = f"Texte: {comment}\nThésaurus: {json.dumps(list(THEMES.keys()), ensure_ascii=False)}\nRetourne {{'themes': [...], 'counts': {{...}}}}"
-     return openai_json(model, system, user, temperature)
-
- def oa_summary(nps: Optional[float], dist: Dict[str, int], themes_df: pd.DataFrame, model: str, temperature: float = 0.2) -> Optional[str]:
-     system = "Tu es un analyste CX FR. Donne une synthèse courte et actionnable en Markdown."
-     top = [] if themes_df is None else themes_df.head(6).to_dict(orient="records")
-     user = f"Données: NPS={None if nps is None else round(nps, 1)}, Répartition={dist}, Thèmes={json.dumps(top, ensure_ascii=False)}"
-     j = openai_json(model, system, user, temperature)
-     if isinstance(j, dict) and "text" in j: return j["text"]
-     if isinstance(j, dict): return ' '.join(str(v) for v in j.values())
-     return None
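- # Illustrative reply shape, assuming the model honours the JSON instruction:
- # oa_sentiment(...) -> {"label": "negatif", "score": -2.5}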
-
- # ---------- Recommendations & panels ----------
- RECO_RULES = {
-     "Délais & Suivi dossier": "Réduire les délais (SLA), suivi proactif, file d’attente priorisée.",
-     "Cotisations / Facturation": "Audit prélèvements, factures claires, alertes anomalies, parcours litige.",
-     "Espace client / App / Connexion": "Corriger login/MDP, parcours réinitialisation, QA multi-navigateurs.",
-     "Support / Conseiller": "Améliorer joignabilité, formation, scripts d’appel, rappel auto.",
-     "Communication / Transparence": "Messages clairs, notifications aux étapes clés, FAQ ciblées.",
-     "Sinistres / Indemnisation": "Transparence étapes/délais, suivi dossier, points de contact dédiés.",
-     "Remboursements santé": "Accélérer télétransmission, notifier réception/prise en charge.",
-     "Tiers payant / Réseau de soins": "Élargir réseau, informer disponibilité, carte à jour.",
-     "Garanties / Exclusions / Franchise": "Clarifier CG, simulateur de reste à charge, exemples.",
- }
-
- def make_panels(themes_df: pd.DataFrame):
-     if themes_df is None or themes_df.empty: return "—", "—", "—"
-     pos_top = themes_df.sort_values(["verbatims_pos", "total_mentions"], ascending=[False, False]).head(4)
-     neg_top = themes_df.sort_values(["verbatims_neg", "total_mentions"], ascending=[False, False]).head(4)
-     def bullets(df, col, label):
-         lines = [f"**{label}**"]
-         for _, r in df.iterrows(): lines.append(f"- **{r['theme']}** — {int(r[col])} verbatims")
-         return "\n".join(lines)
-     ench_md = bullets(pos_top, "verbatims_pos", "Points d’enchantement")
-     irr_md = bullets(neg_top, "verbatims_neg", "Irritants")
-     rec_lines = ["**Recommandations**"]
-     for _, r in neg_top.iterrows():
-         rec_lines.append(f"- **{r['theme']}** — {RECO_RULES.get(r['theme'], 'Analyser les causes racines et plan d’action.')}")
-     return ench_md, irr_md, "\n".join(rec_lines)
-
- # ---------- Plotly charts ----------
- def fig_nps_gauge(nps: Optional[float]) -> go.Figure:
-     v = 0.0 if nps is None else float(nps)
-     return go.Figure(go.Indicator(
-         mode="gauge+number",
-         value=v,
-         gauge={"axis": {"range": [-100, 100]}, "bar": {"thickness": 0.3}},
-         title={"text": "NPS (−100 à +100)"}
-     ))
-
- def fig_sentiment_bar(dist: Dict[str, int]) -> go.Figure:
-     order = ["negatif", "neutre", "positive"]
-     x = [o for o in order if o in dist]
-     y = [dist.get(o, 0) for o in x]
-     return px.bar(x=x, y=y, labels={"x": "Sentiment", "y": "Nombre"}, title="Répartition des émotions")
-
- def fig_top_themes(themes_df: pd.DataFrame, k: int) -> go.Figure:
-     if themes_df is None or themes_df.empty: return go.Figure()
-     d = themes_df.head(k)
-     fig = px.bar(d, x="theme", y="total_mentions", title=f"Top {k} thèmes — occurrences")
-     fig.update_layout(xaxis_tickangle=-30)
-     return fig
-
- def fig_theme_balance(themes_df: pd.DataFrame, k: int) -> go.Figure:
-     if themes_df is None or themes_df.empty: return go.Figure()
-     d = themes_df.head(k)
-     d2 = d.melt(id_vars=["theme"], value_vars=["verbatims_pos", "verbatims_neg"], var_name="type", value_name="count")
-     d2["type"] = d2["type"].map({"verbatims_pos": "Positifs", "verbatims_neg": "Négatifs"})
-     fig = px.bar(d2, x="theme", y="count", color="type", barmode="stack", title=f"Top {k} thèmes — balance Pos/Neg")
-     fig.update_layout(xaxis_tickangle=-30)
-     return fig
-
- # ---------- Main analysis ----------
- def analyze_file(file_obj, comment_col, score_col, id_col,
-                  do_anonymize,
-                  use_oa_sent, use_oa_themes, use_oa_summary,
-                  oa_model, oa_temp,
-                  top_k):
-
-     # Read the CSV (sniff the separator first, then fall back to the default)
-     try:
-         df = pd.read_csv(file_obj.name, sep=None, engine="python")
-     except Exception:
-         df = pd.read_csv(file_obj.name)
-
-     # Auto-remap common column names
-     rem = {"verbatim": "comment", "texte": "comment", "avis": "comment", "note": "nps_score", "score": "nps_score",
-            "Identifiant": "id", "identifiant": "id", "id_client": "id"}
-     df = df.rename(columns={k: v for k, v in rem.items() if k in df.columns})
-
-     # UI overrides
-     if comment_col and comment_col in df.columns and comment_col != "comment": df = df.rename(columns={comment_col: "comment"})
-     if score_col and score_col in df.columns and score_col != "nps_score": df = df.rename(columns={score_col: "nps_score"})
-     if id_col and id_col in df.columns and id_col != "id": df = df.rename(columns={id_col: "id"})
-
-     if "comment" not in df.columns: raise gr.Error("Colonne 'comment' introuvable.")
-     if "nps_score" not in df.columns: raise gr.Error("Colonne 'nps_score' introuvable.")
-     if "id" not in df.columns: df["id"] = range(1, len(df) + 1)
-     if do_anonymize: df["comment"] = df["comment"].apply(anonymize)
-
-     if (use_oa_sent or use_oa_themes or use_oa_summary) and not OPENAI_AVAILABLE:
-         raise gr.Error("OpenAI non dispo (installe `openai` et définis OPENAI_API_KEY).")
-
-     # Lazy HuggingFace init (optional; needs transformers and the model download)
-     HF_AVAILABLE = False
-     try:
-         from transformers import pipeline
-         hf_pipe = pipeline("text-classification",
-                            model="cmarkea/distilcamembert-base-sentiment",
-                            tokenizer="cmarkea/distilcamembert-base-sentiment")
-         HF_AVAILABLE = True
-     except Exception:
-         HF_AVAILABLE = False
-
-     def hf_sent(text: str):
-         if not HF_AVAILABLE or not text.strip(): return None
-         try:
-             res = hf_pipe(text); lab = str(res[0]["label"]).lower(); p = float(res[0].get("score", 0.5))
-             if "1" in lab or "2" in lab: return {"label": "negatif", "score": -4 * p}
-             if "3" in lab: return {"label": "neutre", "score": 0.0}
-             return {"label": "positive", "score": 4 * p}
-         except Exception:
-             return None
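-     # The mapping above assumes the model's star-rating labels ("1 star" ...
-     # "5 stars"); e.g. a "4 stars" result at probability 0.8 would map to
-     # {"label": "positive", "score": 3.2}.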
-
-     rows = []
-     theme_agg = collections.defaultdict(lambda: {"mentions": 0, "pos": 0, "neg": 0})
-     used_hf = False; used_oa = False
-
-     for _, r in df.iterrows():
-         cid = r["id"]; comment = normalize(str(r["comment"]))
-
-         # Sentiment: OpenAI -> HF -> rules (record which source scored this row)
-         sent = None; source = "rules"
-         if use_oa_sent:
-             sent = oa_sentiment(comment, oa_model, oa_temp)
-             if sent: source = "openai"; used_oa = True
-         if not sent:
-             hf = hf_sent(comment)
-             if hf: sent = hf; source = "huggingface"; used_hf = True
-         if not sent:
-             s = float(lexical_sentiment_score(comment))
-             sent = {"label": lexical_sentiment_label(s), "score": s}
-
-         # Themes: regex (+ merged OpenAI counts)
-         themes, counts = detect_themes_regex(comment)
-         if use_oa_themes:
-             tjson = oa_themes(comment, oa_model, oa_temp)
-             if isinstance(tjson, dict):
-                 used_oa = True
-                 for th, c in (tjson.get("counts", {}) or {}).items():
-                     if th in THEMES and int(c) > 0:
-                         counts[th] = max(counts.get(th, 0), int(c))
-                 themes = [th for th, c in counts.items() if c > 0]
-
-         bucket = nps_bucket(r.get("nps_score", None))
-
-         for th, c in counts.items():
-             theme_agg[th]["mentions"] += c
-             if sent["label"] == "positive":
-                 theme_agg[th]["pos"] += 1
-             elif sent["label"] == "negatif":
-                 theme_agg[th]["neg"] += 1
-
-         rows.append({
-             "id": cid, "nps_score": r.get("nps_score", None), "nps_bucket": bucket,
-             "comment": comment,
-             "sentiment_score": round(float(sent["score"]), 3),
-             "sentiment_label": sent["label"],
-             "sentiment_source": source,
-             "themes": ", ".join(themes) if themes else "",
-             "theme_counts_json": json.dumps(counts, ensure_ascii=False)
-         })
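-         # A finished row might look like (hypothetical values):
-         # {"id": 1, "nps_score": 3, "nps_bucket": "detractor",
-         #  "comment": "Trop cher et service lent", "sentiment_score": -1.7,
-         #  "sentiment_label": "negatif", "sentiment_source": "rules",
-         #  "themes": "Prix", "theme_counts_json": "{\"Prix\": 1}"}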
-
-     out_df = pd.DataFrame(rows)
-     nps = compute_nps(df["nps_score"])
-     dist = out_df["sentiment_label"].value_counts().to_dict()
-
-     # Per-theme stats
-     trs = []
-     for th, d in theme_agg.items():
-         trs.append({"theme": th, "total_mentions": int(d["mentions"]),
-                     "verbatims_pos": int(d["pos"]), "verbatims_neg": int(d["neg"]),
-                     "net_sentiment": int(d["pos"] - d["neg"])})
-     themes_df = pd.DataFrame(trs)
-     if not themes_df.empty:  # sorting an empty frame would fail on the missing columns
-         themes_df = themes_df.sort_values(["total_mentions", "net_sentiment"], ascending=[False, False])
-
-     # Text summary
-     method = "OpenAI + HF + règles" if (use_oa_sent and used_hf) else ("OpenAI + règles" if use_oa_sent else ("HF + règles" if used_hf else "Règles"))
-     lines = ["# Synthèse NPS & ressentis clients",
-              f"- **Méthode** : {method}",
-              f"- **NPS global** : {nps:.1f}" if nps is not None else "- **NPS global** : n/a"]
-     if dist:
-         tot = sum(dist.values()); pos = dist.get("positive", 0); neg = dist.get("negatif", 0); neu = dist.get("neutre", 0)
-         lines.append(f"- **Répartition émotions** : positive {pos}/{tot}, neutre {neu}/{tot}, négative {neg}/{tot}")
-     if not themes_df.empty:
-         lines.append("\n## Thèmes les plus cités")
-         for th, m in themes_df.head(5)[["theme", "total_mentions"]].values.tolist():
-             lines.append(f"- **{th}** : {m} occurrence(s)")
-     summary_md = "\n".join(lines)
-
-     # Optional OpenAI summary
-     if use_oa_summary:
-         md = oa_summary(nps, dist, themes_df, oa_model, oa_temp)
-         if md: summary_md = md + "\n\n---\n" + summary_md
-
-     # Output files
-     tmpdir = tempfile.mkdtemp(prefix="nps_gradio_")
-     enriched = os.path.join(tmpdir, "enriched_comments.csv"); themes_csv = os.path.join(tmpdir, "themes_stats.csv"); summ = os.path.join(tmpdir, "summary.md")
-     out_df.to_csv(enriched, index=False, encoding="utf-8-sig")
-     themes_df.to_csv(themes_csv, index=False, encoding="utf-8-sig")
-     with open(summ, "w", encoding="utf-8") as f: f.write(summary_md)
-     zip_path = os.path.join(tmpdir, "nps_outputs.zip")
-     with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_DEFLATED) as z:
-         z.write(enriched, arcname="enriched_comments.csv"); z.write(themes_csv, arcname="themes_stats.csv"); z.write(summ, arcname="summary.md")
-
-     # Charts
-     fig_gauge = fig_nps_gauge(nps)
-     fig_emots = fig_sentiment_bar(dist)
-     k = max(1, int(top_k or 10))
-     fig_top = fig_top_themes(themes_df, k)
-     fig_bal = fig_theme_balance(themes_df, k)
-
-     return (summary_md, themes_df.head(100), out_df.head(200), [enriched, themes_csv, summ, zip_path],
-             fig_gauge, fig_emots, fig_top, fig_bal)
-
- # ---------- UI ----------
- with gr.Blocks(title="Analyse NPS — Assurance") as demo:
-     gr.Markdown("## 🔎 Analyse sémantique NPS — Assurance\nTéléverse un CSV, mappe les colonnes, options OpenAI (facultatives), puis lance.")
-
-     with gr.Row():
-         csv_in = gr.File(label="CSV NPS (UTF-8)", file_types=[".csv"], type="filepath")
-         with gr.Column():
-             ccol = gr.Textbox(label="Colonne 'comment' (laisser vide si déjà 'comment')", placeholder="ex: verbatim / texte")
-             scol = gr.Textbox(label="Colonne 'nps_score' (laisser vide si déjà 'nps_score')", placeholder="ex: note / score")
-             icol = gr.Textbox(label="Colonne 'id' (optionnel)", placeholder="ex: identifiant")
-
-     with gr.Row():
-         anon = gr.Checkbox(label="Anonymiser emails / téléphones", value=True)
-         # OpenAI disabled by default to avoid API-key errors
-         use_oa_sent = gr.Checkbox(label="OpenAI pour le sentiment", value=False)
-         use_oa_themes = gr.Checkbox(label="OpenAI pour les thèmes", value=False)
-         use_oa_summary = gr.Checkbox(label="OpenAI pour la synthèse", value=False)
-
-     with gr.Row():
-         oa_model = gr.Textbox(label="Modèle OpenAI", value="gpt-4o-mini")
-         oa_temp = gr.Slider(label="Température", minimum=0.0, maximum=1.0, value=0.1, step=0.1)
-         top_k = gr.Slider(label="Top thèmes (K) pour les graphes", minimum=5, maximum=20, value=10, step=1)
-     run = gr.Button("Lancer l'analyse", variant="primary")
-
-     summary = gr.Markdown(label="Synthèse")
-     themes_table = gr.Dataframe(label="Thèmes — statistiques")
-     enriched_table = gr.Dataframe(label="Verbatims enrichis (aperçu)")
-     files_out = gr.Files(label="Téléchargements (CSV & ZIP)")
-
-     with gr.Row():
-         plot_nps = gr.Plot(label="NPS — Jauge")
-         plot_sent = gr.Plot(label="Répartition des émotions")
-     with gr.Row():
-         plot_top = gr.Plot(label="Top thèmes — occurrences")
-         plot_bal = gr.Plot(label="Top thèmes — balance Pos/Neg")
-
-     def _go(file, cc, sc, ic, a, uos, uot, uosum, model, temp, k):
-         if file is None: raise gr.Error("Ajoute un CSV.")
-         # gr.File(type="filepath") yields a path string; wrap it so
-         # analyze_file can keep reading the `.name` attribute
-         class F: pass
-         f = F(); f.name = file if isinstance(file, str) else file.name
-         return analyze_file(f, cc.strip() if cc else "", sc.strip() if sc else "", ic.strip() if ic else "",
-                             a, uos, uot, uosum, model.strip() or "gpt-4o-mini", float(temp or 0.0), int(k or 10))
-
-     run.click(_go,
-               inputs=[csv_in, ccol, scol, icol, anon, use_oa_sent, use_oa_themes, use_oa_summary, oa_model, oa_temp, top_k],
-               outputs=[summary, themes_table, enriched_table, files_out, plot_nps, plot_sent, plot_top, plot_bal])
-
- if __name__ == "__main__":
-     demo.launch(share=False, show_api=False)