JerLag committed on
Commit a40f4c0 · verified · 1 Parent(s): 7619418

Upload app.py

Files changed (1)
  1. app.py +410 -0
app.py ADDED
@@ -0,0 +1,410 @@
# -*- coding: utf-8 -*-
"""
NPS Assurance — Gradio (paste-only)
- Input: pasted verbatims (1 line = 1 verbatim, optional NPS score after a separator, e.g. "|")
- Outputs: emotion (pos/neutral/neg), themes, occurrence counts, Markdown summary, Plotly charts
- AI (optional): OpenAI for sentiment/themes/summary. Without a key, falls back to Hugging Face (if installed), then to lexical rules.
- Deployable as-is on Hugging Face Spaces (app_file = app.py)
"""

import os, re, json, collections, tempfile, zipfile
from typing import List, Dict, Optional

import pandas as pd
from unidecode import unidecode
import gradio as gr
import plotly.express as px
import plotly.graph_objects as go

# ---------------- Insurance thesaurus ----------------
# Patterns are matched against lowercased, unidecode'd text (see to_analyzable),
# so accent classes like [eé] are kept only as a safety net.
THEMES = {
    "Remboursements santé": [r"\bremboursement[s]?\b", r"\bt[eé]l[eé]transmission\b", r"\bno[eé]mie\b",
                             r"\bprise\s*en\s*charge[s]?\b", r"\btaux\s+de\s+remboursement[s]?\b", r"\b(ameli|cpam)\b",
                             r"\bcompl[eé]mentaire\s+sant[eé]\b", r"\bmutuelle\b", r"\battestation[s]?\b", r"\bcarte\s+(mutuelle|tiers\s*payant)\b"],
    "Tiers payant / Réseau de soins": [r"\btiers\s*payant\b", r"\br[ée]seau[x]?\s+de\s+soins\b",
                                       r"\b(optique|dentaire|hospitalisation|pharmacie)\b", r"\bitelis\b", r"\bsant[eé]clair\b", r"\bkalixia\b"],
    "Sinistres / Indemnisation": [r"\bsinistre[s]?\b", r"\bindemni(sation|ser)\b", r"\bexpertis[ea]\b",
                                  r"\bd[eé]claration\s+de\s+sinistre\b", r"\bconstat\b", r"\bbris\s+de\s+glace\b", r"\bassistance\b", r"\bd[ée]pannage\b"],
    "Adhésion / Contrat": [r"\badh[eé]sion[s]?\b", r"\bsouscription[s]?\b", r"\baffiliation[s]?\b", r"\bcontrat[s]?\b",
                           r"\bavenant[s]?\b", r"\bcarence[s]?\b", r"\brenouvellement[s]?\b", r"\br[eé]siliation[s]?\b"],
    "Garanties / Exclusions / Franchise": [r"\bgarantie[s]?\b", r"\bexclusion[s]?\b", r"\bplafond[s]?\b",
                                           r"\bfranchise[s]?\b", r"\bconditions\s+g[eé]n[eé]rales\b", r"\bnotice\b"],
    "Cotisations / Facturation": [r"\bcotisation[s]?\b", r"\bpr[eé]l[eè]vement[s]?\b", r"\bech[eé]ancier[s]?\b",
                                  r"\bfacture[s]?\b", r"\berreur[s]?\s+de\s+facturation\b", r"\bremboursement[s]?\b", r"\bRIB\b", r"\bIBAN\b"],
    "Délais & Suivi dossier": [r"\bd[eé]lai[s]?\b", r"\btraitement[s]?\b", r"\bsuivi[s]?\b", r"\brelance[s]?\b", r"\bretard[s]?\b"],
    "Espace client / App / Connexion": [r"\bespace\s+client\b", r"\bapplication\b", r"\bapp\b", r"\bsite\b",
                                        r"\bconnexion\b", r"\bidentifiant[s]?\b", r"\bmot\s+de\s+passe\b", r"\bpaiement\s+en\s+ligne\b",
                                        r"\bbogue[s]?\b", r"\bbug[s]?\b", r"\bnavigation\b", r"\binterface\b", r"\bUX\b"],
    "Support / Conseiller": [r"\bSAV\b", r"\bservice[s]?\s+client[s]?\b", r"\bconseiller[s]?\b",
                             r"\b[rR][eé]ponse[s]?\b", r"\bjoignable[s]?\b", r"\brapp?el\b"],
    "Communication / Transparence": [r"\binformation[s]?\b", r"\bcommunication\b", r"\btransparence\b",
                                     r"\bclart[eé]\b", r"\bcourrier[s]?\b", r"\bmail[s]?\b", r"\bnotification[s]?\b"],
    "Prix": [r"\bprix\b", r"\bcher[s]?\b", r"\bco[uû]t[s]?\b", r"\btarif[s]?\b",
             r"\bcomp[eé]titif[s]?\b", r"\babusif[s]?\b", r"\bbon\s+rapport\s+qualit[eé]\s*prix\b"],
    "Offre / Gamme": [r"\boffre[s]?\b", r"\bgamme[s]?\b", r"\bdisponibilit[eé][s]?\b", r"\bdevis\b", r"\bchoix\b", r"\bcatalogue[s]?\b"],
    "Produit/Qualité": [r"\bqualit[eé]s?\b", r"\bfiable[s]?\b", r"\bconforme[s]?\b", r"\bnon\s+conforme[s]?\b",
                        r"\bd[eé]fectueux?[es]?\b", r"\bperformant[e]?[s]?\b"],
    "Agence / Accueil": [r"\bagence[s]?\b", r"\bboutique[s]?\b", r"\baccueil\b", r"\bconseil[s]?\b", r"\battente\b", r"\bcaisse[s]?\b"],
}

# --------------- Sentiment (rule-based fallback) ---------------
# Lookups run on unidecode'd lowercase tokens, so the unaccented keys are the
# ones that actually fire; accented duplicates are kept as a safety net.
# ("incompetent" added: "incompétent" alone could never match a unidecode'd token.)
POS_WORDS = {"bien":1.0,"super":1.2,"parfait":1.4,"excellent":1.5,"ravi":1.2,"satisfait":1.0,
             "rapide":0.8,"efficace":1.0,"fiable":1.0,"simple":0.8,"facile":0.8,"clair":0.8,"conforme":0.8,
             "sympa":0.8,"professionnel":1.0,"réactif":1.0,"reactif":1.0,"compétent":1.0,"competent":1.0,
             "top":1.2,"recommande":1.2,"recommandé":1.2,"bon":0.8}
NEG_WORDS = {"mauvais":-1.2,"horrible":-1.5,"nul":-1.2,"lent":-0.8,"cher":-0.9,"arnaque":-1.5,
             "déçu":-1.2,"decu":-1.2,"incompétent":-1.3,"incompetent":-1.3,"bug":-0.9,"bogue":-0.9,
             "problème":-1.0,"probleme":-1.0,"attente":-0.6,"retard":-0.9,"erreur":-1.0,
             "compliqué":-0.8,"complique":-0.8,"défectueux":-1.3,"defectueux":-1.3,
             "non conforme":-1.2,"impossible":-1.0,"difficile":-0.7}
NEGATIONS = [r"\bpas\b", r"\bjamais\b", r"\bplus\b", r"\baucun[e]?\b", r"\brien\b", r"\bni\b", r"\bgu[eè]re\b"]
INTENSIFIERS = [r"\btr[eè]s\b", r"\bvraiment\b", r"\bextr[eê]mement\b", r"\bhyper\b"]
DIMINISHERS = [r"\bun[e]?\s+peu\b", r"\bassez\b", r"\bplut[oô]t\b", r"\bl[eé]g[eè]rement\b"]
INTENSIFIER_W, DIMINISHER_W = 1.5, 0.7

# --------------- OpenAI (optional) ---------------
OPENAI_AVAILABLE = False
try:
    from openai import OpenAI
    _client = OpenAI()  # reads the OPENAI_API_KEY env var (set it as an HF Spaces secret)
    OPENAI_AVAILABLE = True
except Exception:
    OPENAI_AVAILABLE = False

# ---------------- Utils ----------------
def normalize(t: str) -> str:
    if not isinstance(t, str): return ""
    return re.sub(r"\s+", " ", t.strip())

def to_analyzable(t: str) -> str:
    return unidecode(normalize(t.lower()))

def window_has(patterns: List[str], toks: List[str], i: int, w: int = 3) -> bool:
    # True if any pattern occurs within +/- w tokens around position i
    s = max(0, i - w); e = min(len(toks), i + w + 1); win = " ".join(toks[s:e])
    return any(re.search(p, win) for p in patterns)

def lexical_sentiment_score(text: str) -> float:
    toks = to_analyzable(text).split(); score = 0.0
    for i, t in enumerate(toks):
        base = POS_WORDS.get(t, 0.0) or NEG_WORDS.get(t, 0.0)
        if not base and i < len(toks) - 1:
            # bigram entries such as "non conforme" only exist in NEG_WORDS
            bi = f"{t} {toks[i+1]}"; base = NEG_WORDS.get(bi, 0.0)
        if base:
            w = 1.0
            if window_has(INTENSIFIERS, toks, i): w *= INTENSIFIER_W
            if window_has(DIMINISHERS, toks, i): w *= DIMINISHER_W
            if window_has(NEGATIONS, toks, i): base *= -1
            score += base * w
    return max(min(score, 4.0), -4.0)
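
# Worked example (hypothetical input): for "vraiment pas satisfait du service",
# lexical_sentiment_score finds "satisfait" (+1.0) within 3 tokens of both
# "vraiment" (intensifier, x1.5) and "pas" (negation), i.e. -1.0 * 1.5 = -1.5.
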
def lexical_sentiment_label(s: float) -> str:
    return "positive" if s >= 0.3 else ("negatif" if s <= -0.3 else "neutre")

def detect_themes_regex(text: str):
    t = to_analyzable(text); counts = {}
    for th, pats in THEMES.items():
        # IGNORECASE so uppercase patterns (SAV, RIB, IBAN, UX) match the lowercased text
        c = sum(len(re.findall(p, t, flags=re.IGNORECASE)) for p in pats)
        if c > 0: counts[th] = c
    return list(counts.keys()), counts
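
# Example (hypothetical verbatim): "Impossible de joindre un conseiller, délai trop long"
# yields {"Support / Conseiller": 1, "Délais & Suivi dossier": 1} once unidecode'd.
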
def nps_bucket(s):
    # standard NPS segmentation: 9-10 promoter, 7-8 passive, 0-6 detractor
    try:
        v = int(s)
    except (TypeError, ValueError):
        return "inconnu"
    return "promoter" if v >= 9 else ("passive" if v >= 7 else ("detractor" if v >= 0 else "inconnu"))

def compute_nps(series):
    vals = []
    for x in series.dropna().tolist():
        try:
            v = int(x)
            if 0 <= v <= 10: vals.append(v)
        except (TypeError, ValueError):
            pass
    if not vals: return None
    tot = len(vals); pro = sum(1 for v in vals if v >= 9); det = sum(1 for v in vals if v <= 6)
    return 100.0 * (pro / tot - det / tot)
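
# Worked example (hypothetical scores): [10, 9, 7, 3] -> 2 promoters, 1 passive,
# 1 detractor, so NPS = 100 * (2/4 - 1/4) = 25.0.
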
def anonymize(t: str) -> str:
    if not isinstance(t, str): return ""
    t = re.sub(r"[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,}", "[email]", t)  # email addresses
    t = re.sub(r"\b(?:\+?\d[\s.-]?){7,}\b", "[tel]", t)  # phone-like runs of 7+ digits
    return t
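
# Example (hypothetical data): "Contactez jean.dupont@mail.fr au 06 12 34 56 78"
# becomes "Contactez [email] au [tel]".
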
# --------- Pasted text → DataFrame ----------
def df_from_pasted(text: str, sep="|", has_score=False) -> pd.DataFrame:
    lines = [l.strip() for l in (text or "").splitlines() if l.strip()]
    rows = []
    for i, line in enumerate(lines, 1):
        if has_score and sep in line:
            verb, score = line.split(sep, 1)
            rows.append({"id": i, "comment": verb.strip(), "nps_score": pd.to_numeric(score.strip(), errors="coerce")})
        else:
            rows.append({"id": i, "comment": line.strip(), "nps_score": None})
    return pd.DataFrame(rows)
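
# Example: with has_score=True and sep="|", the line "Remboursement rapide | 9"
# becomes {"id": 1, "comment": "Remboursement rapide", "nps_score": 9}.
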
# --------- OpenAI helpers (optional) ----------
def openai_json(model: str, system: str, user: str, temperature: float = 0.0) -> Optional[dict]:
    if not OPENAI_AVAILABLE: return None
    try:
        resp = _client.chat.completions.create(
            model=model, temperature=temperature,
            messages=[{"role": "system", "content": system}, {"role": "user", "content": user}],
        )
        txt = resp.choices[0].message.content.strip()
        # extract the first {...} block in case the model wraps the JSON in prose
        m = re.search(r"\{.*\}", txt, re.S)
        return json.loads(m.group(0) if m else txt)
    except Exception:
        return None
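
# Note: the regex extraction above is a model-agnostic fallback. On models that
# support it, passing response_format={"type": "json_object"} to
# chat.completions.create would guarantee syntactically valid JSON output.
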
def oa_sentiment(comment: str, model: str, temperature: float = 0.0) -> Optional[dict]:
    system = "Tu es un classifieur FR. Réponds strictement en JSON."
    user = f'Texte: {comment}\nDonne "label" parmi ["positive","neutre","negatif"] et "score" entre -4 et 4. JSON.'
    return openai_json(model, system, user, temperature)

def oa_themes(comment: str, model: str, temperature: float = 0.0) -> Optional[dict]:
    system = "Tu mappes le texte client vers un thésaurus assurance. Réponds strictement en JSON."
    user = f"Texte: {comment}\nThésaurus: {json.dumps(list(THEMES.keys()), ensure_ascii=False)}\nRetourne {{'themes': [...], 'counts': {{...}}}}"
    return openai_json(model, system, user, temperature)

def oa_summary(nps: Optional[float], dist: Dict[str, int], themes_df: pd.DataFrame, model: str, temperature: float = 0.2) -> Optional[str]:
    system = "Tu es un analyste CX FR. Donne une synthèse courte et actionnable en Markdown."
    top = [] if themes_df is None else themes_df.head(6).to_dict(orient="records")
    user = f"Données: NPS={None if nps is None else round(nps, 1)}, Répartition={dist}, Thèmes={json.dumps(top, ensure_ascii=False)}"
    j = openai_json(model, system, user, temperature)
    if isinstance(j, dict) and "text" in j: return j["text"]
    if isinstance(j, dict): return " ".join(str(v) for v in j.values())
    return None

# --------- Charts ----------
def fig_nps_gauge(nps: Optional[float]) -> go.Figure:
    v = 0.0 if nps is None else float(nps)
    return go.Figure(go.Indicator(mode="gauge+number", value=v,
                                  gauge={"axis": {"range": [-100, 100]}, "bar": {"thickness": 0.3}},
                                  title={"text": "NPS (−100 à +100)"}))

def fig_sentiment_bar(dist: Dict[str, int]) -> go.Figure:
    order = ["negatif", "neutre", "positive"]
    x = [o for o in order if o in dist]; y = [dist.get(o, 0) for o in x]
    return px.bar(x=x, y=y, labels={"x": "Sentiment", "y": "Nombre"}, title="Répartition des émotions")

def fig_top_themes(themes_df: pd.DataFrame, k: int) -> go.Figure:
    if themes_df is None or themes_df.empty: return go.Figure()
    d = themes_df.head(k); fig = px.bar(d, x="theme", y="total_mentions", title=f"Top {k} thèmes — occurrences")
    fig.update_layout(xaxis_tickangle=-30); return fig

def fig_theme_balance(themes_df: pd.DataFrame, k: int) -> go.Figure:
    if themes_df is None or themes_df.empty: return go.Figure()
    d = themes_df.head(k)
    d2 = d.melt(id_vars=["theme"], value_vars=["verbatims_pos", "verbatims_neg"], var_name="type", value_name="count")
    d2["type"] = d2["type"].map({"verbatims_pos": "Positifs", "verbatims_neg": "Négatifs"})
    fig = px.bar(d2, x="theme", y="count", color="type", barmode="stack", title=f"Top {k} thèmes — balance Pos/Neg")
    fig.update_layout(xaxis_tickangle=-30); return fig

# --------- Main analysis ----------
def analyze_text(pasted_txt, has_sc, sep_chr,
                 do_anonymize, use_oa_sent, use_oa_themes, use_oa_summary,
                 oa_model, oa_temp, top_k):

    df = df_from_pasted(pasted_txt or "", sep=sep_chr or "|", has_score=bool(has_sc))
    if df.empty:
        raise gr.Error("Colle au moins un verbatim (une ligne).")

    if do_anonymize:
        df["comment"] = df["comment"].apply(anonymize)

    if (use_oa_sent or use_oa_themes or use_oa_summary) and not OPENAI_AVAILABLE:
        raise gr.Error("OpenAI non dispo : installe `openai` et définis OPENAI_API_KEY, ou décoche les options OpenAI.")

    # HF sentiment (optional). Note: the pipeline is reloaded on every call;
    # hoisting it to module level would avoid the per-run model load.
    HF_AVAILABLE = False
    try:
        from transformers import pipeline
        hf_pipe = pipeline("text-classification",
                           model="cmarkea/distilcamembert-base-sentiment",
                           tokenizer="cmarkea/distilcamembert-base-sentiment")
        HF_AVAILABLE = True
    except Exception:
        HF_AVAILABLE = False
    def hf_sent(text: str):
        if not HF_AVAILABLE or not text.strip(): return None
        try:
            # map the model's star-style labels (containing "1".."5") onto our 3 classes
            res = hf_pipe(text); lab = str(res[0]["label"]).lower(); p = float(res[0].get("score", 0.5))
            if "1" in lab or "2" in lab: return {"label": "negatif", "score": -4 * p}
            if "3" in lab: return {"label": "neutre", "score": 0.0}
            return {"label": "positive", "score": 4 * p}
        except Exception:
            return None
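
    # Example (assuming the model's star-style labels): a "4 stars" prediction
    # with p=0.9 maps to {"label": "positive", "score": 3.6}.
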
    rows = []
    theme_agg = collections.defaultdict(lambda: {"mentions": 0, "pos": 0, "neg": 0})
    used_hf = False; used_oa = False

    for _, r in df.iterrows():
        cid = r["id"]; comment = normalize(str(r["comment"]))

        # Sentiment: OpenAI -> HF -> rules
        sent = None; src = "rules"
        if use_oa_sent:
            sent = oa_sentiment(comment, oa_model, oa_temp)
            if sent: src = "openai"; used_oa = True
        if not sent:
            hf = hf_sent(comment)
            if hf: sent = hf; src = "huggingface"; used_hf = True
        if not sent:
            s = float(lexical_sentiment_score(comment))
            sent = {"label": lexical_sentiment_label(s), "score": s}

        # Themes: regex (+ OpenAI merge)
        themes, counts = detect_themes_regex(comment)
        if use_oa_themes:
            tjson = oa_themes(comment, oa_model, oa_temp)
            if isinstance(tjson, dict):
                used_oa = True
                for th, c in (tjson.get("counts", {}) or {}).items():
                    if th in THEMES and int(c) > 0:
                        counts[th] = max(counts.get(th, 0), int(c))
                themes = [th for th, c in counts.items() if c > 0]

        bucket = nps_bucket(r.get("nps_score", None))

        for th, c in counts.items():
            theme_agg[th]["mentions"] += c
            if sent["label"] == "positive":
                theme_agg[th]["pos"] += 1
            elif sent["label"] == "negatif":
                theme_agg[th]["neg"] += 1

        rows.append({
            "id": cid, "nps_score": r.get("nps_score", None), "nps_bucket": bucket,
            "comment": comment,
            "sentiment_score": round(float(sent["score"]), 3),
            "sentiment_label": sent["label"],
            "sentiment_source": src,  # per-row source instead of a run-wide flag
            "themes": ", ".join(themes) if themes else "",
            "theme_counts_json": json.dumps(counts, ensure_ascii=False)
        })

    out_df = pd.DataFrame(rows)
    nps = compute_nps(df["nps_score"])  # may be None when no scores were provided
    dist = out_df["sentiment_label"].value_counts().to_dict()

    # Per-theme stats
    trs = []
    for th, d in theme_agg.items():
        trs.append({"theme": th, "total_mentions": int(d["mentions"]),
                    "verbatims_pos": int(d["pos"]), "verbatims_neg": int(d["neg"]),
                    "net_sentiment": int(d["pos"] - d["neg"])})
    # keep the expected columns even when no theme was detected (sort_values would fail otherwise)
    themes_df = pd.DataFrame(trs, columns=["theme", "total_mentions", "verbatims_pos", "verbatims_neg", "net_sentiment"])
    themes_df = themes_df.sort_values(["total_mentions", "net_sentiment"], ascending=[False, False])

    # Text summary
    method = "OpenAI + HF + règles" if (use_oa_sent and used_hf) else ("OpenAI + règles" if use_oa_sent else ("HF + règles" if used_hf else "Règles"))
    lines = ["# Synthèse NPS & ressentis clients",
             f"- **Méthode** : {method}",
             f"- **NPS global** : {nps:.1f}" if nps is not None else "- **NPS global** : n/a"]
    if dist:
        tot = sum(dist.values()); pos = dist.get("positive", 0); neg = dist.get("negatif", 0); neu = dist.get("neutre", 0)
        lines.append(f"- **Répartition émotions** : positive {pos}/{tot}, neutre {neu}/{tot}, négative {neg}/{tot}")
    if not themes_df.empty:
        lines.append("\n## Thèmes les plus cités")
        for th, m in themes_df.head(5)[["theme", "total_mentions"]].values.tolist():
            lines.append(f"- **{th}** : {m} occurrence(s)")
    summary_md = "\n".join(lines)

    if use_oa_summary:
        md = oa_summary(nps, dist, themes_df, oa_model, oa_temp)
        if md: summary_md = md + "\n\n---\n" + summary_md

    # Export files
    tmpdir = tempfile.mkdtemp(prefix="nps_gradio_")
    enriched = os.path.join(tmpdir, "enriched_comments.csv")
    themes_csv = os.path.join(tmpdir, "themes_stats.csv")  # renamed: "themes" would shadow the per-verbatim list above
    summ = os.path.join(tmpdir, "summary.md")
    out_df.to_csv(enriched, index=False, encoding="utf-8-sig")
    themes_df.to_csv(themes_csv, index=False, encoding="utf-8-sig")
    with open(summ, "w", encoding="utf-8") as f: f.write(summary_md)
    zip_path = os.path.join(tmpdir, "nps_outputs.zip")
    with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_DEFLATED) as z:
        z.write(enriched, arcname="enriched_comments.csv")
        z.write(themes_csv, arcname="themes_stats.csv")
        z.write(summ, arcname="summary.md")

    # Charts
    fig_gauge = fig_nps_gauge(nps)
    fig_emots = fig_sentiment_bar(dist)
    k = max(1, int(top_k or 10))
    fig_top = fig_top_themes(themes_df, k)
    fig_bal = fig_theme_balance(themes_df, k)

    # Quick recap panels
    def make_panels(dfT: pd.DataFrame):
        if dfT is None or dfT.empty: return "—", "—", "—"
        pos_top = dfT.sort_values(["verbatims_pos", "total_mentions"], ascending=[False, False]).head(4)
        neg_top = dfT.sort_values(["verbatims_neg", "total_mentions"], ascending=[False, False]).head(4)
        def bullets(d: pd.DataFrame, col: str, label: str):
            out = [f"**{label}**"]
            for _, row in d.iterrows(): out.append(f"- **{row['theme']}** — {int(row[col])} verbatims")
            return "\n".join(out)
        ench_md = bullets(pos_top, "verbatims_pos", "Points d’enchantement")
        irr_md = bullets(neg_top, "verbatims_neg", "Irritants")
        RECO_RULES = {
            "Délais & Suivi dossier": "Réduire les délais (SLA), suivi proactif.",
            "Cotisations / Facturation": "Clarifier factures, alerter anomalies.",
            "Espace client / App / Connexion": "Corriger login/MDP, QA navigateurs.",
            "Support / Conseiller": "Améliorer joignabilité, scripts, rappel auto.",
            "Communication / Transparence": "Notifications étapes clés, messages clairs.",
            "Sinistres / Indemnisation": "Transparence délais + suivi dossier.",
        }
        rec_lines = ["**Recommandations**"]
        for _, row in neg_top.iterrows():
            rec_lines.append(f"- **{row['theme']}** — {RECO_RULES.get(row['theme'], 'Plan d’action dédié')}")
        return ench_md, irr_md, "\n".join(rec_lines)

    ench_md, irr_md, reco_md = make_panels(themes_df)

    return (summary_md, themes_df.head(100), out_df.head(200), [enriched, themes_csv, summ, zip_path],
            ench_md, irr_md, reco_md, fig_gauge, fig_emots, fig_top, fig_bal)

# ---------------- UI ----------------
with gr.Blocks(title="NPS — Analyse (Assurance)") as demo:
    gr.Markdown("## 🔎 NPS — Analyse sémantique (Assurance)\nColle tes verbatims (1 par ligne). Option: score NPS après un `|`.")

    with gr.Column():
        pasted = gr.Textbox(label="Verbatims (un par ligne)", lines=10,
                            placeholder="Exemple :\nRemboursement rapide, télétransmission OK | 10\nConnexion impossible à l’app | 3\nDélais corrects | 7")
        with gr.Row():
            has_score = gr.Checkbox(label="J’ai un score NPS par ligne", value=False)
            sep = gr.Textbox(label="Séparateur score", value="|", scale=1)

    with gr.Row():
        anon = gr.Checkbox(label="Anonymiser emails / téléphones", value=True)
        use_oa_sent = gr.Checkbox(label="OpenAI pour le sentiment", value=False)
        use_oa_themes = gr.Checkbox(label="OpenAI pour les thèmes", value=False)
        use_oa_summary = gr.Checkbox(label="OpenAI pour la synthèse", value=False)

    with gr.Row():
        oa_model = gr.Textbox(label="Modèle OpenAI", value="gpt-4o-mini")
        oa_temp = gr.Slider(label="Température", minimum=0.0, maximum=1.0, value=0.1, step=0.1)
        top_k = gr.Slider(label="Top thèmes (K) pour les graphes", minimum=5, maximum=20, value=10, step=1)
    run = gr.Button("Lancer l'analyse", variant="primary")

    with gr.Row():
        ench_panel = gr.Markdown(); irr_panel = gr.Markdown(); reco_panel = gr.Markdown()

    summary = gr.Markdown(label="Synthèse")
    themes_table = gr.Dataframe(label="Thèmes — statistiques")
    enriched_table = gr.Dataframe(label="Verbatims enrichis (aperçu)")
    files_out = gr.Files(label="Téléchargements (CSV & ZIP)")

    with gr.Row():
        plot_nps = gr.Plot(label="NPS — Jauge")
        plot_sent = gr.Plot(label="Répartition des émotions")
    with gr.Row():
        plot_top = gr.Plot(label="Top thèmes — occurrences")
        plot_bal = gr.Plot(label="Top thèmes — balance Pos/Neg")

    run.click(
        analyze_text,
        inputs=[pasted, has_score, sep, anon, use_oa_sent, use_oa_themes, use_oa_summary, oa_model, oa_temp, top_k],
        outputs=[summary, themes_table, enriched_table, files_out, ench_panel, irr_panel, reco_panel, plot_nps, plot_sent, plot_top, plot_bal]
    )

if __name__ == "__main__":
    demo.launch(share=False, show_api=False)
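
# To run locally (assumptions: dependencies are not pinned in this file):
#   pip install gradio pandas plotly unidecode openai transformers torch
#   python app.py   # OPENAI_API_KEY is optional; the HF and rule-based fallbacks apply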