JerLag commited on
Commit
b2a0e68
·
verified ·
1 Parent(s): 5bd8335

Upload 5 files

Browse files
README.md CHANGED
@@ -1,13 +1,15 @@
1
- ---
2
- title: Analyse Semantique NPS Assurance
3
- emoji: 🏆
4
- colorFrom: green
5
- colorTo: yellow
6
- sdk: gradio
7
- sdk_version: 5.44.1
8
- app_file: app.py
9
- pinned: false
10
- short_description: Un outil d'analyse sémantique cadré Net Promoter Score
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
1
+ # NPS Assurance — Gradio UI
2
+ ## Installation rapide (Windows)
3
+ ```powershell
4
+ python -m venv .venv; .\.venv\Scripts\Activate.ps1
5
+ pip install -r requirements-min.txt
6
+ python app_gradio_nps.py
7
+ ```
8
+ ## Option IA (OpenAI + HF)
9
+ ```powershell
10
+ pip install -r requirements-full.txt
11
+ # clé : setx OPENAI_API_KEY "ta_cle"
12
+ python app_gradio_nps.py
13
+ ```
14
+ ## CSV attendu
15
+ - colonnes : `comment`, `nps_score`, `id` (optionnel)
app_gradio_nps.py ADDED
@@ -0,0 +1,423 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ """
3
+ Gradio UI — Analyse sémantique NPS (Assurance)
4
+ - Sentiment : OpenAI -> HuggingFace -> Règles (OpenAI désactivé par défaut)
5
+ - Thèmes : Regex (+ option OpenAI) + occurrences
6
+ - Résumé : synthèse chiffrée (+ option synthèse OpenAI)
7
+ - UI : tableaux consultables + 3 panneaux (Enchantements/Irritants/Recos)
8
+ - Graphiques : NPS (jauge), Emotions (barres), Top thèmes (barres), Balance Pos/Neg (stacked)
9
+ Lancer : python app_gradio_nps.py
10
+ """
11
+
12
+ import os, re, json, collections, tempfile, zipfile
13
+ from typing import List, Dict, Optional
14
+ import pandas as pd
15
+ from unidecode import unidecode
16
+ import gradio as gr
17
+ import plotly.express as px
18
+ import plotly.graph_objects as go
19
+
20
# ---------- Insurance thesaurus ----------
# Maps each business theme to a list of regexes. Patterns are applied by
# detect_themes_regex() to text that has been lowercased and unaccented by
# to_analyzable(); character classes like [eé] keep them tolerant either way.
# NOTE(review): r"\bremboursement[s]?\b" appears under both
# "Remboursements santé" and "Cotisations / Facturation" — one comment can
# count toward both themes; confirm this overlap is intended.
THEMES = {
    "Remboursements santé":[r"\bremboursement[s]?\b", r"\bt[eé]l[eé]transmission\b", r"\bno[eé]mie\b",
        r"\bprise\s*en\s*charge[s]?\b", r"\btaux\s+de\s+remboursement[s]?\b", r"\b(ameli|cpam)\b",
        r"\bcompl[eé]mentaire\s+sant[eé]\b", r"\bmutuelle\b", r"\battestation[s]?\b", r"\bcarte\s+(mutuelle|tiers\s*payant)\b"],
    "Tiers payant / Réseau de soins":[r"\btiers\s*payant\b", r"\br[ée]seau[x]?\s+de\s+soins\b",
        r"\b(optique|dentaire|hospitalisation|pharmacie)\b", r"\bitelis\b", r"\bsant[eé]clair\b", r"\bkalixia\b"],
    "Sinistres / Indemnisation":[r"\bsinistre[s]?\b", r"\bindemni(sation|ser)\b", r"\bexpertis[ea]\b",
        r"\bd[eé]claration\s+de\s+sinistre\b", r"\bconstat\b", r"\bbris\s+de\s+glace\b", r"\bassistance\b", r"\bd[ée]pannage\b"],
    "Adhésion / Contrat":[r"\badh[eé]sion[s]?\b", r"\bsouscription[s]?\b", r"\baffiliation[s]?\b", r"\bcontrat[s]?\b",
        r"\bavenant[s]?\b", r"\bcarence[s]?\b", r"\brenouvellement[s]?\b", r"\br[eé]siliation[s]?\b"],
    "Garanties / Exclusions / Franchise":[r"\bgarantie[s]?\b", r"\bexclusion[s]?\b", r"\bplafond[s]?\b",
        r"\bfranchise[s]?\b", r"\bconditions\s+g[eé]n[eé]rales\b", r"\bnotice\b"],
    "Cotisations / Facturation":[r"\bcotisation[s]?\b", r"\bpr[eé]l[eè]vement[s]?\b", r"\bech[eé]ancier[s]?\b",
        r"\bfacture[s]?\b", r"\berreur[s]?\s+de\s+facturation\b", r"\bremboursement[s]?\b", r"\bRIB\b", r"\bIBAN\b"],
    "Délais & Suivi dossier":[r"\bd[eé]lai[s]?\b", r"\btraitement[s]?\b", r"\bsuivi[s]?\b", r"\brelance[s]?\b", r"\bretard[s]?\b"],
    "Espace client / App / Connexion":[r"\bespace\s+client\b", r"\bapplication\b", r"\bapp\b", r"\bsite\b",
        r"\bconnexion\b", r"\bidentifiant[s]?\b", r"\bmot\s+de\s+passe\b", r"\bpaiement\s+en\s+ligne\b",
        r"\bbogue[s]?\b", r"\bbug[s]?\b", r"\bnavigation\b", r"\binterface\b", r"\bUX\b"],
    "Support / Conseiller":[r"\bSAV\b", r"\bservice[s]?\s+client[s]?\b", r"\bconseiller[s]?\b",
        r"\b[rR][eé]ponse[s]?\b", r"\bjoignable[s]?\b", r"\brapp?el\b"],
    "Communication / Transparence":[r"\binformation[s]?\b", r"\bcommunication\b", r"\btransparence\b",
        r"\bclart[eé]\b", r"\bcourrier[s]?\b", r"\bmail[s]?\b", r"\bnotification[s]?\b"],
    "Prix":[r"\bprix\b", r"\bcher[s]?\b", r"\bco[uû]t[s]?\b", r"\btarif[s]?\b",
        r"\bcomp[eé]titif[s]?\b", r"\babusif[s]?\b", r"\bbon\s+rapport\s+qualit[eé]\s*prix\b"],
    "Offre / Gamme":[r"\boffre[s]?\b", r"\bgamme[s]?\b", r"\bdisponibilit[eé][s]?\b", r"\bdevis\b", r"\bchoix\b", r"\bcatalogue[s]?\b"],
    "Produit/Qualité":[r"\bqualit[eé]s?\b", r"\bfiable[s]?\b", r"\bconforme[s]?\b", r"\bnon\s+conforme[s]?\b",
        r"\bd[eé]fectueux?[es]?\b", r"\bperformant[e]?[s]?\b"],
    "Agence / Accueil":[r"\bagence[s]?\b", r"\bboutique[s]?\b", r"\baccueil\b", r"\bconseil[s]?\b", r"\battente\b", r"\bcaisse[s]?\b"],
}
50
+
51
# ---------- Rule-based sentiment fallback ----------
# Word -> weight lexicons, used only when neither OpenAI nor HuggingFace
# produced a result. Both accented and unaccented spellings are listed
# because lexical_sentiment_score tokenizes unaccented text.
POS_WORDS = {"bien":1.0,"super":1.2,"parfait":1.4,"excellent":1.5,"ravi":1.2,"ravis":1.2,"satisfait":1.0,"satisfaite":1.0,
    "rapide":0.8,"efficace":1.0,"fiable":1.0,"simple":0.8,"facile":0.8,"clair":0.8,"conforme":0.8,"sympa":0.8,
    "professionnel":1.0,"réactif":1.0,"reactif":1.0,"compétent":1.0,"competent":1.0,"top":1.2,"recommande":1.2,
    "recommandé":1.2,"bon":0.8,"fiers":1.0}
# Negative lexicon; "non conforme" is a bigram and is matched via the
# two-token lookup in lexical_sentiment_score.
NEG_WORDS = {"mauvais":-1.2,"horrible":-1.5,"naze":-1.0,"nul":-1.2,"lente":-0.8,"lent":-0.8,"cher":-0.9,"arnaque":-1.5,
    "déçu":-1.2,"decu":-1.2,"incompétent":-1.3,"incompetent":-1.3,"bug":-0.9,"bogue":-0.9,"problème":-1.0,
    "probleme":-1.0,"attente":-0.6,"retard":-0.9,"erreur":-1.0,"mensonge":-1.4,"complexe":-0.7,"compliqué":-0.8,
    "complique":-0.8,"défectueux":-1.3,"defectueux":-1.3,"non conforme":-1.2,"impossible":-1.0,"difficile":-0.7}
# Context regexes searched in a +/-3-token window around a lexicon hit:
# a negation flips the sign, intensifiers/diminishers scale the weight.
NEGATIONS = [r"\bpas\b", r"\bjamais\b", r"\bplus\b", r"\baucun[e]?\b", r"\brien\b", r"\bni\b", r"\bgu[eè]re\b"]
INTENSIFIERS = [r"\btr[eè]s\b", r"\bvraiment\b", r"\btellement\b", r"\bextr[eê]mement\b", r"\bhyper\b"]
DIMINISHERS = [r"\bun[e]?\s+peu\b", r"\bassez\b", r"\bplut[oô]t\b", r"\bl[eé]g[eè]rement\b"]
# Weight multipliers applied when an intensifier/diminisher is found nearby.
INTENSIFIER_W, DIMINISHER_W = 1.5, 0.7
64
+
65
# ---------- OpenAI (optional) ----------
# Probe for the `openai` package at import time. Constructing the client may
# also fail (e.g. missing OPENAI_API_KEY); any failure simply disables the
# OpenAI code paths — the app then falls back to HF and rule-based analysis.
OPENAI_AVAILABLE = False
try:
    from openai import OpenAI
    _client = OpenAI()  # API key read from the OPENAI_API_KEY env var
    OPENAI_AVAILABLE = True
except Exception:
    OPENAI_AVAILABLE = False
73
+
74
# ---------- Utils ----------
def normalize(t:str)->str:
    """Strip *t* and collapse any internal whitespace runs to single spaces.

    Non-string input (NaN, None, numbers) yields an empty string.
    """
    if isinstance(t, str):
        return re.sub(r"\s+", " ", t.strip())
    return ""
78
+
79
def to_analyzable(t:str)->str:
    """Lowercase, whitespace-normalize and strip accents for regex matching."""
    lowered = t.lower()
    return unidecode(normalize(lowered))
81
+
82
def window_has(patterns:List[str], toks:List[str], i:int, w:int=3)->bool:
    """Return True if any regex in *patterns* matches within +/-*w* tokens of index *i*."""
    lo = max(0, i - w)
    hi = min(len(toks), i + w + 1)
    window_text = " ".join(toks[lo:hi])
    for pattern in patterns:
        if re.search(pattern, window_text):
            return True
    return False
85
+
86
def lexical_sentiment_score(text:str)->float:
    """Rule-based sentiment score for *text*, clamped to [-4.0, 4.0].

    Tokenizes the unaccented text, sums lexicon weights per token, and adjusts
    each hit by context found within a 3-token window: intensifiers multiply
    the weight, diminishers shrink it, a negation flips the sign.
    """
    toks = to_analyzable(text).split(); score=0.0
    for i,t in enumerate(toks):
        # Single-token lookup: positive lexicon first, then negative.
        base = POS_WORDS.get(t,0.0) or NEG_WORDS.get(t,0.0)
        if not base and i<len(toks)-1:
            # No unigram hit: try the bigram (e.g. "non conforme") against NEG_WORDS.
            bi=f"{t} {toks[i+1]}"; base = NEG_WORDS.get(bi,0.0)
        if base:
            w=1.0
            if window_has(INTENSIFIERS,toks,i): w*=INTENSIFIER_W
            if window_has(DIMINISHERS,toks,i): w*=DIMINISHER_W
            # Negation flips the polarity of this hit only.
            if window_has(NEGATIONS,toks,i): base*=-1
            score+=base*w
    # Clamp to the same [-4, 4] range used by the OpenAI/HF sentiment paths.
    return max(min(score,4.0),-4.0)
99
+
100
def lexical_sentiment_label(s:float)->str:
    """Map a numeric score to 'positive' (>= 0.3), 'negatif' (<= -0.3) or 'neutre'."""
    if s >= 0.3:
        return "positive"
    if s <= -0.3:
        return "negatif"
    return "neutre"
102
+
103
def detect_themes_regex(text:str):
    """Match *text* against THEMES.

    Returns (themes, counts): the list of theme names with at least one hit,
    and a dict mapping each of those themes to its total pattern-match count.
    """
    analyzable = to_analyzable(text)
    found = []
    counts = {}
    for theme, patterns in THEMES.items():
        hits = 0
        for pattern in patterns:
            hits += len(re.findall(pattern, analyzable))
        if hits > 0:
            found.append(theme)
            counts[theme] = hits
    return found, counts
109
+
110
def nps_bucket(s):
    """Classify a raw NPS answer into its standard bucket.

    Returns 'promoter' (9-10), 'passive' (7-8), 'detractor' (0-6), or
    'inconnu' when *s* is negative or cannot be converted to int.
    """
    try:
        s = int(s)
    except (TypeError, ValueError):
        # Narrowed from a bare `except:` — only conversion failures mean
        # "unknown"; anything else should surface as a real error.
        return "inconnu"
    if s >= 9:
        return "promoter"
    if s >= 7:
        return "passive"
    if s >= 0:
        return "detractor"
    return "inconnu"
114
+
115
def compute_nps(series):
    """Compute the Net Promoter Score from a pandas Series of 0-10 ratings.

    NaN, non-integer and out-of-range entries are ignored. Returns None when
    no valid rating remains, otherwise (%promoters - %detractors) in
    [-100, 100], where promoters score >= 9 and detractors <= 6.
    """
    vals = []
    for x in series.dropna().tolist():
        try:
            v = int(x)
        except (TypeError, ValueError):
            # Narrowed from a bare `except: pass` — skip only values that
            # genuinely fail integer conversion.
            continue
        if 0 <= v <= 10:
            vals.append(v)
    if not vals:
        return None
    tot = len(vals)
    pro = sum(1 for v in vals if v >= 9)
    det = sum(1 for v in vals if v <= 6)
    return 100.0 * (pro / tot - det / tot)
125
+
126
+ def anonymize(t:str)->str:
127
+ if not isinstance(t,str): return ""
128
+ t=re.sub(r"[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,}","[email]",t)
129
+ t=re.sub(r"\b(?:\+?\d[\s.-]?){7,}\b","[tel]",t)
130
+ return t
131
+
132
# ---------- OpenAI helpers ----------
def openai_json(model:str, system:str, user:str, temperature:float=0.0) -> Optional[dict]:
    """Call the chat-completions API and parse the reply as a JSON dict.

    Returns None when the OpenAI client is unavailable, the request fails,
    or the reply cannot be parsed — callers treat None as "fall back".
    """
    if not OPENAI_AVAILABLE: return None
    try:
        resp = _client.chat.completions.create(
            model=model, temperature=temperature,
            messages=[{"role":"system","content":system},{"role":"user","content":user}],
        )
        txt = resp.choices[0].message.content.strip()
        # Extract the first {...} span in case the model wrapped the JSON in prose.
        m = re.search(r"\{.*\}", txt, re.S)
        return json.loads(m.group(0) if m else txt)
    except Exception:
        # Deliberate best-effort: any API or parse failure degrades to None
        # so the pipeline can continue with HF or rule-based analysis.
        return None
145
+
146
def oa_sentiment(comment:str, model:str, temperature:float=0.0) -> Optional[dict]:
    """Ask OpenAI to classify *comment*; returns {"label", "score"} or None."""
    sys_prompt = "Tu es un classifieur FR. Réponds strictement en JSON."
    usr_prompt = f'Texte: {comment}\nDonne "label" parmi ["positive","neutre","negatif"] et "score" entre -4 et 4. JSON.'
    return openai_json(model, sys_prompt, usr_prompt, temperature)
150
+
151
def oa_themes(comment:str, model:str, temperature:float=0.0) -> Optional[dict]:
    """Ask OpenAI to map *comment* onto the THEMES thesaurus; dict or None."""
    sys_prompt = "Tu maps le texte client vers un thésaurus assurance. Réponds strictement en JSON."
    usr_prompt = f"Texte: {comment}\nThésaurus: {json.dumps(list(THEMES.keys()), ensure_ascii=False)}\nRetourne {{'themes': [...], 'counts': {{...}}}}"
    return openai_json(model, sys_prompt, usr_prompt, temperature)
155
+
156
def oa_summary(nps:Optional[float], dist:Dict[str,int], themes_df:pd.DataFrame, model:str, temperature:float=0.2) -> Optional[str]:
    """Ask OpenAI for a short actionable CX synthesis in Markdown.

    Feeds the model the rounded NPS, the sentiment distribution, and the top
    6 theme rows. Returns the text, or None if the call/parse failed.
    """
    system = "Tu es un analyste CX FR. Donne une synthèse courte et actionnable en Markdown."
    top = [] if themes_df is None else themes_df.head(6).to_dict(orient="records")
    user = f"Données: NPS={None if nps is None else round(nps,1)}, Répartition={dist}, Thèmes={json.dumps(top, ensure_ascii=False)}"
    j = openai_json(model, system, user, temperature)
    # Prefer an explicit "text" field; otherwise flatten whatever dict came back.
    if isinstance(j, dict) and "text" in j: return j["text"]
    if isinstance(j, dict): return ' '.join(str(v) for v in j.values())
    return None
164
+
165
# ---------- Recommendations & panels ----------
# Canned recommendation per theme, used by make_panels for the top irritants.
# Themes absent from this map get a generic "root-cause analysis" fallback.
RECO_RULES = {
    "Délais & Suivi dossier": "Réduire les délais (SLA), suivi proactif, file d’attente priorisée.",
    "Cotisations / Facturation": "Audit prélèvements, factures claires, alertes anomalies, parcours litige.",
    "Espace client / App / Connexion": "Corriger login/MDP, parcours réinitialisation, QA multi-navigateurs.",
    "Support / Conseiller": "Améliorer joignabilité, formation, scripts d’appel, rappel auto.",
    "Communication / Transparence": "Messages clairs, notifications aux étapes clés, FAQ ciblées.",
    "Sinistres / Indemnisation": "Transparence étapes/délais, suivi dossier, points de contact dédiés.",
    "Remboursements santé": "Accélérer télétransmission, notifier réception/prise en charge.",
    "Tiers payant / Réseau de soins": "Élargir réseau, informer disponibilité, carte à jour.",
    "Garanties / Exclusions / Franchise": "Clarifier CG, simulateur de reste à charge, exemples.",
}
177
def make_panels(themes_df: pd.DataFrame):
    """Build the three Markdown panels (delights, irritants, recommendations)
    from the per-theme stats DataFrame; placeholders when it is empty/None."""
    if themes_df is None or themes_df.empty:
        return "—", "—", "—"

    def bullets(df, col, label):
        # One bullet per theme with its verbatim count.
        parts = [f"**{label}**"]
        for _, row in df.iterrows():
            parts.append(f"- **{row['theme']}** — {int(row[col])} verbatims")
        return "\n".join(parts)

    pos_top = themes_df.sort_values(["verbatims_pos","total_mentions"], ascending=[False,False]).head(4)
    neg_top = themes_df.sort_values(["verbatims_neg","total_mentions"], ascending=[False,False]).head(4)
    ench_md = bullets(pos_top, "verbatims_pos", "Points d’enchantement")
    irr_md = bullets(neg_top, "verbatims_neg", "Irritants")
    rec_lines = ["**Recommandations**"]
    for _, row in neg_top.iterrows():
        rec_lines.append(f"- **{row['theme']}** — {RECO_RULES.get(row['theme'],'Analyser les causes racines et plan d’action.')}")
    return ench_md, irr_md, "\n".join(rec_lines)
191
+
192
# ---------- Plotly charts ----------
def fig_nps_gauge(nps: Optional[float]) -> go.Figure:
    """Render the NPS as a Plotly gauge over [-100, 100]; None is shown as 0."""
    value = float(nps) if nps is not None else 0.0
    indicator = go.Indicator(
        mode="gauge+number",
        value=value,
        gauge={"axis":{"range":[-100,100]}, "bar":{"thickness":0.3}},
        title={"text":"NPS (−100 à +100)"},
    )
    return go.Figure(indicator)
201
+
202
def fig_sentiment_bar(dist: Dict[str,int]) -> go.Figure:
    """Bar chart of sentiment counts, ordered negatif → neutre → positive.

    Labels absent from *dist* are omitted entirely (no zero bars).
    """
    labels = []
    values = []
    for name in ("negatif", "neutre", "positive"):
        if name in dist:
            labels.append(name)
            values.append(dist.get(name,0))
    return px.bar(x=labels, y=values, labels={"x":"Sentiment","y":"Nombre"}, title="Répartition des émotions")
207
+
208
def fig_top_themes(themes_df: pd.DataFrame, k: int) -> go.Figure:
    """Bar chart of the *k* most-mentioned themes (empty figure when no data)."""
    if themes_df is None or themes_df.empty:
        return go.Figure()
    top = themes_df.head(k)
    chart = px.bar(top, x="theme", y="total_mentions", title=f"Top {k} thèmes — occurrences")
    chart.update_layout(xaxis_tickangle=-30)  # slanted labels for long theme names
    return chart
214
+
215
def fig_theme_balance(themes_df: pd.DataFrame, k: int) -> go.Figure:
    """Stacked bar chart of positive vs negative verbatims for the top *k* themes."""
    if themes_df is None or themes_df.empty:
        return go.Figure()
    top = themes_df.head(k)
    # Reshape wide pos/neg columns into long form for a stacked bar.
    melted = top.melt(id_vars=["theme"], value_vars=["verbatims_pos","verbatims_neg"], var_name="type", value_name="count")
    melted["type"] = melted["type"].map({"verbatims_pos":"Positifs","verbatims_neg":"Négatifs"})
    chart = px.bar(melted, x="theme", y="count", color="type", barmode="stack", title=f"Top {k} thèmes — balance Pos/Neg")
    chart.update_layout(xaxis_tickangle=-30)
    return chart
223
+
224
# ---------- Main analysis ----------
def analyze_file(file_obj, comment_col, score_col, id_col,
                 do_anonymize,
                 use_oa_sent, use_oa_themes, use_oa_summary,
                 oa_model, oa_temp,
                 top_k):
    """Run the full NPS pipeline on an uploaded CSV.

    Steps: read/remap columns -> optional anonymization -> per-comment
    sentiment (OpenAI -> HF -> rules) and theme detection (regex + optional
    OpenAI merge) -> aggregate stats -> Markdown summary -> export files ->
    Plotly figures. Returns the 8-tuple wired to the Gradio outputs.

    Raises gr.Error for missing columns or OpenAI options without a client.
    """
    # Read CSV; first try delimiter sniffing (sep=None needs the python engine),
    # then fall back to pandas defaults.
    try:
        df=pd.read_csv(file_obj.name, sep=None, engine="python")
    except Exception:
        df=pd.read_csv(file_obj.name)

    # Auto-remap common French column names onto the canonical schema.
    rem={"verbatim":"comment","texte":"comment","avis":"comment","note":"nps_score","score":"nps_score",
         "Identifiant":"id","identifiant":"id","id_client":"id"}
    df=df.rename(columns={k:v for k,v in rem.items() if k in df.columns})

    # UI-provided column overrides take effect only if the column exists.
    if comment_col and comment_col in df.columns and comment_col!="comment": df=df.rename(columns={comment_col:"comment"})
    if score_col and score_col in df.columns and score_col!="nps_score": df=df.rename(columns={score_col:"nps_score"})
    if id_col and id_col in df.columns and id_col!="id": df=df.rename(columns={id_col:"id"})

    if "comment" not in df.columns: raise gr.Error("Colonne 'comment' introuvable.")
    if "nps_score" not in df.columns: raise gr.Error("Colonne 'nps_score' introuvable.")
    if "id" not in df.columns: df["id"]=range(1,len(df)+1)  # synthesize 1-based ids
    if do_anonymize: df["comment"]=df["comment"].apply(anonymize)

    # Fail early if OpenAI options are requested but the client never initialized.
    if (use_oa_sent or use_oa_themes or use_oa_summary) and not OPENAI_AVAILABLE:
        raise gr.Error("OpenAI non dispo (installe `openai` et définis OPENAI_API_KEY).")

    # Lazy HuggingFace init: transformers may be absent (requirements-min) or
    # the model download may fail; either way we degrade to the rule-based path.
    HF_AVAILABLE=False
    try:
        from transformers import pipeline
        hf_pipe = pipeline("text-classification",
                           model="cmarkea/distilcamembert-base-sentiment",
                           tokenizer="cmarkea/distilcamembert-base-sentiment")
        HF_AVAILABLE=True
    except Exception:
        HF_AVAILABLE=False

    def hf_sent(text:str):
        # Map the 5-star camembert labels onto the app's 3-label scheme,
        # scaling the confidence into the shared [-4, 4] score range.
        if not HF_AVAILABLE or not text.strip(): return None
        try:
            res=hf_pipe(text); lab=str(res[0]["label"]).lower(); p=float(res[0].get("score",0.5))
            if "1" in lab or "2" in lab: return {"label":"negatif","score":-4*p}
            if "3" in lab: return {"label":"neutre","score":0.0}
            return {"label":"positive","score":4*p}
        except Exception:
            return None

    rows=[]
    theme_agg=collections.defaultdict(lambda:{"mentions":0,"pos":0,"neg":0})
    used_hf=False; used_oa=False

    for _, r in df.iterrows():
        cid=r["id"]; comment=normalize(str(r["comment"]))

        # Sentiment cascade: OpenAI -> HF -> rules.
        sent=None
        if use_oa_sent:
            sent=oa_sentiment(comment, oa_model, oa_temp); used_oa = used_oa or bool(sent)
        if not sent:
            hf=hf_sent(comment)
            if hf: sent=hf; used_hf=True
        if not sent:
            s=float(lexical_sentiment_score(comment))
            sent={"label":lexical_sentiment_label(s),"score":s}

        # Themes: regex baseline, optionally merged with OpenAI counts
        # (per theme, keep the larger of the two counts).
        themes, counts = detect_themes_regex(comment)
        if use_oa_themes:
            tjson=oa_themes(comment, oa_model, oa_temp)
            if isinstance(tjson, dict):
                used_oa=True
                for th, c in (tjson.get("counts",{}) or {}).items():
                    if th in THEMES and int(c) > 0:
                        counts[th] = max(counts.get(th, 0), int(c))
                themes = [th for th, c in counts.items() if c > 0]

        bucket = nps_bucket(r.get("nps_score", None))

        # Aggregate: occurrences per theme, plus one pos/neg verbatim tick per
        # theme touched by this comment.
        for th, c in counts.items():
            theme_agg[th]["mentions"] += c
            if sent["label"] == "positive":
                theme_agg[th]["pos"] += 1
            elif sent["label"] == "negatif":
                theme_agg[th]["neg"] += 1

        rows.append({
            "id": cid, "nps_score": r.get("nps_score", None), "nps_bucket": bucket,
            "comment": comment,
            "sentiment_score": round(float(sent["score"]), 3),
            "sentiment_label": sent["label"],
            # NOTE(review): used_oa/used_hf are cumulative across rows, so this
            # label reflects whether the source was EVER used, not necessarily
            # the source of THIS row — confirm whether per-row accuracy matters.
            "sentiment_source": "openai" if (use_oa_sent and used_oa) else ("huggingface" if used_hf else "rules"),
            "themes": ", ".join(themes) if themes else "",
            "theme_counts_json": json.dumps(counts, ensure_ascii=False)
        })

    out_df=pd.DataFrame(rows)
    nps=compute_nps(df["nps_score"])
    dist=out_df["sentiment_label"].value_counts().to_dict()

    # Per-theme stats table, most-mentioned first.
    trs=[]
    for th, d in theme_agg.items():
        trs.append({"theme":th,"total_mentions":int(d["mentions"]),
                    "verbatims_pos":int(d["pos"]),"verbatims_neg":int(d["neg"]),
                    "net_sentiment":int(d["pos"]-d["neg"])})
    themes_df=pd.DataFrame(trs).sort_values(["total_mentions","net_sentiment"],ascending=[False,False])

    # Plain-text (rule-based) Markdown summary.
    method = "OpenAI + HF + règles" if (use_oa_sent and used_hf) else ("OpenAI + règles" if use_oa_sent else ("HF + règles" if used_hf else "Règles"))
    lines=[ "# Synthèse NPS & ressentis clients",
            f"- **Méthode** : {method}",
            f"- **NPS global** : {nps:.1f}" if nps is not None else "- **NPS global** : n/a" ]
    if dist:
        tot=sum(dist.values()); pos=dist.get("positive",0); neg=dist.get("negatif",0); neu=dist.get("neutre",0)
        lines.append(f"- **Répartition émotions** : positive {pos}/{tot}, neutre {neu}/{tot}, négative {neg}/{tot}")
    if not themes_df.empty:
        lines.append("\n## Thèmes les plus cités")
        for th,m in themes_df.head(5)[["theme","total_mentions"]].values.tolist():
            lines.append(f"- **{th}** : {m} occurrence(s)")
    summary_md="\n".join(lines)

    # Optional OpenAI synthesis, prepended above the rule-based one.
    if use_oa_summary:
        md = oa_summary(nps, dist, themes_df, oa_model, oa_temp)
        if md: summary_md = md + "\n\n---\n" + summary_md

    # Export files into a fresh temp dir (CSV x2 + summary + zip bundle).
    # NOTE(review): `themes` is reused here as a file path, shadowing the
    # per-comment theme list from the loop above — consider renaming.
    tmpdir=tempfile.mkdtemp(prefix="nps_gradio_")
    enriched=os.path.join(tmpdir,"enriched_comments.csv"); themes=os.path.join(tmpdir,"themes_stats.csv"); summ=os.path.join(tmpdir,"summary.md")
    out_df.to_csv(enriched,index=False,encoding="utf-8-sig")  # utf-8-sig: Excel-friendly BOM
    themes_df.to_csv(themes,index=False,encoding="utf-8-sig")
    with open(summ,"w",encoding="utf-8") as f: f.write(summary_md)
    zip_path=os.path.join(tmpdir,"nps_outputs.zip")
    with zipfile.ZipFile(zip_path,"w",zipfile.ZIP_DEFLATED) as z:
        z.write(enriched,arcname="enriched_comments.csv"); z.write(themes,arcname="themes_stats.csv"); z.write(summ,arcname="summary.md")

    # Charts (k clamped to at least 1).
    fig_gauge = fig_nps_gauge(nps)
    fig_emots = fig_sentiment_bar(dist)
    k = max(1, int(top_k or 10))
    fig_top = fig_top_themes(themes_df, k)
    fig_bal = fig_theme_balance(themes_df, k)

    return (summary_md, themes_df.head(100), out_df.head(200), [enriched, themes, summ, zip_path],
            fig_gauge, fig_emots, fig_top, fig_bal)
374
+
375
# ---------- UI ----------
# Gradio Blocks layout: inputs (file + column mapping + options), a run
# button, result tables/files, and four plots, all wired to analyze_file.
with gr.Blocks(title="Analyse NPS — Assurance") as demo:
    gr.Markdown("## 🔎 Analyse sémantique NPS — Assurance\nTéléverse un CSV, mappe les colonnes, options OpenAI (facultatives), puis lance.")

    with gr.Row():
        csv_in=gr.File(label="CSV NPS (UTF-8)", file_types=[".csv"], type="filepath")
        with gr.Column():
            # Optional overrides when the CSV uses non-canonical column names.
            ccol=gr.Textbox(label="Colonne 'comment' (laisser vide si déjà 'comment')", placeholder="ex: verbatim / texte")
            scol=gr.Textbox(label="Colonne 'nps_score' (laisser vide si déjà 'nps_score')", placeholder="ex: note / score")
            icol=gr.Textbox(label="Colonne 'id' (optionnel)", placeholder="ex: identifiant")

    with gr.Row():
        anon=gr.Checkbox(label="Anonymiser emails / téléphones", value=True)
        # OpenAI disabled by default to avoid API-key errors on first run.
        use_oa_sent=gr.Checkbox(label="OpenAI pour le sentiment", value=False)
        use_oa_themes=gr.Checkbox(label="OpenAI pour les thèmes", value=False)
        use_oa_summary=gr.Checkbox(label="OpenAI pour la synthèse", value=False)

    with gr.Row():
        oa_model=gr.Textbox(label="Modèle OpenAI", value="gpt-4o-mini")
        oa_temp=gr.Slider(label="Température", minimum=0.0, maximum=1.0, value=0.1, step=0.1)
        top_k=gr.Slider(label="Top thèmes (K) pour les graphes", minimum=5, maximum=20, value=10, step=1)
    run=gr.Button("Lancer l'analyse", variant="primary")

    # Outputs: summary text, two tables, downloadable files, then four plots.
    summary=gr.Markdown(label="Synthèse")
    themes_table=gr.Dataframe(label="Thèmes — statistiques")
    enriched_table=gr.Dataframe(label="Verbatims enrichis (aperçu)")
    files_out=gr.Files(label="Téléchargements (CSV & ZIP)")

    with gr.Row():
        plot_nps = gr.Plot(label="NPS — Jauge")
        plot_sent= gr.Plot(label="Répartition des émotions")
    with gr.Row():
        plot_top = gr.Plot(label="Top thèmes — occurrences")
        plot_bal = gr.Plot(label="Top thèmes — balance Pos/Neg")

    def _go(file,cc,sc,ic,a,uos,uot,uosum,model,temp,k):
        # Adapter between Gradio inputs and analyze_file: wraps the filepath
        # in an object exposing `.name` (what analyze_file expects) and
        # normalizes blank/None option values to safe defaults.
        if file is None: raise gr.Error("Ajoute un CSV.")
        class F: pass
        f=F(); f.name = file if isinstance(file,str) else file.name
        return analyze_file(f, cc.strip() if cc else "", sc.strip() if sc else "", ic.strip() if ic else "",
                            a, uos, uot, uosum, model.strip() or "gpt-4o-mini", float(temp or 0.0), int(k or 10))

    run.click(_go,
              inputs=[csv_in, ccol, scol, icol, anon, use_oa_sent, use_oa_themes, use_oa_summary, oa_model, oa_temp, top_k],
              outputs=[summary, themes_table, enriched_table, files_out, plot_nps, plot_sent, plot_top, plot_bal])

if __name__=="__main__":
    # Local launch only; share/show_api left off for the HF Space deployment.
    demo.launch(share=False, show_api=False)
nps_comments_sample_assurance.csv ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ id,nps_score,comment
2
+ 1,10,"Remboursement rapide, télétransmission OK, je recommande."
3
+ 2,6,"Impossible de joindre un conseiller, retard de traitement du dossier."
4
+ 3,9,"Tiers payant nickel chez l'opticien, parcours simple."
5
+ 4,5,"Erreur de facturation et prélèvements doublés, très déçu."
requirements-full.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ gradio
2
+ pandas
3
+ unidecode
4
+ plotly
5
+ openai
6
+ transformers
7
+ torch
requirements-min.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ gradio
2
+ pandas
3
+ unidecode
4
+ plotly