JerLag committed · commit b5fb46a (verified) · parent: 52ba7bd

Update app.py

Files changed (1): app.py (+135 -107)
app.py CHANGED
@@ -1,10 +1,10 @@
  # -*- coding: utf-8 -*-
  """
  Verbatify — Analyse sémantique NPS (Paste-only, NPS inféré)
  """

  import os, re, json, collections, tempfile, zipfile
- from pathlib import Path
  from typing import List, Dict, Optional
  import pandas as pd
  import gradio as gr
@@ -12,28 +12,47 @@ import plotly.express as px
  import plotly.graph_objects as go
  import plotly.io as pio

- # ====================== CSS externe (charge verbatim.css OU verbatify.css) ======================
- def load_css() -> str:
-     candidates = [
-         "verbatim.css", "verbatify.css",                # racine
-         "assets/verbatim.css", "assets/verbatify.css"   # sous-dossier éventuel
-     ]
-     here = Path(__file__).parent
-     for name in candidates:
-         p = here / name
-         if p.exists():
-             css = p.read_text(encoding="utf-8")
-             print(f"[Verbatify] CSS chargé: {p.name}")
-             return css
-     print("[Verbatify] ⚠️ Aucun fichier CSS trouvé (verbatim.css / verbatify.css).")
-     # filet de sécurité visuel
-     return """
-     html,body,.gradio-container{background:#ffffff!important;color:#0F172A!important}
-     """
-
- VB_CSS = load_css()
-
- # ====================== PLOTLY THEME ======================
  def apply_plotly_theme():
      pio.templates["verbatify"] = go.layout.Template(
          layout=go.Layout(
@@ -53,11 +72,12 @@ LOGO_SVG = """<svg xmlns='http://www.w3.org/2000/svg' width='224' height='38' vi
  <g fill='none' fill-rule='evenodd'>
  <rect x='0' y='7' width='38' height='24' rx='12' fill='url(#g)'/>
  <circle cx='13' cy='19' r='5' fill='#fff' opacity='0.95'/><circle cx='25' cy='19' r='5' fill='#fff' opacity='0.72'/>
- <text x='46' y='25' font-family='Manrope, system-ui, -apple-system, Segoe UI, Roboto, Arial, sans-serif' font-size='20' font-weight='500' fill='#0F172A' letter-spacing='0.2'>Verbatify</text>
  </g>
  </svg>"""

- # ====================== UNIDECODE (fallback) ======================
  try:
      from unidecode import unidecode
  except Exception:
@@ -68,7 +88,8 @@ except Exception:
          except Exception:
              return str(x)

- # ====================== THÉSAURUS ======================
  THEMES = {
      "Remboursements santé":[r"\bremboursement[s]?\b", r"\bt[eé]l[eé]transmission\b", r"\bno[eé]mie\b",
          r"\bprise\s*en\s*charge[s]?\b", r"\btaux\s+de\s+remboursement[s]?\b", r"\b(ameli|cpam)\b",
@@ -99,30 +120,42 @@ THEMES = {
      "Agence / Accueil":[r"\bagence[s]?\b", r"\bboutique[s]?\b", r"\baccueil\b", r"\bconseil[s]?\b", r"\battente\b", r"\bcaisse[s]?\b"],
  }

- # ====================== SENTIMENT (règles) ======================
- POS_WORDS = {"bien":1.0,"super":1.2,"parfait":1.4,"excellent":1.5,"ravi":1.2,"satisfait":1.0,
-              "rapide":0.8,"efficace":1.0,"fiable":1.0,"simple":0.8,"facile":0.8,"clair":0.8,"conforme":0.8,
-              "sympa":0.8,"professionnel":1.0,"réactif":1.0,"reactif":1.0,"compétent":1.0,"competent":1.0,
-              "top":1.2,"recommande":1.2,"recommandé":1.2,"bon":0.8}
- NEG_WORDS = {"mauvais":-1.2,"horrible":-1.5,"nul":-1.2,"lent":-0.8,"cher":-0.9,"arnaque":-1.5,
-              "déçu":-1.2,"decu":-1.2,"incompétent":-1.3,"bug":-0.9,"bogue":-0.9,"problème":-1.0,
-              "probleme":-1.0,"attente":-0.6,"retard":-0.9,"erreur":-1.0,"compliqué":-0.8,"complique":-0.8,
-              "défectueux":-1.3,"defectueux":-1.3,"non conforme":-1.2,"impossible":-1.0,"difficile":-0.7}
  NEGATIONS = [r"\bpas\b", r"\bjamais\b", r"\bplus\b", r"\baucun[e]?\b", r"\brien\b", r"\bni\b", r"\bgu[eè]re\b"]
  INTENSIFIERS = [r"\btr[eè]s\b", r"\bvraiment\b", r"\bextr[eê]mement\b", r"\bhyper\b"]
  DIMINISHERS = [r"\bun[e]?\s+peu\b", r"\bassez\b", r"\bplut[oô]t\b", r"\bl[eé]g[eè]rement\b"]
  INTENSIFIER_W, DIMINISHER_W = 1.5, 0.7

- # ====================== OpenAI (optionnel) ======================
  OPENAI_AVAILABLE = False
  try:
-     from openai import OpenAI
-     if os.getenv("OPENAI_API_KEY"):
-         _client = OpenAI(); OPENAI_AVAILABLE = True
  except Exception:
-     OPENAI_AVAILABLE = False

- # ====================== UTILS ======================
  def normalize(t:str)->str:
      if not isinstance(t,str): return ""
      return re.sub(r"\s+"," ",t.strip())
@@ -182,18 +215,22 @@ def anonymize(t:str)->str:
      t=re.sub(r"\b(?:\+?\d[\s.-]?){7,}\b","[tel]",t)
      return t

- def df_from_pasted(text:str, sep="|", has_score=False) -> pd.DataFrame:
      lines = [l.strip() for l in (text or "").splitlines() if l.strip()]
      rows = []
      for i, line in enumerate(lines, 1):
-         if has_score and sep in line:
-             verb, score = line.split(sep, 1)
-             rows.append({"id": i, "comment": verb.strip(), "nps_score": pd.to_numeric(score.strip(), errors="coerce")})
          else:
-             rows.append({"id": i, "comment": line.strip(), "nps_score": None})
      return pd.DataFrame(rows)

- # ---------- OpenAI helpers ----------
  def openai_json(model:str, system:str, user:str, temperature:float=0.0) -> Optional[dict]:
      if not OPENAI_AVAILABLE: return None
      try:
@@ -207,26 +244,26 @@ def openai_json(model:str, system:str, user:str, temperature:float=0.0) -> Optio
      except Exception:
          return None

- def oa_sentiment(comment:str, model:str, temperature:float=0.0) -> Optional[dict]:
      system = "Tu es un classifieur FR. Réponds strictement en JSON."
      user = f'Texte: {comment}\nDonne "label" parmi ["positive","neutre","negatif"] et "score" entre -4 et 4. JSON.'
-     return openai_json(model, system, user, temperature)

- def oa_themes(comment:str, model:str, temperature:float=0.0) -> Optional[dict]:
      system = "Tu maps le texte client vers un thésaurus assurance. Réponds strictement en JSON."
      user = f"Texte: {comment}\nThésaurus: {json.dumps(list(THEMES.keys()), ensure_ascii=False)}\nRetourne {{'themes': [...], 'counts': {{...}}}}"
-     return openai_json(model, system, user, temperature)

- def oa_summary(nps:Optional[float], dist:Dict[str,int], themes_df:pd.DataFrame, model:str, temperature:float=0.2) -> Optional[str]:
      system = "Tu es un analyste CX FR. Donne une synthèse courte et actionnable en Markdown."
      top = [] if themes_df is None else themes_df.head(6).to_dict(orient="records")
      user = f"Données: NPS={None if nps is None else round(nps,1)}, Répartition={dist}, Thèmes={json.dumps(top, ensure_ascii=False)}"
-     j = openai_json(model, system, user, temperature)
      if isinstance(j, dict) and "text" in j: return j["text"]
      if isinstance(j, dict): return ' '.join(str(v) for v in j.values())
      return None

- # ---------- HF sentiment (optionnel) ----------
  def make_hf_pipe():
      try:
          from transformers import pipeline
@@ -236,7 +273,7 @@ def make_hf_pipe():
      except Exception:
          return None

- # ---------- Inférence NPS ----------
  def infer_nps_from_sentiment(label: str, score: float) -> int:
      scaled = int(round((float(score) + 4.0) * 1.25))  # -4 -> 0, 0 -> 5, +4 -> 10
      scaled = max(0, min(10, scaled))
@@ -246,7 +283,8 @@ def infer_nps_from_sentiment(label: str, score: float) -> int:
          return min(6, scaled)
      return 8 if score >= 0 else 7

- # ====================== GRAPHIQUES ======================
  def fig_nps_gauge(nps: Optional[float]) -> go.Figure:
      v = 0.0 if nps is None else float(nps)
      return go.Figure(go.Indicator(mode="gauge+number", value=v,
@@ -271,53 +309,49 @@ def fig_theme_balance(themes_df: pd.DataFrame, k: int) -> go.Figure:
      fig = px.bar(d2, x="theme", y="count", color="type", barmode="stack", title=f"Top {k} thèmes — balance Pos/Neg")
      fig.update_layout(xaxis_tickangle=-30); return fig

- # ====================== ANALYSE ======================
- def analyze_text(pasted_txt, has_sc, sep_chr,
-                  do_anonymize, use_oa_sent, use_oa_themes, use_oa_summary,
-                  oa_model, oa_temp, top_k):

-     df = df_from_pasted(pasted_txt or "", sep=sep_chr or "|", has_score=bool(has_sc))
      if df.empty:
          raise gr.Error("Colle au moins un verbatim (une ligne).")

-     if do_anonymize:
-         df["comment"]=df["comment"].apply(anonymize)
-
-     if (use_oa_sent or use_oa_themes or use_oa_summary) and not OPENAI_AVAILABLE:
          use_oa_sent = use_oa_themes = use_oa_summary = False
-
      hf_pipe = make_hf_pipe()
-     def hf_sent(text:str):
-         if hf_pipe is None or not text.strip(): return None
-         try:
-             res=hf_pipe(text); lab=str(res[0]["label"]).lower(); p=float(res[0].get("score",0.5))
-             if "1" in lab or "2" in lab: return {"label":"negatif","score":-4*p}
-             if "3" in lab: return {"label":"neutre","score":0.0}
-             return {"label":"positive","score":4*p}
-         except Exception:
-             return None

      rows=[]; theme_agg=collections.defaultdict(lambda:{"mentions":0,"pos":0,"neg":0})
      used_hf=False; used_oa=False; any_inferred=False

      for idx, r in df.iterrows():
          cid=r.get("id", idx+1); comment=normalize(str(r["comment"]))

-         # Sentiment
          sent=None
          if use_oa_sent:
-             sent=oa_sentiment(comment, oa_model, float(oa_temp or 0.0)); used_oa = used_oa or bool(sent)
-             if not sent:
-                 hf=hf_sent(comment)
-                 if hf: sent=hf; used_hf=True
          if not sent:
              s=float(lexical_sentiment_score(comment))
              sent={"label":lexical_sentiment_label(s),"score":s}

-         # Thèmes
          themes, counts = detect_themes_regex(comment)
          if use_oa_themes:
-             tjson=oa_themes(comment, oa_model, float(oa_temp or 0.0))
              if isinstance(tjson, dict):
                  used_oa=True
                  for th, c in (tjson.get("counts",{}) or {}).items():
@@ -325,7 +359,7 @@ def analyze_text(pasted_txt, has_sc, sep_chr,
                      counts[th] = max(counts.get(th, 0), int(c))
                  themes = [th for th, c in counts.items() if c > 0]

-         # NPS (existant ou inféré)
          given = r.get("nps_score", None)
          try:
              given = int(given) if given is not None and str(given).strip() != "" else None
@@ -367,7 +401,7 @@ def analyze_text(pasted_txt, has_sc, sep_chr,
      themes_df=pd.DataFrame(trs).sort_values(["total_mentions","net_sentiment"],ascending=[False,False])

      # Synthèse
-     method = "OpenAI + HF + règles" if (use_oa_sent and used_hf) else ("OpenAI + règles" if use_oa_sent else ("HF + règles" if used_hf else "Règles"))
      nps_label = "NPS global (inféré)" if any_inferred else "NPS global"
      lines=[ "# Synthèse NPS & ressentis clients",
              f"- **Méthode** : {method}",
@@ -381,8 +415,8 @@ def analyze_text(pasted_txt, has_sc, sep_chr,
          lines.append(f"- **{th}** : {m} occurrence(s)")
      summary_md="\n".join(lines)

-     if use_oa_summary:
-         md = oa_summary(nps, dist, themes_df, oa_model, float(oa_temp or 0.0))
          if md: summary_md = md + "\n\n---\n" + summary_md

      # Exports
@@ -426,7 +460,7 @@ def analyze_text(pasted_txt, has_sc, sep_chr,
      ench_md, irr_md, reco_md = make_panels(themes_df)
      fig_gauge = fig_nps_gauge(nps)
      fig_emots = fig_sentiment_bar(dist)
-     k = max(1, int(top_k or 10))
      fig_top = fig_top_themes(themes_df, k)
      fig_bal = fig_theme_balance(themes_df, k)

@@ -434,48 +468,41 @@ def analyze_text(pasted_txt, has_sc, sep_chr,
              ench_md, irr_md, reco_md, fig_gauge, fig_emots, fig_top, fig_bal)

  # ====================== UI ======================
  apply_plotly_theme()

- with gr.Blocks(title="Verbatify, révélez la voix de vos assurés, simplement", css=VB_CSS) as demo:
      gr.HTML(
          "<div class='vb-hero'>"
          f"{LOGO_SVG}"
          "<div><div class='vb-title'>Verbatify — Analyse NPS</div>"
-         "<div class='vb-sub'>Émotions • Thématiques • Occurrences • Synthèse • NPS</div></div>"
          "</div>"
      )

-     with gr.Column():
          pasted = gr.Textbox(
              label="Verbatims (un par ligne)", lines=10,
              placeholder="Exemple :\nRemboursement rapide, télétransmission OK | 10\nImpossible de joindre un conseiller | 3\nEspace client : bug à la connexion | 4",
          )
-         with gr.Row():
-             has_score = gr.Checkbox(label="J’ai un score NPS par ligne", value=True)  # déjà coché
-             sep = gr.Textbox(label="Séparateur score", value="|", scale=1)
-
-         with gr.Row():
-             anon=gr.Checkbox(label="Anonymiser emails / téléphones", value=True)
-             use_oa_sent=gr.Checkbox(label="OpenAI pour le sentiment", value=True)
-             use_oa_themes=gr.Checkbox(label="OpenAI pour les thèmes", value=True)
-             use_oa_summary=gr.Checkbox(label="OpenAI pour la synthèse", value=True)
-
-         with gr.Row():
-             oa_model=gr.Textbox(label="Modèle OpenAI", value="gpt-4o-mini")
-             oa_temp=gr.Slider(label="Température", minimum=0.0, maximum=1.0, value=0.1, step=0.1)
-             top_k=gr.Slider(label="Top thèmes (K) pour les graphes", minimum=5, maximum=20, value=10, step=1)
-         run=gr.Button("Lancer l'analyse", elem_classes=["vb-cta"])

      with gr.Row():
          ench_panel=gr.Markdown()
          irr_panel=gr.Markdown()
          reco_panel=gr.Markdown()

      summary=gr.Markdown(label="Synthèse NPS & ressentis clients")
      themes_table=gr.Dataframe(label="Thèmes — statistiques")
      enriched_table=gr.Dataframe(label="Verbatims enrichis (aperçu)")
      files_out=gr.Files(label="Téléchargements (CSV & ZIP)")

      with gr.Row():
          plot_nps = gr.Plot(label="NPS — Jauge")
          plot_sent= gr.Plot(label="Répartition des émotions")
@@ -483,9 +510,10 @@ with gr.Blocks(title="Verbatify, révélez la voix de vos assurés, simplement",
          plot_top = gr.Plot(label="Top thèmes — occurrences")
          plot_bal = gr.Plot(label="Top thèmes — balance Pos/Neg")

      run.click(
          analyze_text,
-         inputs=[pasted, has_score, sep, anon, use_oa_sent, use_oa_themes, use_oa_summary, oa_model, oa_temp, top_k],
          outputs=[summary, themes_table, enriched_table, files_out,
                   ench_panel, irr_panel, reco_panel,
                   plot_nps, plot_sent, plot_top, plot_bal]
@@ -493,7 +521,7 @@ with gr.Blocks(title="Verbatify, révélez la voix de vos assurés, simplement",

      gr.HTML(
          '<div class="vb-footer">© Verbatify.com — Construit par '
-         '<a href="https://jeremy-lagache.fr/" target="_blank" rel="dofollow">Jérémy Lagache</a></div>'
      )

  if __name__ == "__main__":
 
app.py (new version)
  # -*- coding: utf-8 -*-
  """
  Verbatify — Analyse sémantique NPS (Paste-only, NPS inféré)
+ Interface simplifiée : toutes les options sont appliquées automatiquement.
  """

  import os, re, json, collections, tempfile, zipfile
  from typing import List, Dict, Optional
  import pandas as pd
  import gradio as gr
  import plotly.graph_objects as go
  import plotly.io as pio

+ # ====================== CSS (externe si présent, sinon fallback) ======================
+
+ BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+ CSS_FILE = os.path.join(BASE_DIR, "verbatim.css")
+
+ VB_CSS_FALLBACK = r"""
+ @import url('https://fonts.googleapis.com/css2?family=Manrope:wght@400;500;700;800&display=swap');
+ :root{--vb-bg:#F8FAFC;--vb-text:#0F172A;--vb-primary:#7C3AED;--vb-primary-2:#06B6D4;--vb-border:#E2E8F0;}
+ *{color-scheme:light !important}
+ html,body,.gradio-container{background:var(--vb-bg)!important;color:var(--vb-text)!important;
+ font-family:Manrope,system-ui,-apple-system,'Segoe UI',Roboto,Arial,sans-serif!important}
+ .gradio-container{max-width:1120px!important;margin:0 auto!important}
+ .vb-hero{display:flex;align-items:center;gap:16px;padding:20px 22px;margin:10px 0 20px;
+ background:linear-gradient(90deg,rgba(124,58,237,.18),rgba(6,182,212,.18));border:1px solid var(--vb-border);
+ border-radius:14px;box-shadow:0 10px 26px rgba(2,6,23,.08)}
+ .vb-hero .vb-title{font-size:22px;color:#0F172A;font-weight:500}
+ .vb-hero .vb-sub{font-size:13px;color:#0F172A}
+ .gradio-container .vb-cta{background:linear-gradient(90deg,var(--vb-primary),var(--vb-primary-2))!important;color:#fff!important;
+ border:0!important;font-weight:700!important;font-size:16px!important;padding:14px 18px!important;border-radius:14px!important;
+ box-shadow:0 10px 24px rgba(124,58,237,.28)}
+ .gradio-container .vb-cta:hover{transform:translateY(-2px);filter:brightness(1.05)}
+ /* Patch encarts vides & texte noir partout */
+ .gradio-container .empty,
+ .gradio-container [class*="unpadded_box"],
+ .gradio-container [class*="unpadded-box"],
+ .gradio-container .empty[class*="box"]{background:#FFFFFF!important;background-image:none!important;border:1px solid transparent!important;box-shadow:none!important}
+ .gradio-container .empty *, .gradio-container [class*="unpadded_box"] *{color:#0F172A!important;fill:#0F172A!important}
+ """
+
+ VB_CSS = None
+ try:
+     if os.path.exists(CSS_FILE):
+         with open(CSS_FILE, "r", encoding="utf-8") as f:
+             VB_CSS = f.read()
+ except Exception:
+     VB_CSS = None
+ if not VB_CSS:
+     VB_CSS = VB_CSS_FALLBACK
+
+ # ====================== Plotly theme ======================
+
  def apply_plotly_theme():
      pio.templates["verbatify"] = go.layout.Template(
          layout=go.Layout(
  <g fill='none' fill-rule='evenodd'>
  <rect x='0' y='7' width='38' height='24' rx='12' fill='url(#g)'/>
  <circle cx='13' cy='19' r='5' fill='#fff' opacity='0.95'/><circle cx='25' cy='19' r='5' fill='#fff' opacity='0.72'/>
+ <text x='46' y='25' font-family='Manrope, system-ui, -apple-system, Segoe UI, Roboto, Arial, sans-serif' font-size='20' font-weight='800' fill='#0F172A' letter-spacing='0.2'>Verbatify</text>
  </g>
  </svg>"""

+ # ====================== unidecode fallback ======================
+
  try:
      from unidecode import unidecode
  except Exception:
          except Exception:
              return str(x)

+ # ====================== Thésaurus ASSURANCE ======================
+
  THEMES = {
      "Remboursements santé":[r"\bremboursement[s]?\b", r"\bt[eé]l[eé]transmission\b", r"\bno[eé]mie\b",
          r"\bprise\s*en\s*charge[s]?\b", r"\btaux\s+de\s+remboursement[s]?\b", r"\b(ameli|cpam)\b",
      "Agence / Accueil":[r"\bagence[s]?\b", r"\bboutique[s]?\b", r"\baccueil\b", r"\bconseil[s]?\b", r"\battente\b", r"\bcaisse[s]?\b"],
  }

+ # ====================== Sentiment (règles) ======================
+
+ POS_WORDS = {
+     "bien":1.0,"super":1.2,"parfait":1.4,"excellent":1.5,"ravi":1.2,"satisfait":1.0,
+     "rapide":0.8,"efficace":1.0,"fiable":1.0,"simple":0.8,"facile":0.8,"clair":0.8,"conforme":0.8,
+     "sympa":0.8,"professionnel":1.0,"réactif":1.0,"reactif":1.0,"compétent":1.0,"competent":1.0,
+     "top":1.2,"recommande":1.2,"recommandé":1.2,"bon":0.8
+ }
+ NEG_WORDS = {
+     "mauvais":-1.2,"horrible":-1.5,"nul":-1.2,"lent":-0.8,"cher":-0.9,"arnaque":-1.5,
+     "déçu":-1.2,"decu":-1.2,"incompétent":-1.3,"bug":-0.9,"bogue":-0.9,"problème":-1.0,
+     "probleme":-1.0,"attente":-0.6,"retard":-0.9,"erreur":-1.0,"compliqué":-0.8,"complique":-0.8,
+     "défectueux":-1.3,"defectueux":-1.3,"non conforme":-1.2,"impossible":-1.0,"difficile":-0.7
+ }
  NEGATIONS = [r"\bpas\b", r"\bjamais\b", r"\bplus\b", r"\baucun[e]?\b", r"\brien\b", r"\bni\b", r"\bgu[eè]re\b"]
  INTENSIFIERS = [r"\btr[eè]s\b", r"\bvraiment\b", r"\bextr[eê]mement\b", r"\bhyper\b"]
  DIMINISHERS = [r"\bun[e]?\s+peu\b", r"\bassez\b", r"\bplut[oô]t\b", r"\bl[eé]g[eè]rement\b"]
  INTENSIFIER_W, DIMINISHER_W = 1.5, 0.7

+ # ====================== OpenAI (auto) ======================
+
  OPENAI_AVAILABLE = False
  try:
+     from openai import OpenAI
+     if os.getenv("OPENAI_API_KEY"):
+         _client = OpenAI()
+         OPENAI_AVAILABLE = True
  except Exception:
+     OPENAI_AVAILABLE = False
+
+ OA_MODEL = os.getenv("OPENAI_MODEL", "gpt-4o-mini")
+ OA_TEMP = float(os.getenv("OPENAI_TEMP", "0.1"))
+ TOP_K = int(os.getenv("VERBATIFY_TOPK", "10"))
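
With the checkboxes and sliders gone from the UI, model, temperature and top-K now come from environment variables with the defaults above. A minimal sketch of overriding them before the module is imported (variable names as in the diff; the values here are arbitrary examples, not recommendations):

    import os

    # Must be set before app.py is imported: the defaults are read once at import time.
    os.environ["OPENAI_MODEL"] = "gpt-4o-mini"   # model used by the oa_* helpers
    os.environ["OPENAI_TEMP"] = "0.0"            # parsed with float(...)
    os.environ["VERBATIFY_TOPK"] = "12"          # parsed with int(...), drives the Top-K charts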
+
+ # ====================== Utils ======================

  def normalize(t:str)->str:
      if not isinstance(t,str): return ""
      return re.sub(r"\s+"," ",t.strip())

      t=re.sub(r"\b(?:\+?\d[\s.-]?){7,}\b","[tel]",t)
      return t

+ # --------- Coller du texte → DataFrame (AUTO : détecte "| note" en fin de ligne) ----------
+ def df_from_pasted_auto(text:str) -> pd.DataFrame:
      lines = [l.strip() for l in (text or "").splitlines() if l.strip()]
      rows = []
+     pat = re.compile(r"\|\s*(-?\d{1,2})\s*$")
      for i, line in enumerate(lines, 1):
+         m = pat.search(line)
+         if m:
+             verb = line[:m.start()].strip()
+             score = m.group(1)
+             rows.append({"id": i, "comment": verb, "nps_score": pd.to_numeric(score, errors="coerce")})
          else:
+             rows.append({"id": i, "comment": line, "nps_score": None})
      return pd.DataFrame(rows)
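
df_from_pasted_auto replaces the old separator and "J’ai un score NPS" options: a trailing "| score" is detected automatically on each pasted line. A standalone sketch of that detection, reusing the same pattern as above (the sample lines are illustrative):

    import re

    pat = re.compile(r"\|\s*(-?\d{1,2})\s*$")   # same end-of-line pattern as df_from_pasted_auto

    for line in ["Remboursement rapide, télétransmission OK | 10",
                 "Impossible de joindre un conseiller"]:
        m = pat.search(line)
        if m:
            print(line[:m.start()].strip(), "->", int(m.group(1)))   # comment, parsed score
        else:
            print(line, "-> no score; it will be inferred from sentiment")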
+ # --------- OpenAI helpers (auto) ----------
  def openai_json(model:str, system:str, user:str, temperature:float=0.0) -> Optional[dict]:
      if not OPENAI_AVAILABLE: return None
      try:

      except Exception:
          return None

+ def oa_sentiment(comment:str) -> Optional[dict]:
      system = "Tu es un classifieur FR. Réponds strictement en JSON."
      user = f'Texte: {comment}\nDonne "label" parmi ["positive","neutre","negatif"] et "score" entre -4 et 4. JSON.'
+     return openai_json(OA_MODEL, system, user, OA_TEMP)

+ def oa_themes(comment:str) -> Optional[dict]:
      system = "Tu maps le texte client vers un thésaurus assurance. Réponds strictement en JSON."
      user = f"Texte: {comment}\nThésaurus: {json.dumps(list(THEMES.keys()), ensure_ascii=False)}\nRetourne {{'themes': [...], 'counts': {{...}}}}"
+     return openai_json(OA_MODEL, system, user, OA_TEMP)

+ def oa_summary(nps:Optional[float], dist:Dict[str,int], themes_df:pd.DataFrame) -> Optional[str]:
      system = "Tu es un analyste CX FR. Donne une synthèse courte et actionnable en Markdown."
      top = [] if themes_df is None else themes_df.head(6).to_dict(orient="records")
      user = f"Données: NPS={None if nps is None else round(nps,1)}, Répartition={dist}, Thèmes={json.dumps(top, ensure_ascii=False)}"
+     j = openai_json(OA_MODEL, system, user, 0.2)
      if isinstance(j, dict) and "text" in j: return j["text"]
      if isinstance(j, dict): return ' '.join(str(v) for v in j.values())
      return None
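
The helpers now read the model and temperature from the module-level OA_MODEL / OA_TEMP instead of per-call arguments. The prompts still ask for strict JSON; a small sketch of the shape the oa_sentiment prompt requests (the payload below is illustrative, not a recorded model answer):

    import json

    raw = '{"label": "negatif", "score": -2.5}'   # example payload only
    sent = json.loads(raw)
    assert sent["label"] in {"positive", "neutre", "negatif"}
    assert -4 <= sent["score"] <= 4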
+ # --------- HF sentiment (optionnel)
  def make_hf_pipe():
      try:
          from transformers import pipeline

      except Exception:
          return None

+ # --------- Inférence NPS depuis le sentiment ----------
  def infer_nps_from_sentiment(label: str, score: float) -> int:
      scaled = int(round((float(score) + 4.0) * 1.25))  # -4 -> 0, 0 -> 5, +4 -> 10
      scaled = max(0, min(10, scaled))

          return min(6, scaled)
      return 8 if score >= 0 else 7
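
When a line carries no pasted score, the note is inferred from the sentiment score: the [-4, 4] range is rescaled onto [0, 10] before the label-specific clamping (partly elided in this hunk). A quick arithmetic check of that rescaling, outside the app:

    # Same rescaling as infer_nps_from_sentiment: -4 -> 0, 0 -> 5, +4 -> 10.
    for score in (-4.0, 0.0, 2.0, 4.0):
        scaled = int(round((score + 4.0) * 1.25))
        scaled = max(0, min(10, scaled))
        print(score, "->", scaled)   # -4.0 -> 0, 0.0 -> 5, 2.0 -> 8, 4.0 -> 10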
+ # ====================== Graphiques ======================
+
  def fig_nps_gauge(nps: Optional[float]) -> go.Figure:
      v = 0.0 if nps is None else float(nps)
      return go.Figure(go.Indicator(mode="gauge+number", value=v,

      fig = px.bar(d2, x="theme", y="count", color="type", barmode="stack", title=f"Top {k} thèmes — balance Pos/Neg")
      fig.update_layout(xaxis_tickangle=-30); return fig

+ # ====================== Analyse principale (AUTO) ======================

+ def analyze_text(pasted_txt: str):
+     # 1) Parse auto + anonymisation
+     df = df_from_pasted_auto(pasted_txt or "")
      if df.empty:
          raise gr.Error("Colle au moins un verbatim (une ligne).")
+     df["comment"] = df["comment"].apply(anonymize)

+     # 2) Pipes
+     use_oa_sent = use_oa_themes = use_oa_summary = True
+     if not OPENAI_AVAILABLE:
          use_oa_sent = use_oa_themes = use_oa_summary = False
      hf_pipe = make_hf_pipe()

+     # 3) Boucle verbatims
      rows=[]; theme_agg=collections.defaultdict(lambda:{"mentions":0,"pos":0,"neg":0})
      used_hf=False; used_oa=False; any_inferred=False

      for idx, r in df.iterrows():
          cid=r.get("id", idx+1); comment=normalize(str(r["comment"]))

+         # Sentiment: OpenAI -> HF -> règles
          sent=None
          if use_oa_sent:
+             sent=oa_sentiment(comment); used_oa = used_oa or bool(sent)
+         if not sent and hf_pipe is not None and comment.strip():
+             try:
+                 res=hf_pipe(comment); lab=str(res[0]["label"]).lower(); p=float(res[0].get("score",0.5))
+                 if "1" in lab or "2" in lab: sent = {"label":"negatif","score":-4*p}
+                 elif "3" in lab: sent = {"label":"neutre","score":0.0}
+                 else: sent = {"label":"positive","score":4*p}
+                 used_hf=True
+             except Exception:
+                 sent=None
          if not sent:
              s=float(lexical_sentiment_score(comment))
              sent={"label":lexical_sentiment_label(s),"score":s}

+         # Thèmes: regex (+ fusion OpenAI)
          themes, counts = detect_themes_regex(comment)
          if use_oa_themes:
+             tjson=oa_themes(comment)
              if isinstance(tjson, dict):
                  used_oa=True
                  for th, c in (tjson.get("counts",{}) or {}).items():

                      counts[th] = max(counts.get(th, 0), int(c))
                  themes = [th for th, c in counts.items() if c > 0]

+         # Note NPS existante ou inférée
          given = r.get("nps_score", None)
          try:
              given = int(given) if given is not None and str(given).strip() != "" else None

      themes_df=pd.DataFrame(trs).sort_values(["total_mentions","net_sentiment"],ascending=[False,False])

      # Synthèse
+     method = "OpenAI + HF + règles" if (OPENAI_AVAILABLE and used_hf) else ("OpenAI + règles" if OPENAI_AVAILABLE else ("HF + règles" if used_hf else "Règles"))
      nps_label = "NPS global (inféré)" if any_inferred else "NPS global"
      lines=[ "# Synthèse NPS & ressentis clients",
              f"- **Méthode** : {method}",

          lines.append(f"- **{th}** : {m} occurrence(s)")
      summary_md="\n".join(lines)

+     if OPENAI_AVAILABLE:
+         md = oa_summary(nps, dist, themes_df)
          if md: summary_md = md + "\n\n---\n" + summary_md

      # Exports

      ench_md, irr_md, reco_md = make_panels(themes_df)
      fig_gauge = fig_nps_gauge(nps)
      fig_emots = fig_sentiment_bar(dist)
+     k = max(1, int(TOP_K))
      fig_top = fig_top_themes(themes_df, k)
      fig_bal = fig_theme_balance(themes_df, k)

              ench_md, irr_md, reco_md, fig_gauge, fig_emots, fig_top, fig_bal)

  # ====================== UI ======================
+
  apply_plotly_theme()

+ with gr.Blocks(title="Verbatify Analyse NPS", css=VB_CSS) as demo:
+     # Header
      gr.HTML(
          "<div class='vb-hero'>"
          f"{LOGO_SVG}"
          "<div><div class='vb-title'>Verbatify — Analyse NPS</div>"
+         "<div class='vb-sub'>Émotions • Thématiques • Occurrences • Synthèse</div></div>"
          "</div>"
      )

+     # Entrée minimale + bouton
+     with gr.Row():
          pasted = gr.Textbox(
              label="Verbatims (un par ligne)", lines=10,
              placeholder="Exemple :\nRemboursement rapide, télétransmission OK | 10\nImpossible de joindre un conseiller | 3\nEspace client : bug à la connexion | 4",
+             scale=4
          )
+         run = gr.Button("Lancer l'analyse", elem_classes=["vb-cta"], scale=1)

+     # Panneaux
      with gr.Row():
          ench_panel=gr.Markdown()
          irr_panel=gr.Markdown()
          reco_panel=gr.Markdown()

+     # Résultats + téléchargements
      summary=gr.Markdown(label="Synthèse NPS & ressentis clients")
      themes_table=gr.Dataframe(label="Thèmes — statistiques")
      enriched_table=gr.Dataframe(label="Verbatims enrichis (aperçu)")
      files_out=gr.Files(label="Téléchargements (CSV & ZIP)")

+     # Graphes
      with gr.Row():
          plot_nps = gr.Plot(label="NPS — Jauge")
          plot_sent= gr.Plot(label="Répartition des émotions")

          plot_top = gr.Plot(label="Top thèmes — occurrences")
          plot_bal = gr.Plot(label="Top thèmes — balance Pos/Neg")

+     # Lancer
      run.click(
          analyze_text,
+         inputs=[pasted],
          outputs=[summary, themes_table, enriched_table, files_out,
                   ench_panel, irr_panel, reco_panel,
                   plot_nps, plot_sent, plot_top, plot_bal]

      gr.HTML(
          '<div class="vb-footer">© Verbatify.com — Construit par '
+         '<a href="https://jeremy-lagache.fr/" target="_blank" rel="noopener">Jérémy Lagache</a></div>'
      )

  if __name__ == "__main__":
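
Because the event wiring now passes a single input (the textbox) to analyze_text, the whole pipeline can also be exercised outside Gradio. A hypothetical sketch, assuming app.py is importable in the same environment and the optional OpenAI / transformers dependencies are absent, so the rules-based fallback applies:

    from app import analyze_text   # importing builds the Blocks UI but does not launch it

    sample = (
        "Remboursement rapide, télétransmission OK | 10\n"
        "Impossible de joindre un conseiller"
    )
    results = analyze_text(sample)   # one positional argument, as in run.click(inputs=[pasted])
    print(len(results))              # tuple matching the outputs list wired above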