kaiku03 committed on
Commit 33f9c78 · verified · 1 Parent(s): 35cf55c

Update tools/news_reporter.py

Files changed (1)
  1. tools/news_reporter.py +27 -24
tools/news_reporter.py CHANGED
@@ -1,39 +1,42 @@
+
 from agent_state import AgentState
+from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
 import os
-from langchain_huggingface import HuggingFaceEndpoint
-
-HF_TOKEN = os.getenv("HF_TOKEN")
-if not HF_TOKEN:
-    raise RuntimeError("HF_TOKEN env var is missing")
 
 MODEL_ID = "google/gemma-3n-E4B-it" # confirm this exact repo id exists
 
-llm = HuggingFaceEndpoint(
-    repo_id=MODEL_ID,
-    task="text-generation",
-    huggingfacehub_api_token=HF_TOKEN,
-    temperature=0.7,
+# Optional: use a cache directory
+cache_dir = os.getenv("HF_HOME", None)
+
+# Load tokenizer and model
+tokenizer = AutoTokenizer.from_pretrained(
+    MODEL_ID, cache_dir=cache_dir
+)
+model = AutoModelForCausalLM.from_pretrained(
+    MODEL_ID, cache_dir=cache_dir
+)
+
+# Create text generation pipeline
+text_generator = pipeline(
+    "text-generation",
+    model=model,
+    tokenizer=tokenizer,
     max_new_tokens=512,
-    # or: model_kwargs={"temperature":0.7, "max_new_tokens":512}
+    temperature=0.7,
 )
 
 def create_news_report(state: AgentState) -> AgentState:
     if state.feedback:
-        prompt = (
-            "You are revising a news report based on the user's feedback.\n"
-            f'Transcription: "{state.transcribed_text}"\n'
-            f'Old Report: "{state.news_report}"\n'
-            f'Feedback: "{state.feedback}"\n'
-            "Rewrite it professionally."
-        )
+        prompt = f"""You are revising a news report based on the user's feedback:
+Transcription: "{state.transcribed_text}"
+Old Report: "{state.news_report}"
+Feedback: "{state.feedback}" """
     else:
-        prompt = (
-            "Write a professional news article based on this transcription:\n"
-            f'"{state.transcribed_text}"'
-        )
+        prompt = f"""Write a professional news article based on this transcription:
+"{state.transcribed_text}" """
 
-    report = llm.invoke(prompt).strip()
-    state.news_report = report
+    generated = text_generator(prompt)[0]["generated_text"]
+    state.news_report = generated
     state.feedback = None
     state.approved = False
     return state
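
A behavioral note on the new code, offered as a review comment rather than as part of the commit: by default the transformers text-generation pipeline returns the prompt and the completion together in "generated_text", so state.news_report would begin with the instructions themselves, and "temperature" only takes effect once sampling is enabled. The google/gemma checkpoints are also gated, so a Hugging Face login (or an HF_TOKEN in the environment) is still needed to download the model even though the explicit token check was removed. Below is a minimal sketch of a tighter generation call, assuming the "tokenizer" and "text_generator" objects defined in the diff above; the chat-template step reflects that gemma-3n-E4B-it is an instruction-tuned model.

# Sketch only; assumes `tokenizer` and `text_generator` from the diff above.
# Instruction-tuned Gemma models expect chat-formatted input, so wrap the
# raw prompt in the model's chat template first.
messages = [{"role": "user", "content": prompt}]
chat_prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)

# return_full_text=False drops the echoed prompt from the output;
# do_sample=True is needed for `temperature` to have any effect.
generated = text_generator(
    chat_prompt,
    max_new_tokens=512,
    do_sample=True,
    temperature=0.7,
    return_full_text=False,
)[0]["generated_text"]
state.news_report = generated.strip()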