Spaces:
Bradarr
/
Running on Zero

Bradarr committed on
Commit
2147e35
·
verified ·
1 Parent(s): 589dbfe

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -12,7 +12,7 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
12
  import logging
13
 
14
  # Configure logging
15
- logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
16
 
17
  # --- Authentication and Configuration --- (Moved BEFORE model loading)
18
  try:
@@ -62,7 +62,7 @@ def transcribe_audio(audio_path: str, whisper_model) -> str: # Pass whisper_mod
62
 
63
  def generate_response(text: str, model_gemma, tokenizer_gemma, device) -> str: # Pass model and tokenizer
64
  try:
65
- input_text = "Here is a response for the user. " + text
66
  input = tokenizer_gemma(input_text, return_tensors="pt").to(device)
67
  generated_output = model_gemma.generate(**input, max_length=MAX_GEMMA_LENGTH, early_stopping=True)
68
  return tokenizer_gemma.decode(generated_output[0], skip_special_tokens=True)
@@ -111,7 +111,7 @@ def infer(user_audio) -> tuple[int, np.ndarray]:
111
  logging.info("Whisper model loaded successfully.")
112
 
113
  tokenizer_gemma = AutoTokenizer.from_pretrained("google/gemma-3-1b-pt")
114
- model_gemma = AutoModelForCausalLM.from_pretrained("google/gemma-3-1b-pt").to(device)
115
  logging.info("Gemma 3 1B pt model loaded successfully.")
116
 
117
  if not user_audio:
 
12
  import logging
13
 
14
  # Configure logging
15
+ logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
16
 
17
  # --- Authentication and Configuration --- (Moved BEFORE model loading)
18
  try:
 
62
 
63
  def generate_response(text: str, model_gemma, tokenizer_gemma, device) -> str: # Pass model and tokenizer
64
  try:
65
+ input_text = "Here is a response to the user: " + text
66
  input = tokenizer_gemma(input_text, return_tensors="pt").to(device)
67
  generated_output = model_gemma.generate(**input, max_length=MAX_GEMMA_LENGTH, early_stopping=True)
68
  return tokenizer_gemma.decode(generated_output[0], skip_special_tokens=True)
 
111
  logging.info("Whisper model loaded successfully.")
112
 
113
  tokenizer_gemma = AutoTokenizer.from_pretrained("google/gemma-3-1b-pt")
114
+ model_gemma = AutoModelForCausalLM.from_pretrained("google/gemma-3-1b-it").to(device)
115
  logging.info("Gemma 3 1B pt model loaded successfully.")
116
 
117
  if not user_audio: