serhany committed
Commit fcf00e5 · verified · 1 Parent(s): 11a5899

Update app.py

Files changed (1)
  1. app.py +14 -6
app.py CHANGED
@@ -33,12 +33,17 @@ def load_model_and_tokenizer(model_identifier: str, model_key: str, tokenizer_ke

     print(f"Loading {model_key} model ({model_identifier})...")
     try:
-        tokenizer = AutoTokenizer.from_pretrained(model_identifier, trust_remote_code=True)
+        tokenizer = AutoTokenizer.from_pretrained(
+            model_identifier,
+            trust_remote_code=True,
+            use_auth_token=False  # Ensure we're not using auth for public models
+        )
         model = AutoModelForCausalLM.from_pretrained(
             model_identifier,
             torch_dtype=torch.bfloat16,
             device_map="auto",
             trust_remote_code=True,
+            use_auth_token=False  # Ensure we're not using auth for public models
         )
         model.eval()

@@ -53,6 +58,9 @@ def load_model_and_tokenizer(model_identifier: str, model_key: str, tokenizer_ke
         return model, tokenizer
     except Exception as e:
         print(f"ERROR loading {model_key} model ({model_identifier}): {e}")
+        print(f"Error type: {type(e).__name__}")
+        if "404" in str(e) or "not found" in str(e).lower():
+            print(f"Model {model_identifier} not found. Please check the model ID.")
         _models_cache[model_key] = "error"
         _models_cache[tokenizer_key] = "error"
         raise
@@ -173,12 +181,12 @@ with gr.Blocks(theme=gr.themes.Soft(), title="🎬 CineGuide Comparison") as dem
     gr.Markdown(
         f"""
         # 🎬 CineGuide vs. Base Model Comparison
-        Compare the fine-tuned CineGuide movie recommender with the base {BASE_MODEL_ID.split('/')[-1]} model.
+        Compare your fine-tuned CineGuide movie recommender with the base {BASE_MODEL_ID.split('/')[-1]} model.

-        **Base Model:** `{BASE_MODEL_ID}`
-        **Fine-tuned Model:** `{FINETUNED_MODEL_ID}`
+        **Base Model:** `{BASE_MODEL_ID}` (Standard Assistant)
+        **Fine-tuned Model:** `{FINETUNED_MODEL_ID}` (CineGuide - Specialized for Movies)

-        Type your movie-related query below and see how each model responds!
+        Type your movie-related query below and see how fine-tuning improves movie recommendations!

         ⚠️ **Note:** Models are loaded on first use and may take 30-60 seconds initially.
         """
@@ -205,7 +213,7 @@ with gr.Blocks(theme=gr.themes.Soft(), title="🎬 CineGuide Comparison") as dem

         with gr.Column(scale=1):
             gr.Markdown(f"## 🎬 CineGuide (Fine-tuned)")
-            gr.Markdown(f"*Specialized for movie recommendations*")
+            gr.Markdown(f"*Specialized movie recommendation model*")
             chatbot_ft = gr.ChatInterface(
                 respond_ft,
                 textbox=gr.Textbox(placeholder="Ask CineGuide about movies...", container=False, scale=7),
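One note on the new loading arguments: newer transformers releases deprecate `use_auth_token` in favor of `token`, so the calls added in this commit will emit a deprecation warning on current versions while still working. A forward-compatible variant of the same loading logic is sketched below; the function name `load_public_model` is illustrative and not part of app.py.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

def load_public_model(model_identifier: str):
    # token=False replaces the deprecated use_auth_token=False: no auth token
    # is sent, which matches this commit's intent for public model repos.
    tokenizer = AutoTokenizer.from_pretrained(
        model_identifier,
        trust_remote_code=True,
        token=False,
    )
    model = AutoModelForCausalLM.from_pretrained(
        model_identifier,
        torch_dtype=torch.bfloat16,
        device_map="auto",  # requires the accelerate package
        trust_remote_code=True,
        token=False,
    )
    model.eval()
    return model, tokenizer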