Eiad Gomaa committed
Commit 960b05f · 1 Parent(s): 335e8ff

Update app.py

Files changed (1)
  1. app.py +2 -2
app.py CHANGED
@@ -6,8 +6,8 @@ import torch
 def load_model():
     """Load model and tokenizer with caching"""
     try:
-        tokenizer = AutoTokenizer.from_pretrained("eyad-silx/Quasar-32B")
-        model = AutoModelForCausalLM.from_pretrained("eyad-silx/Quasar-32B")
+        tokenizer = AutoTokenizer.from_pretrained("NousResearch/Hermes-3-Llama-3.1-8B")
+        model = AutoModelForCausalLM.from_pretrained("NousResearch/Hermes-3-Llama-3.1-8B")
         return model, tokenizer
     except Exception as e:
         st.error(f"Error loading model: {str(e)}")
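For reference, here is a minimal sketch of how the updated load_model() might sit in the full app.py, assuming Streamlit's st.cache_resource provides the caching mentioned in the docstring; the decorator, the fallback return, and the imports other than torch are assumptions not shown in this diff:

import streamlit as st
import torch  # appears in the hunk context above
from transformers import AutoTokenizer, AutoModelForCausalLM

@st.cache_resource  # assumption: Streamlit resource caching backs the "with caching" docstring
def load_model():
    """Load model and tokenizer with caching"""
    try:
        tokenizer = AutoTokenizer.from_pretrained("NousResearch/Hermes-3-Llama-3.1-8B")
        model = AutoModelForCausalLM.from_pretrained("NousResearch/Hermes-3-Llama-3.1-8B")
        return model, tokenizer
    except Exception as e:
        st.error(f"Error loading model: {str(e)}")
        return None, None  # assumption: callers check for a failed load

The commit changes only the repository id passed to from_pretrained; the loading path and error handling are unchanged.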