Shining-Data committed on
Commit
9f8313e
·
verified ·
1 Parent(s): 5b1d6e2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -2
app.py CHANGED
@@ -83,7 +83,7 @@ def load_pipeline(model_name):
83
  if model_name in PIPELINES.keys():
84
  return PIPELINES[model_name]
85
  repo = MODELS[model_name]["repo_id"]
86
- tokenizer = AutoTokenizer.from_pretrained(repo)
87
  model = AutoModelForCausalLM.from_pretrained(
88
  repo,
89
  device_map=device,
@@ -167,11 +167,14 @@ def chat_response(user_msg, chat_history, system_prompt,
167
  enriched = system_prompt
168
 
169
  pipe = load_pipeline(model_name)
 
 
 
170
  prompt = format_conversation(history, enriched, pipe["tokenizer"])
171
 
172
 
173
  # TODO:
174
- debug += "\nPROMPT:\n" + text
175
 
176
  prompt_debug = f"\n\n--- Prompt Preview ---\n```\n{prompt}\n```"
177
  streamer = TextIterStreamer(pipe["tokenizer"],
 
83
  if model_name in PIPELINES.keys():
84
  return PIPELINES[model_name]
85
  repo = MODELS[model_name]["repo_id"]
86
+ tokenizer = AutoTokenizer.from_pretrained(repo, trust_remote_code=True)
87
  model = AutoModelForCausalLM.from_pretrained(
88
  repo,
89
  device_map=device,
 
167
  enriched = system_prompt
168
 
169
  pipe = load_pipeline(model_name)
170
+
171
+ # TODO:
172
+ debug += "\nLOAD MODEL:\n" + model_name
173
  prompt = format_conversation(history, enriched, pipe["tokenizer"])
174
 
175
 
176
  # TODO:
177
+ debug += "\nPROMPT:\n" + prompt
178
 
179
  prompt_debug = f"\n\n--- Prompt Preview ---\n```\n{prompt}\n```"
180
  streamer = TextIterStreamer(pipe["tokenizer"],