Alysha Creelman committed on
Commit
f366fbb
·
unverified ·
1 Parent(s): 6bc04a0

Removing image and adding tiny llama local model

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -7,7 +7,8 @@ import os
7
  # Inference client setup with token from environment
8
  token = os.getenv('HF_TOKEN')
9
  client = InferenceClient(model="HuggingFaceH4/zephyr-7b-alpha", token=token)
10
- pipe = pipeline("text-generation", "microsoft/Phi-3-mini-4k-instruct", torch_dtype=torch.bfloat16, device_map="auto")
 
11
 
12
  # Global flag to handle cancellation
13
  stop_inference = False
@@ -145,7 +146,6 @@ def restart_chatbot():
145
  # Define interface
146
  with gr.Blocks(css=custom_css) as demo:
147
  gr.Markdown("<h2 style='text-align: center;'>🍎✏️ School AI Chatbot ✏️🍎</h2>")
148
- gr.Image('wormington_headshot.jpg', type="filepath", elem_id="school_ai_image", show_label=False, interactive=False)
149
  gr.Markdown("<h1 style= 'text-align: center;'>Interact with Wormington Scholar πŸ› by selecting the appropriate level below.</h1>")
150
 
151
  with gr.Row():
 
7
  # Inference client setup with token from environment
8
  token = os.getenv('HF_TOKEN')
9
  client = InferenceClient(model="HuggingFaceH4/zephyr-7b-alpha", token=token)
10
+ pipe = pipeline("text-generation", "TinyLlama/TinyLlama_v1.1", torch_dtype=torch.bfloat16, device_map="auto")
11
+ # pipe = pipeline("text-generation", "microsoft/Phi-3-mini-4k-instruct", torch_dtype=torch.bfloat16, device_map="auto")
12
 
13
  # Global flag to handle cancellation
14
  stop_inference = False
 
146
  # Define interface
147
  with gr.Blocks(css=custom_css) as demo:
148
  gr.Markdown("<h2 style='text-align: center;'>🍎✏️ School AI Chatbot ✏️🍎</h2>")
 
149
  gr.Markdown("<h1 style= 'text-align: center;'>Interact with Wormington Scholar πŸ› by selecting the appropriate level below.</h1>")
150
 
151
  with gr.Row():