Alysha Creelman
committed on
Removing image and adding tiny llama local model
app.py
CHANGED
@@ -7,7 +7,8 @@ import os
 # Inference client setup with token from environment
 token = os.getenv('HF_TOKEN')
 client = InferenceClient(model="HuggingFaceH4/zephyr-7b-alpha", token=token)
-pipe = pipeline("text-generation", "
+pipe = pipeline("text-generation", "TinyLlama/TinyLlama_v1.1", torch_dtype=torch.bfloat16, device_map="auto")
+# pipe = pipeline("text-generation", "microsoft/Phi-3-mini-4k-instruct", torch_dtype=torch.bfloat16, device_map="auto")
 
 # Global flag to handle cancellation
 stop_inference = False
@@ -145,7 +146,6 @@ def restart_chatbot():
 # Define interface
 with gr.Blocks(css=custom_css) as demo:
     gr.Markdown("<h2 style='text-align: center;'>🐛✏️ School AI Chatbot ✏️🐛</h2>")
-    gr.Image('wormington_headshot.jpg', type="filepath", elem_id="school_ai_image", show_label=False, interactive=False)
     gr.Markdown("<h1 style= 'text-align: center;'>Interact with Wormington Scholar 🐛 by selecting the appropriate level below.</h1>")
 
     with gr.Row():
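For context, the new `pipe` line only runs if `torch` and `transformers.pipeline` are imported earlier in app.py (the hunk header shows only `import os`). Below is a minimal sketch of the setup this commit introduces, with an illustrative smoke test; the prompt and `max_new_tokens` value are assumptions for demonstration, not part of the commit.

```python
# Sketch of the model setup introduced by this commit (assumed surrounding imports).
import os

import torch
from huggingface_hub import InferenceClient
from transformers import pipeline

# Remote inference client (unchanged by this commit).
token = os.getenv("HF_TOKEN")
client = InferenceClient(model="HuggingFaceH4/zephyr-7b-alpha", token=token)

# New local model: TinyLlama is small enough for modest hardware; bfloat16 halves
# memory use and device_map="auto" (requires accelerate) places weights on a GPU
# when one is available, otherwise on CPU.
pipe = pipeline(
    "text-generation",
    "TinyLlama/TinyLlama_v1.1",
    torch_dtype=torch.bfloat16,
    device_map="auto",
)

if __name__ == "__main__":
    # Quick smoke test of the local pipeline; prompt and length are illustrative.
    out = pipe("Explain photosynthesis to a fifth grader:", max_new_tokens=64)
    print(out[0]["generated_text"])
```

Worth noting: TinyLlama_v1.1 is a base checkpoint rather than a chat-tuned one, so it continues prompts as plain text, whereas the commented-out Phi-3 line points at an instruct model with a chat template.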