Alysha Creelman committed
Commit 327f48b · 1 Parent(s): ff1b46a

Fixing issue by adding HF token to inference client

Files changed (2)
  1. app.py +4 -2
  2. get-pip.py +0 -0
app.py CHANGED
@@ -2,9 +2,11 @@ import gradio as gr
 from huggingface_hub import InferenceClient
 import torch
 from transformers import pipeline
+import os
 
-# Inference client setup
-client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
+# Inference client setup with token from environment
+token = os.getenv('HF_TOKEN')
+client = InferenceClient(model="HuggingFaceH4/zephyr-7b-beta", token=token)
 pipe = pipeline("text-generation", "microsoft/Phi-3-mini-4k-instruct", torch_dtype=torch.bfloat16, device_map="auto")
 
 # Global flag to handle cancellation
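
For context, a minimal sketch of how the patched client setup behaves at runtime. The token handling mirrors the diff above; the chat_completion call, prompt, and max_tokens value are illustrative assumptions, not code from this Space.

import os
from huggingface_hub import InferenceClient

# Read the token from the environment, as the commit does.
token = os.getenv("HF_TOKEN")
client = InferenceClient(model="HuggingFaceH4/zephyr-7b-beta", token=token)

# Illustrative request; chat_completion is a standard InferenceClient
# method, but this prompt is an assumption, not part of the commit.
response = client.chat_completion(
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
    max_tokens=64,
)
print(response.choices[0].message.content)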
get-pip.py ADDED
The diff for this file is too large to render.
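
Note that os.getenv('HF_TOKEN') only works if the token is present in the process environment: on a Hugging Face Space that is typically configured as a repository secret, and for local runs it can be exported in the shell first (e.g. export HF_TOKEN=...). A small guard, sketched below and not part of the commit, makes the failure mode explicit instead of silently passing token=None:

import os

# Assumed guard, not in the commit: fail fast when the secret is missing
# rather than letting InferenceClient fall back to unauthenticated requests.
if os.getenv("HF_TOKEN") is None:
    raise RuntimeError("HF_TOKEN is not set; add it as a Space secret or export it locally")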