maahin commited on
Commit
2bf176d
·
verified ·
1 Parent(s): 76bf82c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -3
app.py CHANGED
@@ -1,14 +1,23 @@
 
1
  import streamlit as st
2
  from PIL import Image
3
  import torch
4
  from transformers import AutoProcessor, AutoModelForVision2Seq
5
 
 
 
 
 
 
 
 
 
6
  # Load the PaliGemma model and processor
7
  @st.cache_resource
8
  def load_model():
9
  model_name = "google/paligemma2-3b-mix-224"
10
- processor = AutoProcessor.from_pretrained(model_name)
11
- model = AutoModelForVision2Seq.from_pretrained(model_name)
12
  return processor, model
13
 
14
  processor, model = load_model()
@@ -30,4 +39,4 @@ if uploaded_file:
30
  output = model.generate(**inputs)
31
 
32
  answer = processor.batch_decode(output, skip_special_tokens=True)[0]
33
- st.success(f"Answer: {answer}")
 
1
+ import os
2
  import streamlit as st
3
  from PIL import Image
4
  import torch
5
  from transformers import AutoProcessor, AutoModelForVision2Seq
6
 
7
# Read the Hugging Face access token injected through Spaces secrets.
HF_TOKEN = os.environ.get("HF_KEY")

# Fail fast: without a token the gated PaliGemma weights cannot be fetched.
if not HF_TOKEN:
    st.error("❌ Hugging Face API key not found! Set it as 'HF_KEY' in Spaces secrets.")
    st.stop()
15
# Load the PaliGemma model and processor (cached for the app's lifetime).
@st.cache_resource
def load_model():
    """Download and cache the PaliGemma processor/model pair.

    Returns:
        tuple: ``(processor, model)`` for ``google/paligemma2-3b-mix-224``,
        fetched with the module-level ``HF_TOKEN`` for gated-repo access.
    """
    repo_id = "google/paligemma2-3b-mix-224"
    loaded_processor = AutoProcessor.from_pretrained(repo_id, token=HF_TOKEN)
    loaded_model = AutoModelForVision2Seq.from_pretrained(repo_id, token=HF_TOKEN)
    return loaded_processor, loaded_model
22
 
23
  processor, model = load_model()
 
39
  output = model.generate(**inputs)
40
 
41
  answer = processor.batch_decode(output, skip_special_tokens=True)[0]
42
+ st.success(f"Answer: {answer}")