witchEverly committed on
Commit
d8b7142
·
verified ·
1 Parent(s): d6036da

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +22 -0
app.py ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from transformers import BlipProcessor, BlipForConditionalGeneration
3
+
4
# Load the captioning model and its processor once at module import time so
# Streamlit does not reload them on every rerun of the script.
#
# Fix: the original id "Salesforce/blip-2-captioning-base" is not a real
# checkpoint on the Hugging Face Hub, so from_pretrained() would fail at
# startup. The checkpoint that matches the BlipProcessor /
# BlipForConditionalGeneration classes imported above is the BLIP
# base captioning model below.
model_id = "Salesforce/blip-image-captioning-base"
model = BlipForConditionalGeneration.from_pretrained(model_id)
processor = BlipProcessor.from_pretrained(model_id)
8
+
9
def generate_caption(image):
    """Generate a text caption for *image* using the module-level BLIP model.

    Parameters
    ----------
    image : PIL.Image.Image | bytes | bytearray
        The picture to caption. Raw encoded bytes (e.g. the result of
        ``UploadedFile.getvalue()``) are decoded with Pillow first; any
        other value is forwarded to the processor unchanged.

    Returns
    -------
    str
        The generated caption with special tokens stripped.
    """
    # Fix: the Streamlit caller passes raw bytes from .getvalue(), which
    # BlipProcessor cannot consume directly — decode them into a PIL image
    # first. Pillow is a required dependency of transformers' image
    # processors, so the local import is safe in this environment.
    if isinstance(image, (bytes, bytearray)):
        import io
        from PIL import Image
        image = Image.open(io.BytesIO(image)).convert("RGB")
    inputs = processor(images=image, return_tensors="pt")
    outputs = model.generate(**inputs)
    return processor.decode(outputs[0], skip_special_tokens=True)
13
+
14
# --- Streamlit UI -----------------------------------------------------------
st.title('BLIP-2 Image Captioning Demo')

uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "png", "jpeg"])
if uploaded_file is not None:
    # Fix: the processor expects a decoded image (PIL / array), not the raw
    # bytes that .getvalue() returns. UploadedFile is file-like, so decode it
    # once with Pillow and reuse the same object for display and captioning —
    # this also avoids re-reading the upload after its file pointer has moved.
    from PIL import Image
    image = Image.open(uploaded_file).convert("RGB")
    # Display the image
    st.image(image, caption='Uploaded Image', use_column_width=True)
    # Generate and display caption
    caption = generate_caption(image)
    st.write(f"Caption: {caption}")