Update app.py
app.py
CHANGED
@@ -1,22 +1,98 @@
Of the previous 22-line version, only line 1 (import streamlit as st) is retained; lines 2–22 were removed and their contents are cut off in this view. The updated app.py reads:

import streamlit as st
from PIL import Image, ImageOps
import numpy as np
import pickle
from torchvision import transforms


# Run with: streamlit run app.py

# Hide deprecation warnings
import warnings
warnings.filterwarnings("ignore")

# BLIP model (image -> description)
from transformers import BlipForConditionalGeneration, BlipProcessor
blip_processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
blip_model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")

# Caption model (description -> social-media caption)
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
caption_tokenizer = AutoTokenizer.from_pretrained("prasanthsagirala/text-to-social-media-captions")
caption_model = AutoModelForSeq2SeqLM.from_pretrained("prasanthsagirala/text-to-social-media-captions")

# with open("models/blip_model.pkl", "rb") as f:
#     blip_model = pickle.load(f)

# with open("models/caption_tokenizer.pkl", "rb") as f:
#     caption_tokenizer = pickle.load(f)

# with open("models/caption_model.pkl", "rb") as f:
#     caption_model = pickle.load(f)


# Page configuration
st.set_page_config(
    page_title="Instamuse",           # browser tab title
    page_icon=":camera:",             # logo icon
    initial_sidebar_state='auto'      # initial sidebar state
)

# Sidebar (left side of page)
with st.sidebar:
    st.image('insta.png')
    st.title("InstaMuse")
    st.subheader(
        "Welcome to InstaMuse, the ultimate tool for turning your snapshots into social media sensations!")
    st.write(
        "Start turning heads with your posts. Use InstaMuse now and watch your likes soar!")

# Main page text
st.write("""
# InstaMuse 🌟📸

Struggling to find the perfect words to match your pictures? Let InstaMuse do the heavy \
lifting! With just a simple upload, our app uses cutting-edge technology to analyze your \
image and generate a witty, engaging, or inspiring caption that captures the essence of \
your moment.

Whether you’re a selfie savant, a nature explorer, or a foodie fanatic, InstaMuse is here to \
amplify your Instagram presence. Jazz up your feed with tailored captions that resonate with your \
followers and attract new eyes to your profile. It’s quick, easy, and fun!

**Drag your photo here and spark some caption magic!** ✨
""")

# For Bhumika!!!
# Modeling part
file = st.file_uploader("", type=["jpg", "png"])


def import_and_predict(image_data):
    """Describe the image with BLIP, then rewrite the description as a social-media caption."""
    transform = transforms.Compose([
        transforms.Resize((1080, 1080))
    ])
    image = transform(image_data)

    # BLIP description generation
    inputs = blip_processor(images=image, return_tensors="pt")
    generated_ids = blip_model.generate(**inputs, max_new_tokens=50)
    generated_text = blip_processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()

    # Caption generation
    inputs = ["Instagram captionize:" + generated_text]
    inputs = caption_tokenizer(inputs, max_length=512, truncation=True, return_tensors="pt")
    output = caption_model.generate(**inputs, num_beams=8, do_sample=True, min_length=10, max_length=64)
    decoded_output = caption_tokenizer.batch_decode(output, skip_special_tokens=True)[0]
    return decoded_output


if file is None:
    st.text("Upload your photo now and let the caption fun begin!")
else:
    image = Image.open(file).convert('RGB')
    st.image(image, use_column_width=True)
    predictions = import_and_predict(image)
    st.markdown("## Captions:")
    st.info(predictions)
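
One note on the code above: Streamlit re-executes the whole script on every interaction, so the four from_pretrained calls run on each rerun. A common way to avoid that is to wrap model loading in st.cache_resource. The sketch below is not part of this commit; it assumes Streamlit >= 1.18 (where st.cache_resource exists) and the same checkpoints used in app.py, and load_models is a hypothetical helper name.

# Sketch: cache model loading so Streamlit reruns don't re-instantiate the models.
# Not part of the commit above; assumes Streamlit >= 1.18 and the checkpoints from app.py.
import streamlit as st
from transformers import (
    AutoModelForSeq2SeqLM,
    AutoTokenizer,
    BlipForConditionalGeneration,
    BlipProcessor,
)


@st.cache_resource
def load_models():
    # Loaded once per server process and shared across reruns and sessions.
    blip_processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
    blip_model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
    caption_tokenizer = AutoTokenizer.from_pretrained("prasanthsagirala/text-to-social-media-captions")
    caption_model = AutoModelForSeq2SeqLM.from_pretrained("prasanthsagirala/text-to-social-media-captions")
    return blip_processor, blip_model, caption_tokenizer, caption_model


blip_processor, blip_model, caption_tokenizer, caption_model = load_models()

With this pattern, the module-level from_pretrained calls in app.py would be replaced by the single load_models() call, and the rest of the script (import_and_predict and the uploader logic) could stay as written.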