import streamlit as st
from PIL import Image
import numpy as np
import open_clip
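import torch  # used by the inference sketch at the end of this script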

# Precomputed reference vectors (k-NN index) and the CLIP model used to build them
knnpath = '20241204-ams-no-env-open_clip_ViT-H-14-378-quickgelu.npz'
clip_model_name = 'ViT-H-14-378-quickgelu'

# 'dfn5b' is the pretrained tag open_clip provides for this architecture
# (assumed here to match the checkpoint the .npz vectors were built from);
# without a tag, create_model_from_pretrained has no weights to load and raises.
model, preprocess = open_clip.create_model_from_pretrained(clip_model_name, pretrained='dfn5b')
tokenizer = open_clip.get_tokenizer(clip_model_name)
st.write(f'Loaded CLIP model: {clip_model_name}')  # was st.write(model), which dumps the full model repr

# Fix: the original called np.load(modelpath), but modelpath is never defined
knn = np.load(knnpath)
st.write(knn['walkability_vecs'].shape)

file = st.file_uploader('Upload An Image')
if file:
    with Image.open(file) as img:
        st.write(file)
        st.write(img.size)
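
        # A minimal sketch of the likely next step: embed the upload and score it
        # against the stored vectors. Assumptions: knn['walkability_vecs'] holds
        # unit-normalised CLIP image embeddings of shape (N, D) produced by this
        # same model; the mean-similarity readout is illustrative, not the app's
        # final scoring logic.
        with torch.no_grad():
            img_vec = model.encode_image(preprocess(img).unsqueeze(0))  # (1, D)
        img_vec = img_vec / img_vec.norm(dim=-1, keepdim=True)  # unit length

        # Cosine similarity = dot product of unit vectors; one score per reference
        sims = knn['walkability_vecs'] @ img_vec.squeeze(0).cpu().numpy()
        st.write('Mean cosine similarity to walkability vectors:', float(sims.mean()))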