import requests
from PIL import Image
import streamlit as st
from transformers import TrOCRProcessor, VisionEncoderDecoderModel

# Load the TrOCR processor and model for printed-text recognition
processor = TrOCRProcessor.from_pretrained("microsoft/trocr-large-printed")
model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-large-printed")

st.title("Duh!")

# Fetch the CAPTCHA image from the Parivahan portal
url = "https://parivahan.gov.in/rcdlstatus/DispplayCaptcha?txtp_cd=1&bkgp_cd=2&noise_cd=2&gimp_cd=3&txtp_length=5&pfdrid_c=true?1429026471&pfdrid_c=true"
image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

col1, col2 = st.columns(2)

# Run OCR: preprocess the image, generate token IDs, and decode them into text
pixel_values = processor(images=image, return_tensors="pt").pixel_values
generated_ids = model.generate(pixel_values)
generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]

col1.image(image, use_container_width=True)
col2.subheader(f"Detected Text: {generated_text}")
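
# To launch the app locally (assuming this file is saved as app.py):
#   streamlit run app.py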