import streamlit as st
from PIL import Image
from transformers import pipeline
from gtts import gTTS
import torch

st.set_page_config(page_title="Your Image to Audio Story", page_icon="🦜")


def generate_caption(image_file):
    """Generate a short text caption for the uploaded image using BLIP."""
    image = Image.open(image_file)
    caption_generator = pipeline(
        "image-to-text",
        model="Salesforce/blip-image-captioning-base",
    )
    caption_results = caption_generator(image)
    caption = caption_results[0]["generated_text"]
    return caption


def generate_story(caption):
    """Turn the image caption into a children's story using a text-generation model."""
    story_generator = pipeline(
        "text-generation",
        model="Qwen/Qwen2-1.5B",
    )
    prompt = (
        "You are a talented children's story writer renowned for your creativity and captivating narratives. "
        "Using the image details provided below, please compose an enchanting tale tailored for children aged 3 to 10. "
        "Rather than simply rephrasing the image details, enrich your story with imaginative characters, quirky adventures, "
        "and delightful surprises. Let your narrative flow naturally and spark wonder in your young audience. "
        "Please ensure that your story is engaging, coherent, and falls between 100 and 300 words in length.\n\n"
        f"Image Details: {caption}\n\nStory:"
    )
    # Without max_new_tokens the pipeline stops after its default generation budget,
    # which is far too short for a 100-300 word story; 400 new tokens comfortably
    # covers 300 words, and sampling keeps successive stories from being identical.
    result = story_generator(
        prompt,
        num_return_sequences=1,
        max_new_tokens=400,
        do_sample=True,
    )
    full_text = result[0]["generated_text"]
    # The pipeline returns the prompt plus the continuation, so keep only the text
    # after the "Story:" marker.
    if "Story:" in full_text:
        story = full_text.split("Story:", 1)[1].strip()
    else:
        story = full_text.strip()
    # Enforce the requested length: hard-truncate at 300 words and flag short outputs.
    words = story.split()
    if len(words) > 300:
        story = " ".join(words[:300])
    elif len(words) < 100:
        story += "\n\n(Note: The generated story is shorter than the desired 100 words.)"
    return story


def text_to_speech(text, output_file="output.mp3"):
    """Convert the story text to speech with gTTS and save it as an MP3 file."""
    tts = gTTS(text=text, lang="en")
    tts.save(output_file)
    return output_file


def main():
    st.markdown("