Spaces:
Runtime error
Runtime error
File size: 4,352 Bytes
95431fa eb723d1 810e901 8e9ad66 eb723d1 95431fa eb723d1 95431fa eb723d1 95431fa eb723d1 95431fa eb723d1 8e9ad66 810e901 eb723d1 95431fa 810e901 eb723d1 810e901 95431fa 810e901 95431fa 810e901 0501446 810e901 0501446 810e901 95431fa |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 |
from transformers import MBartForConditionalGeneration, MBart50Tokenizer, AutoModelForCausalLM, AutoTokenizer, pipeline
import gradio as gr
import torch
from diffusers import FluxPipeline
import os
# --- Model setup (runs once at import time) ---

# Translation: mBART-50 many-to-one (any supported language -> English).
model_name = "facebook/mbart-large-50-many-to-one-mmt"
tokenizer = MBart50Tokenizer.from_pretrained(model_name)
model = MBartForConditionalGeneration.from_pretrained(model_name)

# Text generation: a smaller causal LM (GPT-Neo 1.3B) to keep latency down.
text_generation_model_name = "EleutherAI/gpt-neo-1.3B"
text_tokenizer = AutoTokenizer.from_pretrained(text_generation_model_name)
text_model = AutoModelForCausalLM.from_pretrained(text_generation_model_name)

# Shared pipeline wrapping the generation model + tokenizer above.
text_generator = pipeline("text-generation", model=text_model, tokenizer=text_tokenizer)

# Hugging Face API token (needed because FLUX.1-dev is a gated model).
hf_token = os.getenv("HF_TOKEN")

# Text-to-image: FLUX.1-dev via diffusers.
# NOTE: the old `use_auth_token=` kwarg is deprecated (and removed in the
# recent diffusers/huggingface_hub releases that FLUX requires); the
# supported parameter name is `token=`.
pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    token=hf_token,  # authenticate against the gated repo
    torch_dtype=torch.bfloat16
)
pipe.enable_model_cpu_offload()  # offload idle submodules to CPU to save GPU memory
def generate_image_from_text(translated_text):
    """Render an image for the given English text with the Flux pipeline.

    Returns a ``(image, error)`` pair: the generated PIL image and ``None``
    on success, or ``None`` and a human-readable error string on failure.
    """
    print(f"Generating image from translated text: {translated_text}")
    try:
        # Run the text-to-image pipeline and keep the first sample.
        generated = pipe(translated_text).images[0]
    except Exception as exc:
        print(f"Error during image generation: {exc}")
        return None, f"Error during image generation: {exc}"
    print("Image generation completed.")
    return generated, None
def generate_short_paragraph_from_text(translated_text):
    """Continue the translated text into a short English paragraph.

    Returns the generated text on success; on failure returns an error
    string beginning with "Error during paragraph generation:" (the caller
    detects failures by substring check).
    """
    print(f"Generating a short paragraph from translated text: {translated_text}")
    try:
        # Conservative sampling settings keep the output short and focused.
        outputs = text_generator(
            translated_text,
            max_length=150,
            num_return_sequences=1,
            temperature=0.2,
            top_p=0.8,
        )
        paragraph = outputs[0]['generated_text']
    except Exception as exc:
        print(f"Error during paragraph generation: {exc}")
        return f"Error during paragraph generation: {exc}"
    print(f"Paragraph generation completed: {paragraph}")
    return paragraph
def translate_generate_paragraph_and_image(tamil_text):
    """Full pipeline: translate Tamil to English, then generate a short
    paragraph and an image from the translation.

    Always returns a 4-tuple ``(translated_text, paragraph, image, error)``;
    slots that could not be produced are filled with "" or ``None``.
    """
    # Step 1: Tamil -> English via mBART-50 (source lang ta_IN, forced en_XX).
    try:
        print("Translating Tamil text to English...")
        tokenizer.src_lang = "ta_IN"
        encoded = tokenizer(tamil_text, return_tensors="pt")
        generated_ids = model.generate(
            **encoded,
            forced_bos_token_id=tokenizer.lang_code_to_id["en_XX"],
        )
        decoded = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        translated_text = decoded[0]
        print(f"Translation completed: {translated_text}")
    except Exception as exc:
        return f"Error during translation: {exc}", "", None, None

    # Step 2: short paragraph from the English text. Failures come back as
    # strings containing "Error", which we detect by substring.
    paragraph = generate_short_paragraph_from_text(translated_text)
    if "Error" in paragraph:
        return translated_text, paragraph, None, None

    # Step 3: image from the English text; bail out if generation failed.
    image, error_message = generate_image_from_text(translated_text)
    if error_message:
        return translated_text, paragraph, None, error_message

    return translated_text, paragraph, image, None
# Gradio interface setup.
# BUG FIX: translate_generate_paragraph_and_image returns FOUR values
# (translated text, paragraph, image, error message), but the interface
# previously declared only three outputs — Gradio then fails at runtime
# with a "too many output values" error. A fourth Textbox for the error
# message makes the output arity match the function's return tuple.
iface = gr.Interface(
    fn=translate_generate_paragraph_and_image,
    inputs=gr.Textbox(lines=2, placeholder="Enter Tamil text here..."),
    outputs=[gr.Textbox(label="Translated English Text"),
             gr.Textbox(label="Generated Short Paragraph"),
             gr.Image(label="Generated Image"),
             gr.Textbox(label="Error Message")],
    title="Tamil to English Translation, Short Paragraph Generation, and Image Creation",
    description="Translate Tamil text to English using Facebook's mbart-large-50 model, generate a short paragraph, and create an image using the translated text.",
)
# Launch the Gradio app (blocks until the server is stopped).
iface.launch()
|