Update app.py
app.py CHANGED
@@ -5,16 +5,32 @@ import gradio as gr
 from transformers import MarianMTModel, MarianTokenizer
 import os
 
+# Model and language settings
 model_name = "Helsinki-NLP/opus-mt-mul-en"
 model = MarianMTModel.from_pretrained(model_name)
 tokenizer = MarianTokenizer.from_pretrained(model_name)
 
-[2 lines removed; original content not shown in this view]
+# Define a function for language translation
+def translate_text(input_text, language):
+    # Dictionary mapping the selected language to the respective language tag
+    language_map = {
+        "Tamil": "ta",
+        "French": "fr",
+        "Hindi": "hi",
+        "German": "de"
+    }
+
+    # Prepare input with the appropriate language code for the model
+    lang_prefix = f">>{language_map[language]}<< "  # Add the prefix needed for the model
+    text_with_lang = lang_prefix + input_text
+    inputs = tokenizer(text_with_lang, return_tensors="pt", padding=True)
+
+    # Perform translation
     translated_tokens = model.generate(**inputs)
     translation = tokenizer.decode(translated_tokens[0], skip_special_tokens=True)
     return translation
 
+# Function to query the Gemini API for creative text generation
 def query_gemini_api(translated_text, gemini_api_key):
     url = "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash-latest:generateContent"
     headers = {"Content-Type": "application/json"}

@@ -31,6 +47,7 @@ def query_gemini_api(translated_text, gemini_api_key):
     else:
         return f"Error: {response.status_code} - {response.text}"
 
+# Function to query the image generation API
 def query_image(payload):
     huggingface_api_key = os.getenv('HUGGINGFACE_API_KEY')
     API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev"

@@ -38,25 +55,28 @@ def query_image(payload):
     response = requests.post(API_URL, headers=headers, json=payload)
     return response.content
 
-[6 lines removed; original content not shown in this view]
+# Function to process the entire input
+def process_input(input_text, language):
+    gemini_api_key = os.getenv('GEMINI_API_KEY')  # Use your Gemini API key
+    translated_output = translate_text(input_text, language)  # Translate text from selected language to English
+    creative_output = query_gemini_api(translated_output, gemini_api_key)  # Generate creative text
+    image_bytes = query_image({"inputs": translated_output})  # Generate an image based on the translated text
+    image = Image.open(io.BytesIO(image_bytes))  # Convert bytes to image
+
     return translated_output, creative_output, image
 
-[1 line removed; original content not shown in this view]
+# Create Gradio Interface
 iface = gr.Interface(
     fn=process_input,
-    inputs=[gr.Textbox(label="
+    inputs=[gr.Textbox(label="Enter text in the selected language"),
+            gr.Dropdown(choices=["Tamil", "French", "Hindi", "German"], label="Select Source Language")],
     outputs=[
-        gr.Textbox(label="Translated Text"),
+        gr.Textbox(label="Translated Text (English)"),
         gr.Textbox(label="Creative Text"),
         gr.Image(label="Generated Image")
     ],
-    title="TRANSART🎨
-    description="
+    title="TRANSART 🎨",
+    description="Select a language, enter text, and get the English translation along with creative content and image."
 )
 
 iface.launch()
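Note: the hunks start at line 5, so the top of app.py sits outside this diff. A minimal sketch of the imports those first lines presumably contain, inferred only from the modules the changed code uses (the first hunk header confirms import gradio as gr; the rest are assumptions):

    import requests        # assumed: HTTP calls in query_gemini_api and query_image
    import io              # assumed: wraps the raw image bytes returned by the API
    from PIL import Image  # assumed: decodes the generated image for Gradio
    import gradio as gr    # shown as context in the first hunk header

With the new translate_text in place, a quick local sanity check (hypothetical input, assuming the Helsinki-NLP/opus-mt-mul-en weights download successfully) could look like:

    print(translate_text("Bonjour tout le monde", "French"))  # expect an English sentence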