Spaces:
Sleeping
Sleeping
File size: 1,651 Bytes
86d39de 3f40561 e7dcd52 6b27907 e7dcd52 3f40561 6b27907 3f40561 86d39de 3f40561 bf1807a 6b27907 bf1807a 6b27907 bf1807a 86d39de 3f40561 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 |
import gradio as gr
from transformers import AutoTokenizer, AutoModel
import torch
from gradio_client import Client
from functools import lru_cache
# Cache the model and tokenizer using lru_cache
@lru_cache(maxsize=1)
def load_model_and_tokenizer():
    """Load the sentence-embedding model and its tokenizer.

    Wrapped in ``lru_cache(maxsize=1)`` so the pair is loaded from disk
    exactly once and reused on every subsequent call.
    """
    # Local path inside the Space; replace with your own model path.
    path = "./all-MiniLM-L6-v2"
    return AutoTokenizer.from_pretrained(path), AutoModel.from_pretrained(path)
# Eagerly load the (cached) model and tokenizer once at import time so the
# first request does not pay the model-loading cost.
tokenizer, model = load_model_and_tokenizer()
# Function to detect context (simplified)
def detect_context(input_text):
    """Embed *input_text* and return a list of context labels.

    Currently a placeholder: the sentence embedding is computed but the
    result is always ``["general"]`` until real classification is added.
    """
    encoded = tokenizer([input_text], padding=True, truncation=True, return_tensors="pt")
    # Inference only — no gradients needed.
    with torch.no_grad():
        output = model(**encoded)
    # Mean-pool the token embeddings into one sentence vector.
    # Unused for now; kept for the future context-classification step.
    _embedding = output.last_hidden_state.mean(dim=1).numpy()
    return ["general"]
# Client for the remote translation Space (English -> French per the
# interface description below).
translation_client = Client("Frenchizer/space_3")
def translate_text(input_text):
    """Forward *input_text* to the remote translation Space and return its reply."""
    result = translation_client.predict(input_text)
    return result
def process_request(input_text):
    """Detect the input's context, log it, and return the translated text."""
    context = detect_context(input_text)
    # Context is only logged for now; it does not yet influence translation.
    print(f"Detected context: {context}")
    return translate_text(input_text)
# Gradio interface: one text box in, translated text out.
interface = gr.Interface(
    fn=process_request,
    inputs="text",
    outputs="text",
    title="Frenchizer",
    description="Translate text from English to French with context detection.",
)

# Start the app. (Fixed: the original line ended with a stray ` |`,
# which is a syntax error.)
interface.launch()