from fastapi import FastAPI
from pydantic import BaseModel
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Create the FastAPI instance
app = FastAPI()

# Load the T5 model and its tokenizer
model_name = "danibor/flan-t5-base-humanizer"
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Schema for the request body
class ContentRequest(BaseModel):
    content: str

# Build the humanizing prompt and run generation
def generate_humanized_content(content: str) -> str:
    prompt = f"""
    Change the following text to sound like a warm, engaging blog post written by a passionate human.
    Use vivid imagery, personal anecdotes, and conversational language. Ensure the entire text is rewritten:

    {content}

    Rewrite:
    """
    inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True)
    output = model.generate(
        inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        max_length=512,
        num_beams=4,
        do_sample=True,
        temperature=0.7,
        no_repeat_ngram_size=3,
        early_stopping=True,
    )
    decoded_output = tokenizer.decode(output[0], skip_special_tokens=True)
    print("Model output:", decoded_output)  # Log the raw model output
    return decoded_output

# API endpoint: accepts content and returns the "humanized" text
@app.post("/humanize/")
async def humanize_content(request: ContentRequest):
    humanized_content = generate_humanized_content(request.content)
    return {"humanized_content": humanized_content}

# Start the FastAPI app with `uvicorn main:app --reload`
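
# A minimal client sketch for exercising the endpoint once the server is running.
# This is only an illustration: it assumes the app is served on the default
# http://127.0.0.1:8000, that the `requests` package is installed, and the sample
# text is made up. Kept as comments so it does not execute when this module loads.
#
#   import requests
#
#   response = requests.post(
#       "http://127.0.0.1:8000/humanize/",
#       json={"content": "Our platform leverages AI to optimize workflows."},
#   )
#   print(response.json()["humanized_content"])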