from fastapi import FastAPI
from pydantic import BaseModel
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Create the FastAPI instance
app = FastAPI()

# Load the T5 model and its tokenizer
model_name = "danibor/flan-t5-base-humanizer"
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Define the structure of the request body
class ContentRequest(BaseModel):
    content: str

# Build the humanizing prompt and run the model
def generate_humanized_content(content: str) -> str:
    prompt = f"""
    Change the following text to sound like a warm, engaging blog post written by a passionate human.
    Use vivid imagery, personal anecdotes, and conversational language. Ensure the entire text is rewritten:
    {content}
    Rewrite:
    """
    inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True)
    output = model.generate(
        **inputs,  # pass input_ids and attention_mask together
        max_length=512, num_beams=4, do_sample=True, temperature=0.7,
        no_repeat_ngram_size=3, early_stopping=True,
    )
    decoded_output = tokenizer.decode(output[0], skip_special_tokens=True)
    print("Model output:", decoded_output)  # Log the raw model output
    return decoded_output

# API endpoint: receives content and returns the "humanized" text
@app.post("/humanize/")
async def humanize_content(request: ContentRequest):
    humanized_content = generate_humanized_content(request.content)
    return {"humanized_content": humanized_content}

# To start the FastAPI app, run: `uvicorn main:app --reload`
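
# Optional: allow running this file directly with `python main.py`.
# Minimal sketch; assumes this file is saved as main.py and uvicorn is installed.
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="127.0.0.1", port=8000)

# Example request against the running server (the input text below is hypothetical):
#   curl -X POST http://127.0.0.1:8000/humanize/ \
#        -H "Content-Type: application/json" \
#        -d '{"content": "Our product leverages synergies to deliver value."}'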