Update main.py
Browse files
main.py
CHANGED
@@ -17,12 +17,12 @@ class ContentRequest(BaseModel):
|
|
17 |
# Humanize的Prompt
|
18 |
def generate_humanized_content(content: str) -> str:
|
19 |
prompt = f"""
|
20 |
-
Rewrite the following text to make it sound more human, engaging, and relatable:
|
21 |
-
|
22 |
Rewrite:
|
23 |
"""
|
24 |
inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True)
|
25 |
-
output = model.generate(inputs["input_ids"], max_length=
|
26 |
decoded_output = tokenizer.decode(output[0], skip_special_tokens=True)
|
27 |
print("Model output:", decoded_output) # 打印模型输出
|
28 |
return decoded_output
|
|
|
# Humanize prompt
def generate_humanized_content(content: str) -> str:
    """Rewrite *content* so it reads as more human, engaging, and relatable.

    Wraps the input text in a rewrite instruction, runs it through the
    module-level ``tokenizer``/``model`` pair, and returns the decoded
    generation.

    Args:
        content: The text to be rewritten.

    Returns:
        The decoded model output with special tokens stripped.
        NOTE(review): for a causal LM this likely still contains the prompt
        prefix — confirm the model type and strip it if so.
    """
    prompt = f"""
Rewrite the following text to make it sound more human, engaging, and relatable. Ensure the entire text is rewritten:
{content}
Rewrite:
"""
    inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True)
    # Fix: pass the attention mask alongside the ids. With padding=True the
    # tokenizer returns one, and generate() on padded input without it is
    # unreliable (transformers also warns about this).
    # Dropped early_stopping=True from the original call: it is a
    # beam-search-only flag and a no-op with num_beams=1.
    output = model.generate(
        inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        max_length=1024,
        num_beams=1,
        do_sample=True,  # sampling => output is non-deterministic by design
        temperature=0.7,
        top_k=50,
    )
    decoded_output = tokenizer.decode(output[0], skip_special_tokens=True)
    print("Model output:", decoded_output)  # debug trace of the raw generation
    return decoded_output
|