Update main.py
main.py
CHANGED
@@ -18,15 +18,15 @@ class ContentRequest(BaseModel):
 def generate_humanized_content(content: str) -> str:
     prompt = f"""
     Example input: "Summer is hot."
-    Example output: "
+    Example output: "Summer is a sizzling season that loves to give us a warm, steamy welcome..."
 
-
+    change the following text to sound like a warm, engaging blog post written by a passionate human.
     Use vivid imagery, personal anecdotes, and conversational language. Ensure the entire text is rewritten:
     {content}
     Rewrite:
     """
     inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True)
-    output = model.generate(inputs["input_ids"], max_length=512, num_beams=
+    output = model.generate(inputs["input_ids"], max_length=512, num_beams=4, do_sample=True, temperature=0.7, early_stopping=True)
     decoded_output = tokenizer.decode(output[0], skip_special_tokens=True)
     print("Model output:", decoded_output)  # print the model output
     return decoded_output
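For context, here is a minimal, self-contained sketch of how the updated function could run on its own. The diff only shows that `tokenizer` and `model` exist at module scope, so the checkpoint name ("google/flan-t5-base") and the standalone setup below are assumptions, not part of this commit.

# Minimal sketch assuming a seq2seq checkpoint; the model name is illustrative only.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

MODEL_NAME = "google/flan-t5-base"  # assumed checkpoint, not named in the diff
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME)

def generate_humanized_content(content: str) -> str:
    prompt = f"""
    Example input: "Summer is hot."
    Example output: "Summer is a sizzling season that loves to give us a warm, steamy welcome..."

    change the following text to sound like a warm, engaging blog post written by a passionate human.
    Use vivid imagery, personal anecdotes, and conversational language. Ensure the entire text is rewritten:
    {content}
    Rewrite:
    """
    inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True)
    # Beam sampling: 4 beams combined with sampling at temperature 0.7,
    # stopping beam search early once all beams are finished.
    output = model.generate(
        inputs["input_ids"],
        max_length=512,
        num_beams=4,
        do_sample=True,
        temperature=0.7,
        early_stopping=True,
    )
    decoded_output = tokenizer.decode(output[0], skip_special_tokens=True)
    print("Model output:", decoded_output)  # print the model output for debugging
    return decoded_output

if __name__ == "__main__":
    print(generate_humanized_content("Summer is hot and the days are long."))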