MatteoScript committed on
Commit
1b97fe5
·
verified ·
1 Parent(s): 4e47f5d

Update main.py

Browse files
Files changed (1) hide show
  1. main.py +11 -7
main.py CHANGED
@@ -47,11 +47,18 @@ class PostSpazio(BaseModel):
47
 
48
  #--------------------------------------------------- Generazione TESTO ------------------------------------------------------
49
  @app.post("/Genera")
50
- def read_root(request: Request, input_data: InputData):
51
  temperature = input_data.temperature
52
  max_new_tokens = input_data.max_new_tokens
53
  top_p = input_data.top_p
54
  repetition_penalty = input_data.repetition_penalty
 
 
 
 
 
 
 
55
  if input_data.instruction.startswith("http"):
56
  try:
57
  resp = requests.get(input_data.instruction)
@@ -60,7 +67,7 @@ def read_root(request: Request, input_data: InputData):
60
  except requests.exceptions.RequestException as e:
61
  input_data.instruction = ""
62
  history = []
63
- if input_data.systemRole != "" or input_data.systemStyle != "" or input_data.instruction != ""
64
  input_text = f'''
65
  {{
66
  "input": {{
@@ -82,10 +89,7 @@ def read_root(request: Request, input_data: InputData):
82
  '''
83
  else:
84
  input_text = input_data.input
85
- print(f"{datetime.now()} - Input Text: {input_text}")
86
- generated_response = generate(input_text, history, temperature, max_new_tokens, top_p, repetition_penalty)
87
- print(f"{datetime.now()} - Response Text: {generated_response}")
88
- return {"response": generated_response}
89
 
90
  def generate(prompt, history, temperature=0.7, max_new_tokens=30000, top_p=0.95, repetition_penalty=1.0):
91
  temperature = float(temperature)
@@ -115,7 +119,7 @@ def format_prompt(message, history):
115
 
116
  #--------------------------------------------------- Generazione TESTO ASYNC ------------------------------------------------------
117
  @app.post("/GeneraAsync")
118
- def read_rootAsync(request: Request, input_data: InputDataAsync):
119
  print(input_data.input)
120
  result_data = asyncio.run(GeneraTestoAsync("https://matteoscript-fastapi.hf.space/Genera", input_data))
121
  return {"response": result_data}
 
47
 
48
  #--------------------------------------------------- Generazione TESTO ------------------------------------------------------
49
@app.post("/Genera")
def generate_text(request: Request, input_data: InputData):
    """Handle POST /Genera: build the prompt from the request payload and return the model output.

    Args:
        request: The incoming FastAPI request (unused directly; kept for the route signature).
        input_data: Validated payload carrying sampling parameters and prompt fields.

    Returns:
        A dict ``{"response": <generated text>}``.
    """
    temperature = input_data.temperature
    max_new_tokens = input_data.max_new_tokens
    top_p = input_data.top_p
    repetition_penalty = input_data.repetition_penalty
    input_text = generate_input_text(input_data)
    # BUG FIX: the refactor moved `history = []` into generate_input_text(),
    # leaving `history` undefined in this scope — the generate() call below
    # would raise NameError on every request. Define it here.
    history = []
    print(f"{datetime.now()} - Input Text: {input_text}")
    generated_response = generate(input_text, history, temperature, max_new_tokens, top_p, repetition_penalty)
    print(f"{datetime.now()} - Response Text: {generated_response}")
    return {"response": generated_response}
60
+
61
+ def generate_input_text(input_data):
62
  if input_data.instruction.startswith("http"):
63
  try:
64
  resp = requests.get(input_data.instruction)
 
67
  except requests.exceptions.RequestException as e:
68
  input_data.instruction = ""
69
  history = []
70
+ if input_data.systemRole != "" or input_data.systemStyle != "" or input_data.instruction != "":
71
  input_text = f'''
72
  {{
73
  "input": {{
 
89
  '''
90
  else:
91
  input_text = input_data.input
92
+ return input_text
 
 
 
93
 
94
  def generate(prompt, history, temperature=0.7, max_new_tokens=30000, top_p=0.95, repetition_penalty=1.0):
95
  temperature = float(temperature)
 
119
 
120
  #--------------------------------------------------- Generazione TESTO ASYNC ------------------------------------------------------
121
@app.post("/GeneraAsync")
def generate_textAsync(request: Request, input_data: InputDataAsync):
    """Handle POST /GeneraAsync: fan the work out to the /Genera endpoint asynchronously.

    Args:
        request: The incoming FastAPI request (unused directly; kept for the route signature).
        input_data: Validated async-generation payload.

    Returns:
        A dict ``{"response": <result from the remote /Genera call>}``.
    """
    print(input_data.input)
    # Drive the coroutine to completion on a fresh event loop.
    generated = asyncio.run(
        GeneraTestoAsync("https://matteoscript-fastapi.hf.space/Genera", input_data)
    )
    return {"response": generated}