MatteoScript committed
Commit d9d21f7 · verified · 1 Parent(s): a199841

Update main.py

Files changed (1)
  1. main.py +41 -448
main.py CHANGED
@@ -1,450 +1,43 @@
- from fastapi import FastAPI, Request
- from fastapi.middleware.cors import CORSMiddleware  # Import the CORS middleware
- from pydantic import BaseModel
- from huggingface_hub import InferenceClient
- from datetime import datetime
- from gradio_client import Client
- import base64
- import requests
- import os
- import socket
- import time
- from enum import Enum
- import random
- import aiohttp
- import asyncio
- import json
-
- #--------------------------------------------------- FastAPI server definition ------------------------------------------------------
- app = FastAPI()
- client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
-
- app.add_middleware(
-     CORSMiddleware,
-     allow_origins=["*"],
-     allow_credentials=True,
-     allow_methods=["*"],
-     allow_headers=["*"],
  )

- class InputData(BaseModel):
-     input: str
-     systemRole: str = ''
-     systemStyle: str = ''
-     instruction: str = ''
-     temperature: float = 0.7
-     max_new_tokens: int = 2000
-     top_p: float = 0.95
-     repetition_penalty: float = 1.0
-     asincrono: bool = False
-     NumeroGenerazioni: int = 1
-     StringaSplit: str = '********'
-     NumeroCaratteriSplitInstruction: int = 30000
-     EliminaRisposteNonPertinenti: bool = False
-     UnificaRispostaPertinente: bool = False
-
- class InputDataAsync(InputData):
-     test: str = ''
-
- class PostSpazio(BaseModel):
-     nomeSpazio: str
-     input: str = ''
-     api_name: str = "/chat"
-
- def LoggaTesto(log_type, data, serializza=True):
-     if serializza:
-         formatted_data = json.dumps(data, indent=2)
-     else:
-         formatted_data = data
-     print(f"\n{datetime.now()}: ---------------------------------------------------------------| {log_type} |--------------------------------------------------------------\n{formatted_data}")
-
- #--------------------------------------------------- TEXT generation ------------------------------------------------------
- @app.post("/Genera")
- def generate_text(request: Request, input_data: InputData):
-     if not input_data.asincrono:
-         temperature = input_data.temperature
-         max_new_tokens = input_data.max_new_tokens
-         top_p = input_data.top_p
-         repetition_penalty = input_data.repetition_penalty
-         input_text = generate_input_text(input_data)
-         LoggaTesto("RICHIESTA", input_text, False)
-         max_new_tokens = min(max_new_tokens, 29500 - len(input_text))
-         history = []
-         generated_response = generate(input_text, history, temperature, max_new_tokens, top_p, repetition_penalty)
-         LoggaTesto("RISPOSTA", {"response": generated_response})
-         return {"response": generated_response}
-     else:
-         input_data.asincrono = False
-         if input_data.EliminaRisposteNonPertinenti:
-             msgEliminaRisposteNonPertinenti = " (Rispondi solo sulla base delle ISTRUZIONI che hai ricevuto. se non trovi corrispondenza tra RICHIESTA e ISTRUZIONI rispondi con <NOTFOUND>!!!)"
-             input_data.input = input_data.input + msgEliminaRisposteNonPertinenti
-             input_data.systemRole = input_data.systemRole + msgEliminaRisposteNonPertinenti
-         result_data = asyncio.run(GeneraTestoAsync("https://matteoscript-fastapi.hf.space/Genera", input_data))
-         LoggaTesto("RISPOSTA ASINCRONA", {"response": result_data})
-         if input_data.EliminaRisposteNonPertinenti:
-             result_data = [item for item in result_data if "NOTFOUND" not in item["response"]]
-         if input_data.UnificaRispostaPertinente:
-             input_data.input = f'''Metti insieme le seguenti risposte. Basati solo su questo TESTO e non AGGIUNGERE ALTRO!!!!: {result_data}'''
-             input_data.systemRole = ''
-             input_data.systemStyle = 'Rispondi in ITALIANO'
-             input_data.instruction = ''
-             result_data = asyncio.run(GeneraTestoAsync("https://matteoscript-fastapi.hf.space/Genera", input_data))
-             LoggaTesto("RISPOSTA ASINCRONA UNIFICATA", {"response": result_data})
-         return {"response": result_data}
-
- def generate_input_text(input_data):
-     if input_data.instruction.startswith("http"):
-         try:
-             resp = requests.get(input_data.instruction)
-             resp.raise_for_status()  # Raise an exception for HTTP errors
-             input_data.instruction = resp.text
-         except requests.exceptions.RequestException as e:
-             input_data.instruction = ""
-     history = []
-     if input_data.systemRole != "" or input_data.systemStyle != "" or input_data.instruction != "":
-         input_text = f'''
-         {{
-             "input": {{
-                 "role": "system",
-                 "content": "{input_data.systemRole}",
-                 "style": "{input_data.systemStyle}"
-             }},
-             "messages": [
-                 {{
-                     "role": "instructions",
-                     "content": "{input_data.instruction} "("{input_data.systemStyle}")"
-                 }},
-                 {{
-                     "role": "user",
-                     "content": "{input_data.input}"
-                 }}
-             ]
-         }}
-         '''
-     else:
-         input_text = input_data.input
-     return input_text
-
- def generate(prompt, history, temperature=0.7, max_new_tokens=30000, top_p=0.95, repetition_penalty=1.0):
-     temperature = float(temperature)
-     if temperature < 1e-2:
-         temperature = 1e-2
-     top_p = float(top_p)
-     generate_kwargs = dict(
-         temperature=temperature,
-         max_new_tokens=max_new_tokens,
-         top_p=top_p,
-         repetition_penalty=repetition_penalty,
-         do_sample=True,
-         seed=random.randint(0, 10**7),
-     )
-     formatted_prompt = format_prompt(prompt, history)
-     output = client.text_generation(formatted_prompt, **generate_kwargs, stream=False, details=False)
-     return output
-
- def format_prompt(message, history):
-     prompt = "<s>"
-     for user_prompt, bot_response in history:
-         prompt += f"[INST] {user_prompt} [/INST]"
-         prompt += f" {bot_response}</s> "
-     now = datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")
-     prompt += f"[{now}] [INST] {message} [/INST]"
-     return prompt
-
- #--------------------------------------------------- ASYNC TEXT generation ------------------------------------------------------
- @app.post("/GeneraAsync")
- def generate_textAsync(request: Request, input_data: InputDataAsync):
-     result_data = asyncio.run(GeneraTestoAsync("https://matteoscript-fastapi.hf.space/Genera", input_data))
-     return {"response": result_data}
-
- async def make_request(session, token, data, url, index, max_retries=3):
-     headers = {
-         'Content-Type': 'application/json',
-         'Authorization': 'Bearer ' + token
-     }
-     if (int(index)+1) % 3 == 1:
-         data['max_new_tokens'] = data['max_new_tokens']
-     elif (int(index)+1) % 3 == 2:
-         data['max_new_tokens'] = max(200, data['max_new_tokens'] - 200)
-     else:
-         data['max_new_tokens'] = data['max_new_tokens'] + 200
-     for _ in range(max_retries):
-         try:
-             async with session.post(url, headers=headers, json=data) as response:
-                 response.raise_for_status()
-                 try:
-                     result_data = await response.json()
-                 except aiohttp.ContentTypeError:
-                     result_data = await response.text()
-                 return result_data
-         except (asyncio.TimeoutError, aiohttp.ClientError, requests.exceptions.HTTPError) as e:
-             LoggaTesto("ERRORE ASYNC", {e})
-             if isinstance(e, (asyncio.TimeoutError, requests.exceptions.HTTPError)) and e.response.status in [502, 504]:
-                 break
-
-         await asyncio.sleep(1)
-     raise Exception("Max retries reached or skipping retries. Unable to make the request.")
-
- async def CreaListaInput(input_data):
-     if input_data.instruction.startswith("http"):
-         try:
-             resp = requests.get(input_data.instruction)
-             resp.raise_for_status()
-             input_data.instruction = resp.text
-         except requests.exceptions.RequestException as e:
-             input_data.instruction = ""
-     try:
-         lista_dizionari = []
-         nuova_lista_dizionari = []
-         lista_dizionari = json.loads(input_data.instruction)
-         if lista_dizionari and "Titolo" in lista_dizionari[0]:
-             nuova_lista_dizionari = DividiInstructionJSON(lista_dizionari, input_data)
-         else:
-             nuova_lista_dizionari = DividiInstructionText(input_data)
-     except json.JSONDecodeError:
-         nuova_lista_dizionari = DividiInstructionText(input_data)
-     return nuova_lista_dizionari
-
- def split_at_space_or_dot(input_string, length):
-     delimiters = ['\n\n', '.\n', ';\n', '.', ' ']
-     positions = [input_string.rfind(d, 0, length) for d in delimiters]
-     valid_positions = [pos for pos in positions if pos >= 0]
-     lastpos = max(valid_positions) if valid_positions else length
-     indice_divisione = int(lastpos)
-     return indice_divisione + 1
-
- def DividiInstructionJSON(lista_dizionari, input_data):
-     ListaInput = []
-     nuova_lista_dizionari = []
-     for dizionario in lista_dizionari:
-         titolo = dizionario["Titolo"]
-         testo_completo = dizionario["Testo"]
-         while len(testo_completo) > input_data.NumeroCaratteriSplitInstruction:
-             indice_divisione = split_at_space_or_dot(testo_completo, input_data.NumeroCaratteriSplitInstruction)
-             indice_divisione_precedente = split_at_space_or_dot(testo_completo, input_data.NumeroCaratteriSplitInstruction-100)
-             sottostringa = testo_completo[:indice_divisione].strip()
-             testo_completo = testo_completo[indice_divisione_precedente:].strip()
-             nuovo_dizionario = {"Titolo": titolo, "Testo": sottostringa}
-             nuova_lista_dizionari.append(nuovo_dizionario)
-
-         if len(testo_completo) > 0:
-             nuovo_dizionario = {"Titolo": titolo, "Testo": testo_completo}
-             nuova_lista_dizionari.append(nuovo_dizionario)
-
-     input_strings = input_data.input.split(input_data.StringaSplit)
-     for input_string in input_strings:
-         for dizionario in nuova_lista_dizionari:
-             data = {
-                 'input': input_string,
-                 'instruction': str(dizionario),
-                 'temperature': input_data.temperature,
-                 'max_new_tokens': input_data.max_new_tokens,
-                 'top_p': input_data.top_p,
-                 'repetition_penalty': input_data.repetition_penalty,
-                 'systemRole': input_data.systemRole,
-                 'systemStyle': input_data.systemStyle
-             }
-             ListaInput.append(data)
-     return ListaInput
-
- def DividiInstructionText(input_data):
-     ListaInput = []
-     input_str = input_data.instruction
-     StringaSplit = input_data.StringaSplit
-     sottostringhe = []
-     indice_inizio = 0
-     if len(input_str) > input_data.NumeroCaratteriSplitInstruction:
-         while indice_inizio < len(input_str):
-             lunghezza_sottostringa = split_at_space_or_dot(input_str[indice_inizio:], input_data.NumeroCaratteriSplitInstruction)
-             sottostringhe.append(input_str[indice_inizio:indice_inizio + lunghezza_sottostringa].strip())
-             indice_inizio += lunghezza_sottostringa
-     else:
-         sottostringhe.append(input_str)
-     testoSeparato = StringaSplit.join(sottostringhe)
-     instruction_strings = testoSeparato.split(StringaSplit)
-     input_strings = input_data.input.split(input_data.StringaSplit)
-     for input_string in input_strings:
-         for instruction_string in instruction_strings:
-             data = {
-                 'input': input_string.strip(),
-                 'instruction': str([instruction_string.strip()]),
-                 'temperature': input_data.temperature,
-                 'max_new_tokens': input_data.max_new_tokens,
-                 'top_p': input_data.top_p,
-                 'repetition_penalty': input_data.repetition_penalty,
-                 'systemRole': input_data.systemRole,
-                 'systemStyle': input_data.systemStyle
-             }
-             ListaInput.append(data)
-     return ListaInput
-
- async def GeneraTestoAsync(url, input_data):
-     token = os.getenv('TOKEN')
-     async with aiohttp.ClientSession() as session:
-         tasks = []
-         ListaInput = await CreaListaInput(input_data)
-         for data in ListaInput:
-             LoggaTesto("RICHIESTA ASINCRONA", data)
-             tasks.extend([make_request(session, token, data, url, index) for index in range(input_data.NumeroGenerazioni)])
-         return await asyncio.gather(*tasks)
-
-
- #--------------------------------------------------- IMAGE generation ------------------------------------------------------
- style_image = {
-     "PROFESSIONAL-PHOTO": {
-         "descrizione": "Professional photo {prompt} . Vivid colors, Mirrorless, 35mm lens, f/1.8 aperture, ISO 100, natural daylight",
-         "negativePrompt": "out of frame, lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"
-     },
-     "CINEMATIC-PHOTO": {
-         "descrizione": "cinematic photo {prompt} . 35mm photograph, film, bokeh, professional, 4k, highly detailed",
-         "negativePrompt": "drawing, painting, crayon, sketch, graphite, impressionist, noisy, blurry, soft, deformed, ugly"
-     },
-     "CINEMATIC-PORTRAIT": {
-         "descrizione": "cinematic portrait {prompt} 8k, ultra realistic, good vibes, vibrant",
-         "negativePrompt": "drawing, painting, crayon, sketch, graphite, impressionist, noisy, blurry, soft, deformed, ugly"
-     },
-     "LINE-ART-DRAWING": {
-         "descrizione": "line art drawing {prompt} . professional, sleek, modern, minimalist, graphic, line art, vector graphics",
-         "negativePrompt": "anime, photorealistic, 35mm film, deformed, glitch, blurry, noisy, off-center, deformed, cross-eyed, closed eyes, bad anatomy, ugly, disfigured, mutated, realism, realistic, impressionism, expressionism, oil, acrylic"
-     },
-     "COMIC": {
-         "descrizione": "comic {prompt} . graphic illustration, comic art, graphic novel art, vibrant, highly detailed",
-         "negativePrompt": "photograph, deformed, glitch, noisy, realistic, stock photo"
-     },
-     "ADVERTISING-POSTER-STYLE": {
-         "descrizione": "advertising poster style {prompt} . Professional, modern, product-focused, commercial, eye-catching, highly detailed",
-         "negativePrompt": "noisy, blurry, amateurish, sloppy, unattractive"
-     },
-     "RETAIL-PACKAGING-STYLE": {
-         "descrizione": "retail packaging style {prompt} . vibrant, enticing, commercial, product-focused, eye-catching, professional, highly detailed",
-         "negativePrompt": "noisy, blurry, amateurish, sloppy, unattractive"
-     },
-     "GRAFFITI-STYLE": {
-         "descrizione": "graffiti style {prompt} . street art, vibrant, urban, detailed, tag, mural",
-         "negativePrompt": "ugly, deformed, noisy, blurry, low contrast, realism, photorealistic"
-     },
-     "POP-ART-STYLE": {
-         "descrizione": "pop Art style {prompt} . bright colors, bold outlines, popular culture themes, ironic or kitsch",
-         "negativePrompt": "ugly, deformed, noisy, blurry, low contrast, realism, photorealistic, minimalist"
-     },
-     "ISOMETRIC-STYLE": {
-         "descrizione": "isometric style {prompt} . vibrant, beautiful, crisp, detailed, ultra detailed, intricate",
-         "negativePrompt": "deformed, mutated, ugly, disfigured, blur, blurry, noise, noisy, realistic, photographic"
-     },
-     "LOW-POLY-STYLE": {
-         "descrizione": "low-poly style {prompt}. ambient occlusion, low-poly game art, polygon mesh, jagged, blocky, wireframe edges, centered composition",
-         "negativePrompt": "noisy, sloppy, messy, grainy, highly detailed, ultra textured, photo"
-     },
-     "CLAYMATION-STYLE": {
-         "descrizione": "claymation style {prompt} . sculpture, clay art, centered composition, play-doh",
-         "negativePrompt": ""
-     },
-     "PROFESSIONAL-3D-MODEL": {
-         "descrizione": "professional 3d model {prompt} . octane render, highly detailed, volumetric, dramatic lighting",
-         "negativePrompt": "ugly, deformed, noisy, low poly, blurry, painting"
-     },
-     "ANIME-ARTWORK": {
-         "descrizione": "anime artwork {prompt} . anime style, key visual, vibrant, studio anime, highly detailed",
-         "negativePrompt": "photo, deformed, black and white, realism, disfigured, low contrast"
-     },
-     "ETHEREAL-FANTASY-CONCEPT-ART": {
-         "descrizione": "ethereal fantasy concept art of {prompt} . magnificent, celestial, ethereal, painterly, epic, majestic, magical, fantasy art, cover art, dreamy",
-         "negativePrompt": "photographic, realistic, realism, 35mm film, dslr, cropped, frame, text, deformed, glitch, noise, noisy, off-center, deformed, cross-eyed, closed eyes, bad anatomy, ugly, disfigured, sloppy, duplicate, mutated, black and white"
-     },
-     "CYBERNETIC-STYLE": {
-         "descrizione": "cybernetic style {prompt} . futuristic, technological, cybernetic enhancements, robotics, artificial intelligence themes",
-         "negativePrompt": "ugly, deformed, noisy, blurry, low contrast, realism, photorealistic, historical, medieval"
-     },
-     "FUTURISTIC-STYLE": {
-         "descrizione": "futuristic style {prompt} . sleek, modern, ultramodern, high tech, detailed",
-         "negativePrompt": "ugly, deformed, noisy, blurry, low contrast, realism, photorealistic, vintage, antique"
-     },
-     "SCI-FI-STYLE": {
-         "descrizione": "sci-fi style {prompt} . futuristic, technological, alien worlds, space themes, advanced civilizations",
-         "negativePrompt": "ugly, deformed, noisy, blurry, low contrast, realism, photorealistic, historical, medieval"
-     },
-     "DIGITAL-ART": {
-         "descrizione": "Digital Art {prompt} . vibrant, cute, digital, handmade",
-         "negativePrompt": ""
-     },
-     "SIMPLE-LOGO": {
-         "descrizione": "Minimalist Logo {prompt} . material design, primary colors, stylized, minimalist",
-         "negativePrompt": "3D, high detail, noise, grainy, blurry, painting, drawing, photo, disfigured"
-     },
-     "MINIMALISTIC-LOGO": {
-         "descrizione": "Ultra-minimalist Material Design logo for a BRAND: {prompt} . simple, few colors, clean lines, minimal details, modern color palette, no shadows",
-         "negativePrompt": "3D, high detail, noise, grainy, blurry, painting, drawing, photo, disfigured"
-     }
- }
-
- class InputImage(BaseModel):
-     input: str
-     negativePrompt: str = ''
-     style: str = ''
-     steps: int = 25
-     cfg: int = 6
-     seed: int = -1
-
- @app.post("/Immagine")
- def generate_image(request: Request, input_data: InputImage):
-     client = Client("https://manjushri-sdxl-1-0.hf.space/")
-
-     if input_data.style:
-         print(input_data.style)
-         if input_data.style == 'RANDOM':
-             random_style = random.choice(list(style_image.keys()))
-             style_info = style_image[random_style]
-             input_data.input = style_info["descrizione"].format(prompt=input_data.input)
-             input_data.negativePrompt = style_info["negativePrompt"]
-         elif input_data.style in style_image:
-             style_info = style_image[input_data.style]
-             input_data.input = style_info["descrizione"].format(prompt=input_data.input)
-             input_data.negativePrompt = style_info["negativePrompt"]
-     max_attempts = 2
-     attempt = 0
-     while attempt < max_attempts:
-         try:
-             result = client.predict(
-                 input_data.input,           # str: prompt, 77-token limit (everything over 77 is truncated)
-                 input_data.negativePrompt,  # str: negative prompt, 77-token limit
-                 1024,                       # int | float: height (between 512 and 1024)
-                 1024,                       # int | float: width (between 512 and 1024)
-                 input_data.cfg,             # int | float: guidance scale, how closely the AI follows the prompt (between 1 and 15)
-                 input_data.steps,           # int | float: number of iterations (between 25 and 100)
-                 0,                          # int | float: seed, 0 is random
-                 "Yes",                      # str: upscale?
-                 "",                         # str: embedded prompt
-                 "",                         # str: embedded negative prompt
-                 0.99,                       # int | float: refiner denoise start % (between 0.7 and 0.99)
-                 100,                        # int | float: refiner number of iterations % (between 1 and 100)
-                 api_name="/predict"
-             )
-             image_url = result[0]
-             print(image_url)
-             with open(image_url, 'rb') as img_file:
-                 img_binary = img_file.read()
-                 img_base64 = base64.b64encode(img_binary).decode('utf-8')
-                 return {"response": img_base64}
-         except requests.exceptions.HTTPError as e:
-             time.sleep(1)
-             attempt += 1
-             if attempt < max_attempts:
-                 continue
-             else:
-                 return {"error": "Errore interno del server persistente"}
-     return {"error": "Numero massimo di tentativi raggiunto"}
-
-
- #--------------------------------------------------- PostSpazio API ------------------------------------------------------
- @app.post("/PostSpazio")
- def generate_postspazio(request: Request, input_data: PostSpazio):
-     client = Client(input_data.nomeSpazio)
-     result = client.predict(
-         input_data.input,
-         api_name=input_data.api_name
-     )
-     return {"response": result}
-
- @app.get("/")
- def read_general():
-     return {"response": "Benvenuto. Per maggiori info: https://matteoscript-fastapi.hf.space/docs"}

+ from contextlib import asynccontextmanager
+ from http import HTTPStatus
+ from telegram import Update
+ from telegram.ext import Application, CommandHandler
+ from telegram.ext._contexttypes import ContextTypes
+ from fastapi import FastAPI, Request, Response
+
+ # Initialize python telegram bot
+ ptb = (
+     Application.builder()
+     .updater(None)
+     .token('6770617809:AAEhytQUOl3uZOFINVE7-o0KkIoAz8perGU')  # replace <your-bot-token>
+     .read_timeout(7)
+     .get_updates_read_timeout(42)
+     .build()
  )

+ @asynccontextmanager
+ async def lifespan(_: FastAPI):
+     await ptb.bot.setWebhook('https://matteoscript-telegrambot.hf.space/')  # replace <your-webhook-url>
+     async with ptb:
+         await ptb.start()
+         yield
+         await ptb.stop()
+
+ # Initialize FastAPI app (similar to Flask)
+ app = FastAPI(lifespan=lifespan)
+
+ @app.post("/")
+ async def process_update(request: Request):
+     print('entrato')
+     req = await request.json()
+     print(req)
+     update = Update.de_json(req, ptb.bot)
+     await ptb.process_update(update)
+     return Response(status_code=HTTPStatus.OK)
+
+ # Example handler
+ async def start(update, _: ContextTypes.DEFAULT_TYPE):
+     """Send a message when the command /start is issued."""
+     await update.message.reply_text("starting...")
+
+ ptb.add_handler(CommandHandler("start", start))
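
A rough usage sketch, not part of this commit: the new main.py exposes a FastAPI app whose "/" route receives Telegram webhook POSTs, so it can be served locally with uvicorn while the webhook points at a public URL. The module path main:app and port 7860 below are assumptions based on a typical Hugging Face Space setup, not values taken from the diff.

# run_local.py (hypothetical helper, assumes main.py is in the current directory)
import uvicorn

if __name__ == "__main__":
    # Telegram will POST updates to "/" once setWebhook points at this server's public URL.
    uvicorn.run("main:app", host="0.0.0.0", port=7860)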