import io

import torch
from diffusers import DiffusionPipeline as Pipe


class Generador:
    """Generates images from text prompts using several diffusers pipelines."""

    @staticmethod
    def img_to_bytes(image) -> bytes:
        # Serialize a PIL image to PNG-encoded bytes.
        buffer = io.BytesIO()
        image.save(buffer, format="PNG")
        return buffer.getvalue()

    @staticmethod
    def _generate(model_id: str, prompt: str) -> bytes:
        # Load the requested pipeline in half precision, run it on the GPU and
        # return the first generated image as PNG bytes; on failure, return the
        # error message encoded as UTF-8 bytes instead.
        try:
            pipeline = Pipe.from_pretrained(model_id, torch_dtype=torch.float16)
            pipeline.to("cuda")
            image = pipeline(prompt).images[0]
            return Generador.img_to_bytes(image)
        except Exception as e:
            return str(e).encode("utf-8")

    @staticmethod
    def using_runway_sd_15(prompt: str) -> bytes:
        return Generador._generate("runwayml/stable-diffusion-v1-5", prompt)

    @staticmethod
    def using_stability_sd_21(prompt: str) -> bytes:
        return Generador._generate("stabilityai/stable-diffusion-2-1", prompt)

    @staticmethod
    def using_realistic_v14(prompt: str) -> bytes:
        return Generador._generate("SG161222/Realistic_Vision_V1.4", prompt)

    @staticmethod
    def using_prompthero_openjourney(prompt: str) -> bytes:
        return Generador._generate("prompthero/openjourney", prompt)
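

# Illustrative usage sketch (not part of the original file): assumes a
# CUDA-capable GPU and that the model weights can be downloaded from the
# Hugging Face Hub. The prompt text and the output path "salida.png" are
# hypothetical examples.
if __name__ == "__main__":
    data = Generador.using_runway_sd_15("a watercolor painting of a lighthouse at dawn")
    # On failure the methods return the error message as UTF-8 bytes rather
    # than PNG data, so check the PNG signature before writing the file.
    if data.startswith(b"\x89PNG\r\n\x1a\n"):
        with open("salida.png", "wb") as f:
            f.write(data)
    else:
        print(data.decode("utf-8", errors="replace"))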