openfree committed
Commit 5bd1a65 · verified · 1 Parent(s): 3081cf0

Update app.py

Files changed (1)
  1. app.py +46 -13
app.py CHANGED
@@ -13,28 +13,61 @@ from transformers import pipeline as hf_pipeline
 ##############################################################################
 # 1) ZeroGPU environment handling + device, dtype setup
 ##############################################################################
-# If running in a ZeroGPU environment, always use CPU; otherwise decide based on CUDA availability
-if os.getenv("ZERO_GPU"):
-    device = "cpu"
-else:
+# Try to initialize ZeroGPU
+try:
+    import zerogpu
+    zerogpu.init()
     device = "cuda" if torch.cuda.is_available() else "cpu"
+except ImportError:
+    # The zerogpu package is not installed
+    if os.getenv("ZERO_GPU"):
+        print("ZeroGPU environment variable is set but zerogpu package is not installed.")
+        device = "cpu"
+    else:
+        device = "cuda" if torch.cuda.is_available() else "cpu"
+except Exception as e:
+    print(f"Error initializing ZeroGPU: {e}")
+    device = "cpu"

 # bfloat16 only on GPU, float32 otherwise
 dtype = torch.bfloat16 if device == "cuda" else torch.float32

+print(f"Using device: {device}, dtype: {dtype}")
+
 ##############################################################################
 # 2) Load models: translation model, DiffusionPipeline
 ##############################################################################
-translator = hf_pipeline(
-    "translation",
-    model="Helsinki-NLP/opus-mt-ko-en",
-    device=0 if device == "cuda" else -1
-)
+try:
+    translator = hf_pipeline(
+        "translation",
+        model="Helsinki-NLP/opus-mt-ko-en",
+        device=0 if device == "cuda" else -1
+    )

-pipe = DiffusionPipeline.from_pretrained(
-    "black-forest-labs/FLUX.1-schnell",
-    torch_dtype=dtype
-).to(device)
+    pipe = DiffusionPipeline.from_pretrained(
+        "black-forest-labs/FLUX.1-schnell",
+        torch_dtype=dtype
+    ).to(device)
+
+    print("Models loaded successfully")
+except Exception as e:
+    print(f"Error loading models: {e}")
+    # Dummy functions for handling model-load errors
+    def dummy_translator(text):
+        return [{'translation_text': text}]
+
+    class DummyPipe:
+        def __call__(self, **kwargs):
+            from PIL import Image
+            import numpy as np
+            dummy_img = Image.fromarray(np.zeros((512, 512, 3), dtype=np.uint8))
+            class DummyResult:
+                def __init__(self, img):
+                    self.images = [img]
+            return DummyResult(dummy_img)
+
+    translator = dummy_translator
+    pipe = DummyPipe()

 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 2048
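
The fallback objects only pay off if the rest of app.py calls translator and pipe through the same interface as the real models, and that downstream code is not part of this hunk. The sketch below is therefore an assumption of how such a caller might look; the helper name generate_image, the Hangul check, and the width/height arguments are illustrative and not taken from the commit.

import re

def generate_image(prompt, translator, pipe, width=512, height=512):
    # Translate Korean prompts to English before diffusion (assumed behavior;
    # the Hangul range check is illustrative). Both the Helsinki-NLP pipeline
    # and dummy_translator return a list of dicts with a 'translation_text' key.
    if re.search(r"[\uac00-\ud7a3]", prompt):
        prompt = translator(prompt)[0]["translation_text"]
    # Both DiffusionPipeline and DummyPipe accept keyword arguments and return
    # an object whose .images attribute is a list of PIL images.
    result = pipe(prompt=prompt, width=width, height=height)
    return result.images[0]

Because dummy_translator and DummyPipe mirror the return shapes of the real objects, a caller like this keeps working after a model-load failure, producing blank 512x512 images instead of crashing.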