Commit: revert
app.py CHANGED
@@ -13,7 +13,7 @@ model = None
 @spaces.GPU
 def load_model():
     global tokenizer, model
-    if model is None
+    if model is None:
         print("Model yükleniyor...")
         tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
         model = AutoModelForCausalLM.from_pretrained(
@@ -21,27 +21,22 @@ def load_model():
         )
         print("Model yüklendi.")

-#
+# İlk başlatmada modeli yükle
 load_model()

 @spaces.GPU
 def generate(prompt):
     global tokenizer, model
-    if model is None or tokenizer is None: # Model yüklenmemişse yeniden yükle
-        print("UYARI: Model yüklenmemiş! Tekrar yükleniyor...")
-        load_model()
-
     inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
     output = model.generate(**inputs, max_length=500)
     response = tokenizer.decode(output[0], skip_special_tokens=True)
     return response

-# Gradio UI
+# Gradio UI
 demo = gr.Interface(
     fn=generate,
     inputs=gr.Textbox(placeholder="Enter prompt..."),
     outputs=gr.Textbox(),
 )

-
-demo.launch(server_name="0.0.0.0", server_port=7860, enable_api=True, share=True)
+demo.launch()
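For reference, below is a minimal sketch of app.py as it stands after this commit, assembled from the new side of the diff. Everything outside the two hunks is not shown here, so the imports, the MODEL_NAME value, and the from_pretrained keyword arguments elided at line 20 are assumptions rather than the Space's actual code. The change itself adds the missing colon on the "if model is None" check, fills in the comment before load_model(), drops the reload fallback inside generate(), and calls demo.launch() with its defaults instead of passing server_name, server_port, enable_api and share.

import spaces                      # assumed import, implied by the @spaces.GPU decorators
import gradio as gr                # assumed import, implied by gr.Interface / gr.Textbox
from transformers import AutoTokenizer, AutoModelForCausalLM  # implied by the loader calls

MODEL_NAME = "your-org/your-model"  # hypothetical placeholder; the real id is defined above the diff
tokenizer = None
model = None                        # context line shown in the first hunk header

@spaces.GPU
def load_model():
    global tokenizer, model
    if model is None:
        print("Model yükleniyor...")  # "Loading the model..."
        tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
        model = AutoModelForCausalLM.from_pretrained(
            MODEL_NAME,
            device_map="auto",  # assumed; the real keyword arguments (diff line 20) are not shown
        )
        print("Model yüklendi.")  # "Model loaded."

# İlk başlatmada modeli yükle ("load the model on first startup")
load_model()

@spaces.GPU
def generate(prompt):
    global tokenizer, model
    inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
    output = model.generate(**inputs, max_length=500)
    response = tokenizer.decode(output[0], skip_special_tokens=True)
    return response

# Gradio UI
demo = gr.Interface(
    fn=generate,
    inputs=gr.Textbox(placeholder="Enter prompt..."),
    outputs=gr.Textbox(),
)

demo.launch()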