Moved the device setup to module level
app.py
CHANGED
@@ -5,6 +5,9 @@ import torch
 tokenizer = AutoTokenizer.from_pretrained("nebiyu29/fintunned-v2-roberta_GA")
 model = AutoModelForSequenceClassification.from_pretrained("nebiyu29/fintunned-v2-roberta_GA")
 
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+model = model.to(device)
+
 # Define a function to split a text into segments of 512 tokens
 def split_text(text):
     # Tokenize the text
@@ -47,10 +50,6 @@ def classify_text(text):
     # Initialize empty list for predictions
     predictions = []
 
-    # Move device to GPU if available
-    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-    model = model.to(device)
-
     # Loop through segments, process, and store predictions
     for segment in segments:
        inputs = tokenizer([segment], padding=True, return_tensors="pt")
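For context, here is a minimal sketch of how the relevant part of app.py fits together after this change, with the device selected once at module level rather than inside classify_text. Only the lines visible in the diff are taken from the commit; the inference details inside the loop (moving the tokenized inputs to the device, running the model under torch.no_grad(), and taking the argmax of the logits) are assumptions added for illustration, as is the reliance on the split_text helper defined earlier in the file.

import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("nebiyu29/fintunned-v2-roberta_GA")
model = AutoModelForSequenceClassification.from_pretrained("nebiyu29/fintunned-v2-roberta_GA")

# After this commit the device is chosen once at import time and the model is
# moved to it here, instead of inside classify_text on every call.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)

def classify_text(text):
    # split_text (defined earlier in app.py) is assumed to return a list of
    # segments that each fit within the 512-token limit.
    segments = split_text(text)

    # Initialize empty list for predictions
    predictions = []

    # Loop through segments, process, and store predictions
    for segment in segments:
        inputs = tokenizer([segment], padding=True, return_tensors="pt")
        # Assumption: inputs must be moved to the same device as the model.
        inputs = {k: v.to(device) for k, v in inputs.items()}
        with torch.no_grad():
            logits = model(**inputs).logits
        # Assumption: the predicted class index is taken as the argmax of the logits.
        predictions.append(int(logits.argmax(dim=-1).item()))
    return predictions

Doing the device placement once at module level avoids re-checking CUDA availability and re-calling model.to(device) on every request, which is redundant once the model already lives on the target device.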