Emmanuel Frimpong Asante committed
Commit 287eae5 · 1 Parent(s): 218f351

update space

Files changed (1): app.py (+4, -0)
app.py CHANGED
@@ -1,3 +1,4 @@
+
 import os
 import tensorflow as tf
 from keras.models import load_model
@@ -9,6 +10,7 @@ from pymongo import MongoClient
 from transformers import AutoModelForCausalLM, AutoTokenizer
 from concurrent.futures import ThreadPoolExecutor
 import torch
+import random
 
 # Ensure the Hugging Face token is set
 tok = os.environ.get('HF_Token')
@@ -160,6 +162,8 @@ print("Loading Llama 3.2 model and tokenizer...")
 model_name = "meta-llama/Llama-3.2-1B"
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 try:
+    # Use a random seed to avoid a potential CUDA random_device error
+    torch.manual_seed(random.randint(0, 10000))
     model = AutoModelForCausalLM.from_pretrained(model_name).to('cuda' if torch.cuda.is_available() else 'cpu')
     print("Llama 3.2 model and tokenizer loaded successfully.")
 except Exception as e:
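
For reference, a minimal standalone sketch of the patched loading path, assembled from the lines shown in this diff. The body of the except branch lies outside the hunk, so the fallback print below is an assumption, not the commit's actual error handling:

    import random

    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer

    model_name = "meta-llama/Llama-3.2-1B"
    tokenizer = AutoTokenizer.from_pretrained(model_name)

    try:
        # Seed torch's RNG before loading, as the commit does, to avoid a
        # potential CUDA random_device error during initialization.
        torch.manual_seed(random.randint(0, 10000))
        device = 'cuda' if torch.cuda.is_available() else 'cpu'
        model = AutoModelForCausalLM.from_pretrained(model_name).to(device)
        print("Llama 3.2 model and tokenizer loaded successfully.")
    except Exception as e:
        # Assumed fallback; the original except-body is not shown in this diff.
        print(f"Failed to load Llama 3.2 model and tokenizer: {e}")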