soufyane committed on
Commit
b7a60d3
·
verified ·
1 Parent(s): 0db2352

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -51
app.py CHANGED
@@ -1,53 +1,3 @@
1
- import gradio as gr
2
- import spaces
3
- import torch
4
- import keras
5
- import keras_nlp
6
  import tensorflow as tf
7
 
8
- gpus = tf.config.experimental.list_physical_devices('GPU')
9
- if gpus:
10
- try:
11
- # Restrict TensorFlow to only allocate memory as needed
12
- for gpu in gpus:
13
- tf.config.experimental.set_memory_growth(gpu, True)
14
- except RuntimeError as e:
15
- print(e)
16
-
17
- keras.utils.set_random_seed(42)
18
-
19
- def create_model():
20
- gemma_lm = keras_nlp.models.CausalLM.from_preset("hf://soufyane/gemma_2b_instruct_FT_DATA_SCIENCE_lora36_1")
21
- return gemma_lm
22
-
23
- gemma_lm = create_model()
24
-
25
- @spaces.GPU
26
- def generate_answer(history, question):
27
- # Replace this with the actual code to generate the answer using your model
28
- answer = gemma_lm.generate(f"You are an AI Agent specialized to answer to questions about Data Science and be greatfull and nice and helpfull\n\nQuestion:\n{question}\n\nAnswer:\n", max_length=1024)
29
- history.append((question, answer))
30
- return history
31
-
32
- # Gradio interface
33
- with gr.Blocks() as demo:
34
- gr.Markdown("# Chatbot")
35
- chatbot = gr.Chatbot()
36
- with gr.Row():
37
- txt = gr.Textbox(show_label=False, placeholder="Enter your question here...")
38
- txt.submit(generate_answer, [chatbot, txt], chatbot)
39
-
40
- # Launch the interface
41
- demo.launch()
42
-
43
- ###################################
44
- # zero = torch.Tensor([0]).cuda()
45
- # print(zero.device) # <-- 'cpu' 🤔
46
-
47
- # @spaces.GPU
48
- # def greet(n):
49
- # print(zero.device) # <-- 'cuda:0' 🤗
50
- # return f"Hello {zero + n} Tensor"
51
-
52
- # demo = gr.Interface(fn=greet, inputs=gr.Number(), outputs=gr.Text())
53
- # demo.launch()
 
 
 
 
 
 
1
  import tensorflow as tf
2
 
3
+ print("Num GPUs Available: ", len(tf.config.list_physical_devices('GPU')))