Canstralian committed
Commit 1a5b5ad · verified · 1 Parent(s): 4a86329

Update app.py

Files changed (1)
app.py +40 -68
app.py CHANGED
@@ -1,79 +1,51 @@
  import gradio as gr
- from transformers import AutoModelForSequenceClassification, AutoTokenizer, AutoModel
- import dask.dataframe as dd
- from datasets import load_dataset
+ from transformers import AutoModelForCausalLM, AutoTokenizer
  import torch

- # Load models and tokenizer
+ # Load models and tokenizers
  def load_models():
-     # Load model 1
-     model_1 = AutoModel.from_pretrained("Canstralian/RedTeamAI")
-
-     # Load model 2
-     model_2 = AutoModel.from_pretrained("mradermacher/BashCopilot-6B-preview-GGUF")
-
-     # Load tokenizer and sequence classification model
-     tokenizer = AutoTokenizer.from_pretrained("bash1130/bert-base-finetuned-ynat")
-     model_3 = AutoModelForSequenceClassification.from_pretrained("bash1130/bert-base-finetuned-ynat")
-
-     return model_1, model_2, tokenizer, model_3
-
- # Load dataset using Dask
- def load_data():
-     # Example of loading a dataset using Dask (adjust paths as necessary)
-     splits = {'creative_content': 'data/creative_content-00000-of-00001.parquet'}
-     df = dd.read_parquet("hf://datasets/microsoft/orca-agentinstruct-1M-v1/" + splits["creative_content"])
-     return df.head()
-
- # Function for model inference
- def infer_model(input_text, model_type):
-     # Choose the model based on the input (you can add more models or conditions as needed)
-     if model_type == 'RedTeamAI':
-         model = models[0]
-     elif model_type == 'BashCopilot':
-         model = models[1]
-     elif model_type == 'BertModel':
-         model = models[3]
-         inputs = tokenizer(input_text, return_tensors="pt", padding=True, truncation=True)
-         outputs = model(**inputs)
-         return outputs.logits.argmax(dim=-1).item()
-     else:
-         return "Model type not recognized."
-
-     # If you need to generate outputs based on the models directly, you can use:
-     # outputs = model.generate(input_text) or other inference methods depending on the model.
-     return f"Model {model_type} inference not implemented yet."
-
- # Gradio Interface setup
- def build_interface():
-     # Load models and data
-     model_1, model_2, tokenizer, model_3 = load_models()
-     global models
-     models = [model_1, model_2, tokenizer, model_3]
-
-     # Load the dataset (example function, you can add more functionality)
-     data_preview = load_data()
-
-     print(f"Dataset preview: {data_preview}")
-
-     # Create Gradio interface
+     # Load a conversational model and tokenizer (you can customize it further)
+     model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")
+     tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
+     return model, tokenizer
+
+ # Generate responses
+ def chat_with_model(user_input, model, tokenizer, chat_history):
+     # Tokenize the user input and chat history
+     new_user_input_ids = tokenizer.encode(user_input + tokenizer.eos_token, return_tensors='pt')
+
+     # Append new user input to chat history
+     bot_input_ids = torch.cat([chat_history, new_user_input_ids], dim=-1) if chat_history is not None else new_user_input_ids
+
+     # Generate a response from the model
+     chat_history = model.generate(bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id)
+
+     # Decode the model's output and return
+     bot_output = tokenizer.decode(chat_history[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True)
+     return chat_history, bot_output
+
+ # Initialize model and tokenizer
+ model, tokenizer = load_models()
+
+ # Build Gradio interface
+ def build_gradio_interface():
      with gr.Blocks() as demo:
-         gr.Markdown("# Chagrin AI - Model Inference & Dataset Explorer")
-
-         # Model selection dropdown
-         model_type = gr.Dropdown(choices=["RedTeamAI", "BashCopilot", "BertModel"], label="Choose Model")
-
-         # Textbox for user input
-         input_text = gr.Textbox(label="Enter your input text")
-
-         # Button to trigger inference
-         result = gr.Textbox(label="Inference Result")
-
-         submit_btn = gr.Button("Run Inference")
-         submit_btn.click(infer_model, inputs=[input_text, model_type], outputs=result)
-
+         gr.Markdown("# Chagrin AI Chatbot")
+
+         # Set up chat window
+         chatbot = gr.Chatbot()
+
+         # Create text input box for user to type
+         user_input = gr.Textbox(label="Type your message", placeholder="Ask something...", interactive=True)
+
+         # Create button for sending the input
+         submit_btn = gr.Button("Send Message")
+
+         # Button click function
+         submit_btn.click(chat_with_model, inputs=[user_input, model, tokenizer, chatbot], outputs=[chatbot, chatbot])
+
      demo.launch()

- # Run the app
+ # Run the Gradio interface
  if __name__ == "__main__":
-     build_interface()
+     build_gradio_interface()
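
Note on the new wiring: submit_btn.click(chat_with_model, inputs=[user_input, model, tokenizer, chatbot], ...) passes the model and tokenizer objects where Gradio expects UI components, and gr.Chatbot holds (user, bot) message pairs rather than the token-id tensor that chat_with_model returns, so the handler will fail at runtime. Below is a minimal sketch of a wiring that should run; it assumes the tuple-style gr.Chatbot value (Gradio 3.x/4.x), and the respond helper plus the gr.State carrying the token-id history are illustrative additions, not part of the commit.

# Sketch only (not from the commit): assumes tuple-style gr.Chatbot
# (Gradio 3.x/4.x); "respond" is a hypothetical helper name.
import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")
tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")

def respond(user_input, pairs, history_ids):
    # Encode the new user turn, terminated with the EOS token DialoGPT expects.
    new_ids = tokenizer.encode(user_input + tokenizer.eos_token, return_tensors="pt")
    bot_input_ids = torch.cat([history_ids, new_ids], dim=-1) if history_ids is not None else new_ids

    # Generate, then decode only the tokens appended after the prompt.
    history_ids = model.generate(bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id)
    reply = tokenizer.decode(history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True)

    # Append the (user, bot) pair for display and clear the textbox.
    pairs = (pairs or []) + [(user_input, reply)]
    return pairs, history_ids, ""

def build_gradio_interface():
    with gr.Blocks() as demo:
        gr.Markdown("# Chagrin AI Chatbot")
        chatbot = gr.Chatbot()
        history_ids = gr.State(None)  # running token-id tensor, kept out of the UI
        user_input = gr.Textbox(label="Type your message", placeholder="Ask something...")
        submit_btn = gr.Button("Send Message")
        submit_btn.click(respond, inputs=[user_input, chatbot, history_ids],
                         outputs=[chatbot, history_ids, user_input])
    demo.launch()

if __name__ == "__main__":
    build_gradio_interface()

Keeping the model and tokenizer at module scope (as the commit already does) and passing only components through inputs/outputs is the usual Blocks pattern. Note also that max_length=1000 caps the whole sequence including history, so long conversations will eventually truncate; switching to max_new_tokens may be safer as the chat grows.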