DharavathSri committed
Commit f37351a · verified · 1 Parent(s): cef2901

Update app.py

Files changed (1): app.py +54 -142
app.py CHANGED
@@ -1,154 +1,66 @@
+# We'll generate a sample Streamlit app for LLM fine-tuning and deployment simulation.
+# Since we can't actually fine-tune large models in this script due to constraints,
+# we'll simulate the UI and interaction as if the model was already fine-tuned.
+
+import os
+
+# Create a simple streamlit app template
+streamlit_app_code = """
 import streamlit as st
-from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
-import torch
-
-# ======================
-# 🎨 STYLING & LAYOUT
-# ======================
-st.set_page_config(
-    page_title="LLM Fine-Tuning Studio",
-    page_icon="🧠",
-    layout="wide"
-)
-
-# Custom CSS
-st.markdown("""
-<style>
-/* Main container */
-.main {
-    background: linear-gradient(135deg, #f5f7fa 0%, #c3cfe2 100%);
-}
-
-/* Headers */
-h1 {
-    color: #2c3e50;
-    text-align: center;
-    font-family: 'Arial', sans-serif;
-    border-bottom: 2px solid #4CAF50;
-    padding-bottom: 10px;
-}
-
-/* Sidebar */
-[data-testid="stSidebar"] {
-    background: linear-gradient(195deg, #2c3e50 0%, #4CAF50 100%) !important;
-    color: white;
-}
-
-/* Buttons */
-.stButton>button {
-    background: linear-gradient(to right, #4CAF50, #2E8B57);
-    color: white;
-    border: none;
-    border-radius: 25px;
-    padding: 10px 24px;
-    font-weight: bold;
-}
-
-/* Chat bubbles */
-.user-message {
-    background: #e3f2fd;
-    border-radius: 15px 15px 0 15px;
-    padding: 12px;
-    margin: 5px 0;
-}
-
-.bot-message {
-    background: #4CAF50;
-    color: white;
-    border-radius: 15px 15px 15px 0;
-    padding: 12px;
-    margin: 5px 0;
-}
-
-/* Input box */
-.stTextInput>div>div>input {
-    border-radius: 20px !important;
-    padding: 10px 15px !important;
-}
-</style>
-""", unsafe_allow_html=True)
-
-# ======================
-# 🧠 MODEL LOADING
-# ======================
-@st.cache_resource
-def load_model():
-    model_name = "mistralai/Mistral-7B-v0.1"
-    tokenizer = AutoTokenizer.from_pretrained(model_name)
-    model = AutoModelForCausalLM.from_pretrained(
-        model_name,
-        torch_dtype=torch.float16,
-        device_map="auto"
-    )
-    return tokenizer, model
-
-tokenizer, model = load_model()
-
-# ======================
-# 🎛️ SIDEBAR CONTROLS
-# ======================
-with st.sidebar:
-    st.title("⚙️ Fine-Tuning Controls")
-
-    st.subheader("Model Parameters")
-    temperature = st.slider("Temperature", 0.1, 1.0, 0.7, 0.05)
-    max_length = st.slider("Max Length", 50, 500, 150)
-
-    st.subheader("Fine-Tuning Options")
-    use_lora = st.checkbox("Use LoRA", True)
-    quantize = st.selectbox("Quantization", ["None", "4-bit", "8-bit"])
-
-    if st.button("🔄 Apply Changes"):
-        st.toast("Settings updated!", icon="✅")
-
-# ======================
-# 💬 MAIN CHAT INTERFACE
-# ======================
-st.title("🧠 LLM Fine-Tuning Studio")
-st.caption("Fine-tune and deploy state-of-the-art language models")
-
-# Initialize chat history
-if "messages" not in st.session_state:
-    st.session_state.messages = [
-        {"role": "assistant", "content": "Hello! I'm your fine-tuned AI assistant. How can I help you today?"}
-    ]
-
-# Display chat messages
-for message in st.session_state.messages:
-    if message["role"] == "assistant":
-        with st.chat_message("assistant"):
-            st.markdown(f'<div class="bot-message">{message["content"]}</div>', unsafe_allow_html=True)
+from transformers import pipeline
+
+st.set_page_config(page_title="LLM Fine-Tuned Chatbot", page_icon="🧠", layout="wide")
+
+# Custom CSS styling
+st.markdown(\"""
+<style>
+.main {
+    background-color: #f4f4f9;
+}
+.stTextInput>div>div>input {
+    border-radius: 10px;
+}
+.stButton>button {
+    background-color: #4CAF50;
+    color: white;
+    border-radius: 10px;
+    height: 3em;
+    width: 100%;
+}
+</style>
+\""", unsafe_allow_html=True)
+
+st.title("🧠 Fine-Tuned LLM Chatbot")
+st.subheader("Chat with your own fine-tuned LLM model")
+
+# Sidebar Info
+st.sidebar.title("Model Info")
+st.sidebar.info("This chatbot uses a fine-tuned LLM (simulated via Hugging Face pipeline)")
+
+# Load pipeline (simulation for actual fine-tuned model)
+@st.cache_resource
+def load_pipeline():
+    # Replace with your fine-tuned model, e.g., "your-username/your-fine-tuned-model"
+    return pipeline("text-generation", model="gpt2")
+
+generator = load_pipeline()
+
+# User input
+user_input = st.text_input("Enter your prompt here")
+
+if st.button("Generate Response"):
+    if user_input.strip() != "":
+        with st.spinner("Generating response..."):
+            response = generator(user_input, max_length=100, do_sample=True)[0]['generated_text']
+        st.success("Response:")
+        st.write(response)
     else:
-        with st.chat_message("user"):
-            st.markdown(f'<div class="user-message">{message["content"]}</div>', unsafe_allow_html=True)
-
-# Chat input
-if prompt := st.chat_input("Type your message..."):
-    st.session_state.messages.append({"role": "user", "content": prompt})
-
-    with st.chat_message("user"):
-        st.markdown(f'<div class="user-message">{prompt}</div>', unsafe_allow_html=True)
-
-    # Generate response
-    with st.chat_message("assistant"):
-        with st.spinner("🧠 Thinking..."):
-            inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
-            outputs = model.generate(
-                **inputs,
-                max_new_tokens=max_length,
-                temperature=temperature,
-                do_sample=True
-            )
-            response = tokenizer.decode(outputs[0], skip_special_tokens=True)
-
-        st.markdown(f'<div class="bot-message">{response}</div>', unsafe_allow_html=True)
-        st.session_state.messages.append({"role": "assistant", "content": response})
-
-# ======================
-# 📊 FINE-TUNE STATUS
-# ======================
-st.sidebar.markdown("---")
-st.sidebar.subheader("Training Metrics")
-st.sidebar.metric("Loss", "0.45", delta="-0.02")
-st.sidebar.metric("Accuracy", "87%", delta="+2%")
-st.sidebar.progress(75)
+        st.warning("Please enter a prompt to generate response.")
+"""

+# Save to file for deployment
+file_path = "/mnt/data/llm_finetuned_chatbot.py"
+with open(file_path, "w") as f:
+    f.write(streamlit_app_code)
+
+file_path
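
For reference, the removed version hard-coded `.to("cuda")` for the inputs while loading the model with `device_map="auto"`, so it would crash on CPU-only hardware. A minimal device-agnostic sketch of that generation path, assuming the same Mistral checkpoint; the dtype fallback is an illustration, not part of the commit:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "mistralai/Mistral-7B-v0.1"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
    device_map="auto",  # places weights on GPU if present, otherwise CPU
)

# Send inputs to wherever the model's weights landed,
# instead of assuming a CUDA device exists.
inputs = tokenizer("Hello!", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=50, do_sample=True, temperature=0.7)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))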
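
The generated template loads stock `gpt2` as a stand-in, and the comment in `load_pipeline` says to substitute a fine-tuned checkpoint. A sketch of that substitution, where the model id is the placeholder from the code's own comment (not a real Hub repository) and `max_new_tokens` replaces `max_length` so the cap applies to the completion rather than to prompt plus completion:

from transformers import pipeline

# Sketch only: the model id below is a placeholder, not a real repo.
generator = pipeline("text-generation", model="your-username/your-fine-tuned-model")

result = generator(
    "Explain LoRA fine-tuning in one sentence.",
    max_new_tokens=100,  # caps generated tokens; max_length counts the prompt too
    do_sample=True,
    temperature=0.7,
)
print(result[0]["generated_text"])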
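
Note that the committed `app.py` only writes the chatbot template to `/mnt/data/llm_finetuned_chatbot.py`; the trailing bare `file_path` is a notebook idiom that displays nothing when run as a script. One way to launch the written file, assuming the `streamlit` CLI is installed and the path exists:

import subprocess

# Launch the template that app.py wrote out; assumes the file exists
# and the `streamlit` executable is on PATH.
subprocess.run(
    ["streamlit", "run", "/mnt/data/llm_finetuned_chatbot.py"],
    check=True,  # raise if Streamlit exits with an error
)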