# NOTE: Removed scraped Hugging Face Spaces page residue (status lines, file
# size, commit-hash row, and line-number gutter) that had been pasted above
# the module and made it unparseable as Python.
import gradio as gr
from huggingface_hub import InferenceClient
import langdetect
import json
# Initialize Hugging Face client with the new model
# NOTE: constructing the client at import time means network credentials /
# connectivity issues surface on module import, not on first request.
client = InferenceClient(model="OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5")
# Default system message to guide the assistant; shown (and editable) in the UI.
default_system_message = (
"You are NLPToolkit Agent, an advanced assistant specializing in NLP tasks such as text summarization, "
"sentiment analysis, text classification, and entity recognition. Adapt your responses to the selected task."
)
# Predefined task-specific instructions, keyed by the dropdown choice in the UI.
# respond() appends the selected entry to the system message; unknown keys fall
# back to 'General NLP task' there.
task_instructions = {
"Summarization": "Summarize the text clearly and concisely.",
"Sentiment Analysis": "Analyze the sentiment of the text (positive, neutral, negative).",
"Text Classification": "Classify the text into relevant categories.",
"Entity Recognition": "Identify and list named entities in the text."
}
# Enhanced text preprocessing function (from pipeline)
def preprocess_input(text):
    """
    Clean and validate the user's input text with language detection.

    Returns the stripped text when it is detected as English; otherwise
    returns a human-readable error string. Callers (respond) distinguish
    errors from valid text by checking the string's prefix.
    """
    # Guard empty / whitespace-only / None input up front: langdetect.detect
    # raises LangDetectException on such input anyway, so answer with the
    # same message without relying on the exception path.
    if not text or not text.strip():
        return "Unable to detect language. Please provide valid text input."
    try:
        # Detect input language; only English is accepted downstream.
        language = langdetect.detect(text)
        if language != "en":
            return f"Input language detected as {language}. Please provide input in English."
    except langdetect.lang_detect_exception.LangDetectException:
        return "Unable to detect language. Please provide valid text input."
    except Exception as e:
        return f"An error occurred while processing the text: {str(e)}"
    return text.strip()
# Model inference function (from pipeline)
def run_model_inference(messages, max_tokens, temperature, top_p):
    """
    Stream a chat completion from the Hugging Face InferenceClient.

    Yields the accumulated response text after each received chunk, so the
    caller can either stream partial output or keep only the last value.
    On any failure a single error string is yielded instead.
    """
    try:
        response = ""
        for chunk in client.chat_completion(
            messages=messages,
            max_tokens=max_tokens,
            stream=True,
            temperature=temperature,
            top_p=top_p,
        ):
            # The final streamed chunk's delta may carry content=None;
            # guard it so `str + None` does not raise TypeError mid-stream.
            token = chunk.choices[0].delta.content
            if token:
                response += token
            yield response
    except Exception as e:
        # Surface the failure as the streamed payload rather than raising,
        # so the UI shows an error message instead of crashing.
        yield f"Error generating response: {str(e)}"
# Output postprocessing (from pipeline)
def postprocess_output(response):
    """Apply final cleanup to the model's reply before showing it to the user."""
    # Currently just trims surrounding whitespace; extend here if richer
    # formatting of the assistant output is ever needed.
    cleaned = response.strip()
    return cleaned
# Enhanced respond function with pipeline integration
def respond(task, message, history, system_message, max_tokens, temperature, top_p):
    """
    Handle a user message and produce the assistant's reply.

    Builds the system prompt from the selected task, validates/normalizes the
    input, replays the (user, assistant) history into chat-format messages,
    streams the model response, and returns the post-processed final text.
    """
    # Apply task-specific instructions; unknown tasks get a generic label.
    system_message = f"{system_message} Task: {task_instructions.get(task, 'General NLP task')}"
    # Preprocess the user's input using the pipeline.
    message = preprocess_input(message)
    # Early exit on ANY preprocessing failure. preprocess_input can return
    # three distinct error strings; previously the "An error occurred"
    # variant slipped through and was sent to the model as the user message.
    if message.startswith(("Input language detected", "Unable to detect", "An error occurred")):
        return message
    # Prepare conversation history in chat-completion message format.
    messages = [{"role": "system", "content": system_message}]
    for user_message, assistant_message in history:
        if user_message:
            messages.append({"role": "user", "content": user_message})
        if assistant_message:
            messages.append({"role": "assistant", "content": assistant_message})
    messages.append({"role": "user", "content": message})
    # Drain the streamed generator; each yield is the accumulated text, so
    # the last value seen is the complete response.
    response = ""
    for chunk in run_model_inference(
        messages=messages,
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p
    ):
        response = chunk
    # Postprocess the model's response before sending it to the user.
    return postprocess_output(response)
# Improved chat history management functions with better file handling
def save_history(history, filename="chat_history.json"):
    """
    Persist the chat history to a JSON file.

    Returns a human-readable status string (success or error) suitable for
    display in the UI; never raises.
    """
    try:
        # Explicit UTF-8 + ensure_ascii=False so non-English text in the
        # history round-trips readably across platforms.
        with open(filename, "w", encoding="utf-8") as f:
            json.dump(history, f, ensure_ascii=False)
        return "Chat history saved successfully."
    except Exception as e:
        return f"Error saving chat history: {str(e)}"
def load_history(filename="chat_history.json"):
    """
    Load chat history from a JSON file.

    Returns the parsed history, or an empty list when the file is missing,
    empty, or corrupt — so the UI always gets a usable history value.
    """
    try:
        # Match save_history's explicit UTF-8 encoding.
        with open(filename, "r", encoding="utf-8") as f:
            return json.load(f)
    except FileNotFoundError:
        return []
    except json.JSONDecodeError:
        return []  # Handle case where the file is empty or corrupt
# Refactor the Gradio interface to be more organized and responsive
def create_interface():
    """
    Create the Gradio Blocks interface for the chatbot.

    Lays out task selection, input/system-message boxes, sampling parameter
    sliders, and save/load/submit buttons, then wires them to respond(),
    save_history() and load_history(). Returns the (unlaunched) Blocks app.
    """
    with gr.Blocks() as demo:
        gr.Markdown("## 🧠 NLPToolkit Agent\nAn advanced assistant for NLP tasks, powered by Hugging Face.")
        # Task selector drives the instruction appended to the system message.
        with gr.Row():
            task = gr.Dropdown(
                choices=["Summarization", "Sentiment Analysis", "Text Classification", "Entity Recognition"],
                value="Summarization",
                label="Select NLP Task"
            )
        with gr.Row():
            user_input = gr.Textbox(label="Your Message", placeholder="Type your message here...")
            system_message = gr.Textbox(value=default_system_message, label="System Message")
        with gr.Row():
            # NOTE(review): chat_history is only written by the Load button;
            # submit_button never appends the new exchange to it, so respond()
            # always sees the loaded (or empty) history — confirm if intended.
            chat_history = gr.State(value=[])
            assistant_response = gr.Textbox(label="Assistant Response", interactive=False)
        # Sampling parameters forwarded verbatim to the inference call.
        with gr.Row():
            max_tokens = gr.Slider(1, 2048, value=512, label="Max Tokens")
            temperature = gr.Slider(0.1, 4.0, value=0.7, label="Temperature")
            top_p = gr.Slider(0.1, 1.0, value=0.95, label="Top-p (Nucleus Sampling)")
        with gr.Row():
            save_button = gr.Button("Save Chat History")
            load_button = gr.Button("Load Chat History")
        with gr.Row():
            submit_button = gr.Button("Generate Response")
        # Connect button actions and ensure smooth flow.
        submit_button.click(
            fn=respond,
            inputs=[task, user_input, chat_history, system_message, max_tokens, temperature, top_p],
            outputs=assistant_response
        )
        # save_history's status string is discarded (outputs=None); load
        # replaces the State with the file contents.
        save_button.click(fn=save_history, inputs=chat_history, outputs=None)
        load_button.click(fn=load_history, inputs=None, outputs=chat_history)
        gr.Markdown("### 🚀 Powered by Hugging Face and Gradio | Developed by Canstralian")
    return demo
# Run the enhanced Gradio app
# Launch the Gradio app only when executed as a script (not on import).
if __name__ == "__main__":
    app = create_interface()
    app.launch()