Anubhutineo committed · verified
Commit 56dcc59 · 1 Parent(s): a0d7445

Delete app.py


import os
import warnings

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
import gradio as gr

warnings.filterwarnings('ignore')

# Silence the tokenizers fork warning; parallel tokenization is unnecessary here
os.environ['TOKENIZERS_PARALLELISM'] = 'false'

class AIStoryteller:
    """AI Storyteller optimized for Hugging Face Spaces"""

    def __init__(self):
        self.model = None
        self.tokenizer = None
        self.model_loaded = False
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.load_model()

    def load_model(self):
        """Load the AI model"""
        try:
            print("📥 Loading DistilGPT-2 model...")
            model_name = "distilgpt2"

            self.tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.model = AutoModelForCausalLM.from_pretrained(
                model_name,
                # fp16 only pays off on GPU; fall back to fp32 on CPU
                torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
            )
            self.model.to(self.device)

            # GPT-2 has no pad token; reuse EOS so generate() can pad
            if self.tokenizer.pad_token is None:
                self.tokenizer.pad_token = self.tokenizer.eos_token

            self.model_loaded = True
            print("✅ Model loaded successfully!")
            return True

        except Exception as e:
            print(f"❌ Model loading failed: {e}")
            return False

    def generate_story(self, prompt, max_length=100):
        """Generate a story from a prompt"""
        if not self.model_loaded:
            return "❌ Model not loaded. Please try again."

        try:
            inputs = self.tokenizer.encode(
                prompt, return_tensors='pt', max_length=256, truncation=True
            ).to(self.device)

            with torch.no_grad():
                outputs = self.model.generate(
                    inputs,
                    max_length=min(inputs.shape[1] + max_length, 256),
                    temperature=0.8,
                    do_sample=True,
                    top_p=0.9,
                    top_k=50,
                    pad_token_id=self.tokenizer.eos_token_id,
                    no_repeat_ngram_size=2,
                    repetition_penalty=1.1
                )

            full_story = self.tokenizer.decode(outputs[0], skip_special_tokens=True)

            # Re-attach the prompt with consistent spacing
            if full_story.startswith(prompt):
                generated_part = full_story[len(prompt):].strip()
                return f"{prompt} {generated_part}"
            return full_story

        except Exception as e:
            return f"❌ Error generating story: {str(e)}"

# Initialize the storyteller
print("🚀 Initializing AI Storyteller...")
storyteller = AIStoryteller()

def generate_story_interface(prompt, genre, story_length):
    """Main interface function"""
    if not prompt or not prompt.strip():
        return "❌ Please enter a story prompt!"

    genre_starters = {
        "Fantasy": "In a realm of magic and wonder,",
        "Sci-Fi": "In the distant future among the stars,",
        "Mystery": "On a foggy night filled with secrets,",
        "Horror": "In the darkness where nightmares dwell,",
        "Romance": "When two hearts found each other,",
        "Adventure": "On a daring quest for glory,",
        "Comedy": "In a world of laughter and mishaps,",
        "Drama": "In a tale of human emotion,"
    }

    if genre in genre_starters:
        full_prompt = f"{genre_starters[genre]} {prompt.strip()}"
    else:
        full_prompt = prompt.strip()

    return storyteller.generate_story(full_prompt, max_length=int(story_length))

# Create Gradio interface
interface = gr.Interface(
    fn=generate_story_interface,
    inputs=[
        gr.Textbox(
            label="📝 Story Prompt",
            placeholder="Enter your story idea (e.g., 'a detective finds a mysterious letter')",
            lines=3
        ),
        gr.Dropdown(
            choices=["Fantasy", "Sci-Fi", "Mystery", "Horror", "Romance", "Adventure", "Comedy", "Drama"],
            label="🎭 Genre",
            value="Fantasy"
        ),
        gr.Slider(
            minimum=30,
            maximum=120,
            value=80,
            label="📏 Story Length (tokens)"
        )
    ],
    outputs=gr.Textbox(
        label="📚 Generated Story",
        lines=8
    ),
    title="🎭 AI Storyteller",
    description="""
    🌟 **Create Amazing Stories with AI!** 🌟

    Enter a creative prompt, choose your favorite genre, and let AI craft a unique story for you!
    Perfect for writers, students, and anyone who loves creative storytelling.
    """,
    examples=[
        ["a young wizard discovers a hidden library", "Fantasy", 100],
        ["a detective receives a cryptic phone call", "Mystery", 80],
        ["robots develop feelings", "Sci-Fi", 90],
        ["two strangers meet in a coffee shop", "Romance", 70],
        ["an explorer finds a secret cave", "Adventure", 85]
    ],
    theme=gr.themes.Soft()
)

# Launch the app
if __name__ == "__main__":
    interface.launch()
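For the Space to actually build and run this script, an accompanying requirements.txt has to declare the dependencies the script imports. A minimal sketch of that file (the bare, unpinned entries are an assumption; this file is not part of the commit shown here):

# requirements.txt (hypothetical sketch)
torch
transformers
gradio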

Files changed (1)
app.py +0 -64
app.py DELETED
@@ -1,64 +0,0 @@
-import gradio as gr
-from huggingface_hub import InferenceClient
-
-"""
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-"""
-client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
-
-def respond(
-    message,
-    history: list[tuple[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-):
-    messages = [{"role": "system", "content": system_message}]
-
-    for val in history:
-        if val[0]:
-            messages.append({"role": "user", "content": val[0]})
-        if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})
-
-    messages.append({"role": "user", "content": message})
-
-    response = ""
-
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        token = message.choices[0].delta.content
-
-        response += token
-        yield response
-
-
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
-demo = gr.ChatInterface(
-    respond,
-    additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
-    ],
-)
-
-
-if __name__ == "__main__":
-    demo.launch()