# Source note: Hugging Face Space file by RAHULJUNEJA33.
# History: renamed from app.py to app_2pages.py (commit 2494420, verified).
# ---------------------------------------------------------------------------
# Model Recommendations and Description:
# ---------------------------------------------------------------------------
# 1. **DistilGPT-2**: A distilled, lighter version of GPT-2.
# - **Size**: ~500 MB (smaller and faster than full GPT-2).
# - **Performance**: Great for text generation tasks with reduced inference time.
# - **Use case**: If you need a lightweight, efficient model with decent quality.
# - **Hugging Face Model**: "distilgpt2"
#
# 2. **GPT-Neo 1.3B**: A smaller alternative to GPT-3 with reasonable performance.
# - **Size**: ~5 GB.
# - **Performance**: Powerful text generation model with good results on a wide variety of tasks.
# - **Use case**: Ideal for slightly larger tasks where better quality is needed than DistilGPT-2.
# - **Hugging Face Model**: "EleutherAI/gpt-neo-1.3B"
#
# 3. **Mistral 7B**: Open-source model with smaller weights.
# - **Size**: ~4.5 GB.
# - **Performance**: Comparable to larger models, with good speed and quality.
# - **Use case**: Ideal for high-quality generation at a relatively smaller size.
# - **Hugging Face Model**: "mistralai/Mistral-7B-Instruct-v0.1"
#
# 4. **TinyBERT or MiniLM**: Excellent for lighter tasks, even smaller models.
# - **Size**: ~100 MB.
# - **Performance**: Great for tasks like classification or sentence embeddings but not suitable for long text generation.
# - **Use case**: Perfect for applications requiring minimal memory and fast processing but not for full-fledged generation.
# - **Hugging Face Model**: "sentence-transformers/all-MiniLM-L6-v2"
# Choose your model from the above options:
# NOTE(review): this value is re-assigned further down in the file; that
# later assignment (currently the same string) is the one that takes effect.
MODEL_NAME = "google/flan-t5-base" # Change this to one of the other models based on your needs.
# ---------------------------------------------------------------------------
# Code Below to Load, Generate, and Save Functional Requirement Documents
# ---------------------------------------------------------------------------
import streamlit as st
from transformers import pipeline
from reportlab.lib.pagesizes import letter
from reportlab.pdfgen import canvas
import os
import textwrap
# Load Hugging Face Token (Ensure it's set in Env Variables)
HF_TOKEN = os.getenv("HF_TOKEN")  # None when unset; the pipeline then falls back to anonymous access
# ✅ Optimized Model (Flan-T5 for Lower Memory Usage)
# NOTE(review): duplicate of the MODEL_NAME assignment near the top of the
# file — this later assignment wins; consider keeping only one.
MODEL_NAME = "google/flan-t5-base"
# πŸ“Œ Load Model Efficiently (Avoid Reloading)
@st.cache_resource
def load_model():
try:
return pipeline("text2text-generation", model=MODEL_NAME, token=HF_TOKEN)
except Exception as e:
st.error(f"❌ Error loading model: {str(e)}")
return None
# Load once and reuse (st.cache_resource keeps a single cached pipeline)
generator = load_model()  # None if loading failed; callers must check before use
# πŸ“Œ Function to Generate Functional Requirement Document
def generate_functional_requirements(topic):
if generator is None:
return "Error: Model failed to load."
sections = [
"Introduction: Overview, Purpose, Intended Users",
"Scope: System Description, Key Functionalities",
"Functional Specifications: Features, User Roles, Transactions",
"Security & Compliance: Regulations, Data Protection",
"User Interface Requirements: Wireframes, UI/UX Considerations",
"System Architecture: High-Level Architecture, Technology Stack",
"Performance & Scalability: Expected Load, Response Time, Uptime",
"Regulatory & Legal Compliance: Banking Regulations, Audits",
"Data Management: Data Flow, Storage, Backup Strategies",
"Conclusion: Summary, Future Enhancements"
]
document = ""
for section in sections:
prompt = f"Generate a **detailed and structured** section on '{section}' for **{topic}** in banking."
for _ in range(3): # Ensure enough content is generated
output = generator(prompt, max_length=2048, do_sample=True, temperature=0.7)
if output and isinstance(output, list) and len(output) > 0 and "generated_text" in output[0]:
document += f"\n\n### {section}\n\n" + output[0]["generated_text"] + "\n\n"
else:
return "Error: Model failed to generate text."
return document
# πŸ“Œ Function to Save Generated Content as PDF
def save_to_pdf(content, filename):
if not content.strip():
st.error("❌ Error: No content available to write to the PDF.")
return
c = canvas.Canvas(filename, pagesize=letter)
c.setFont("Helvetica", 10)
# Handling text wrapping & new pages correctly
max_width = 80 # Approximate max characters per line
lines_per_page = 50 # Approximate lines per page
y_position = 750 # Start position for text
paragraphs = content.split("\n\n") # Preserve paragraph structure
for para in paragraphs:
wrapped_lines = textwrap.wrap(para, max_width)
for line in wrapped_lines:
if y_position < 50: # If near bottom, create a new page
c.showPage()
c.setFont("Helvetica", 10)
y_position = 750 # Reset text position
c.drawString(40, y_position, line)
y_position -= 14 # Move to next line
c.save()
# πŸ“Œ Streamlit UI
def main():
st.title("πŸ“„ AI-Powered Functional Requirement Generator for Banking")
banking_topics = [
"Core Banking System", "Loan Management System", "Payment Processing Gateway",
"Risk and Fraud Detection", "Regulatory Compliance Management", "Digital Banking APIs",
"Customer Onboarding & KYC", "Treasury Management", "Wealth & Portfolio Management"
]
topic = st.selectbox("Select a Banking Functional Requirement Topic", banking_topics)
if st.button("Generate Functional Requirement Document"):
with st.spinner("Generating... This may take a while."):
content = generate_functional_requirements(topic)
if "Error" in content:
st.error(content)
else:
# Show document preview before saving to PDF (for debugging)
st.text_area("Generated Document Preview", content[:5000], height=400)
filename = "functional_requirement.pdf"
save_to_pdf(content, filename)
st.success("βœ… Document Generated Successfully!")
st.download_button("πŸ“₯ Download PDF", data=open(filename, "rb"), file_name=filename, mime="application/pdf")
os.remove(filename) # Cleanup after download
if __name__ == "__main__":
main()