# (extraction residue removed: file-size line, VCS blame hashes, and line-number strip
#  were not Python source and made the file unparseable)
# ---------------------------------------------------------------------------
# Model Recommendations and Description:
# ---------------------------------------------------------------------------
# 1. **DistilGPT-2**: A distilled, lighter version of GPT-2.
# - **Size**: ~500 MB (smaller and faster than full GPT-2).
# - **Performance**: Great for text generation tasks with reduced inference time.
# - **Use case**: If you need a lightweight, efficient model with decent quality.
# - **Hugging Face Model**: "distilgpt2"
#
# 2. **GPT-Neo 1.3B**: A smaller alternative to GPT-3 with reasonable performance.
# - **Size**: ~5 GB.
# - **Performance**: Powerful text generation model with good results on a wide variety of tasks.
# - **Use case**: Ideal for slightly larger tasks where better quality is needed than DistilGPT-2.
# - **Hugging Face Model**: "EleutherAI/gpt-neo-1.3B"
#
# 3. **Mistral 7B**: Open-source model with smaller weights.
# - **Size**: ~14 GB at fp16 (~4–5 GB when quantized, e.g. 4-bit GGUF/GPTQ).
# - **Performance**: Comparable to larger models, with good speed and quality.
# - **Use case**: Ideal for high-quality generation at a relatively smaller size.
# - **Hugging Face Model**: "mistralai/Mistral-7B-Instruct-v0.1"
#
# 4. **TinyBERT or MiniLM**: Excellent for lighter tasks, even smaller models.
# - **Size**: ~100 MB.
# - **Performance**: Great for tasks like classification or sentence embeddings but not suitable for long text generation.
# - **Use case**: Perfect for applications requiring minimal memory and fast processing but not for full-fledged generation.
# - **Hugging Face Model**: "sentence-transformers/all-MiniLM-L6-v2"
# Choose your model from the above options:
# NOTE(review): this constant is assigned again further down, after the imports;
# if the two values ever diverge, the later assignment is the one that takes effect.
MODEL_NAME = "google/flan-t5-base" # Change this to one of the other models based on your needs.
# ---------------------------------------------------------------------------
# Code Below to Load, Generate, and Save Functional Requirement Documents
# ---------------------------------------------------------------------------
import streamlit as st
from transformers import pipeline
from reportlab.lib.pagesizes import letter
from reportlab.pdfgen import canvas
import os
import textwrap
# Load the Hugging Face token (must be set in the environment; None if absent).
HF_TOKEN = os.getenv("HF_TOKEN")

# Optimized model choice (Flan-T5 base, for lower memory usage).
# NOTE: the original comment here was split across two lines by a mojibake'd
# emoji, leaving a bare line of text that was a syntax error — now rejoined.
MODEL_NAME = "google/flan-t5-base"
# Load the model once and cache it across Streamlit reruns.
@st.cache_resource
def load_model():
    """Build the text2text-generation pipeline for MODEL_NAME.

    Returns the pipeline, or None if loading fails (the error is
    surfaced in the Streamlit UI rather than raised).
    """
    try:
        return pipeline("text2text-generation", model=MODEL_NAME, token=HF_TOKEN)
    except Exception as e:  # broad on purpose: any load failure goes to the UI
        st.error(f"Error loading model: {e}")
        return None


# Load once at import time and reuse everywhere below.
generator = load_model()
# Generate a functional requirement document, section by section.
def generate_functional_requirements(topic):
    """Return a markdown-style functional requirement document for *topic*.

    Each section is sampled three times and the samples are concatenated
    under a single section header, to ensure enough content. On any
    failure, returns a string starting with "Error" (checked by main()).
    """
    if generator is None:
        return "Error: Model failed to load."
    sections = [
        "Introduction: Overview, Purpose, Intended Users",
        "Scope: System Description, Key Functionalities",
        "Functional Specifications: Features, User Roles, Transactions",
        "Security & Compliance: Regulations, Data Protection",
        "User Interface Requirements: Wireframes, UI/UX Considerations",
        "System Architecture: High-Level Architecture, Technology Stack",
        "Performance & Scalability: Expected Load, Response Time, Uptime",
        "Regulatory & Legal Compliance: Banking Regulations, Audits",
        "Data Management: Data Flow, Storage, Backup Strategies",
        "Conclusion: Summary, Future Enhancements",
    ]
    document = ""
    for section in sections:
        prompt = (
            f"Generate a **detailed and structured** section on "
            f"'{section}' for **{topic}** in banking."
        )
        # Emit the section header ONCE, then accumulate the samples under it
        # (the original re-emitted the header on every sampling pass).
        document += f"\n\n### {section}\n\n"
        for _ in range(3):  # Ensure enough content is generated
            output = generator(prompt, max_length=2048, do_sample=True, temperature=0.7)
            # A truthy list is non-empty, so no separate len() check is needed.
            if output and isinstance(output, list) and "generated_text" in output[0]:
                document += output[0]["generated_text"] + "\n\n"
            else:
                return "Error: Model failed to generate text."
    return document
# Save generated content to a PDF file.
def save_to_pdf(content, filename):
    """Write *content* to *filename* as a simple wrapped-text PDF.

    Shows a Streamlit error and writes nothing if *content* is blank.
    Text is wrapped to ~80 characters and paginated manually.
    """
    if not content.strip():
        st.error("Error: No content available to write to the PDF.")
        return
    c = canvas.Canvas(filename, pagesize=letter)
    c.setFont("Helvetica", 10)
    max_width = 80      # approximate max characters per line
    y_position = 750    # starting y-coordinate (points) on each page
    # Preserve paragraph structure by splitting on blank lines.
    for para in content.split("\n\n"):
        for line in textwrap.wrap(para, max_width):
            if y_position < 50:  # near the bottom margin: start a new page
                c.showPage()
                c.setFont("Helvetica", 10)  # font state resets on a new page
                y_position = 750
            c.drawString(40, y_position, line)
            y_position -= 14  # line spacing
    c.save()
# Streamlit UI entry point.
def main():
    """Render the topic picker, generate the document, and offer a PDF download."""
    st.title("AI-Powered Functional Requirement Generator for Banking")
    banking_topics = [
        "Core Banking System", "Loan Management System", "Payment Processing Gateway",
        "Risk and Fraud Detection", "Regulatory Compliance Management", "Digital Banking APIs",
        "Customer Onboarding & KYC", "Treasury Management", "Wealth & Portfolio Management",
    ]
    topic = st.selectbox("Select a Banking Functional Requirement Topic", banking_topics)
    if st.button("Generate Functional Requirement Document"):
        with st.spinner("Generating... This may take a while."):
            content = generate_functional_requirements(topic)
        # Error strings from the generator all begin with "Error"; substring
        # matching would misfire on legitimate documents mentioning the word.
        if content.startswith("Error"):
            st.error(content)
        else:
            # Show a preview of the document before saving to PDF.
            st.text_area("Generated Document Preview", content[:5000], height=400)
            filename = "functional_requirement.pdf"
            save_to_pdf(content, filename)
            st.success("Document Generated Successfully!")
            # Read the bytes and close the handle before deleting the file;
            # passing open(...) directly leaked the file handle.
            with open(filename, "rb") as pdf_file:
                pdf_bytes = pdf_file.read()
            st.download_button(
                "Download PDF",
                data=pdf_bytes,
                file_name=filename,
                mime="application/pdf",
            )
            os.remove(filename)  # cleanup after download


if __name__ == "__main__":
    main()