# NOTE: the original paste began with "Spaces / Sleeping / Sleeping" — Hugging Face
# Spaces page-header residue, not part of the code. Cleaned up during review.
# Example of how to integrate the granite_model.py into your main app.py
# At the top of your app.py, add these imports:
import logging

# Use a pipeline as a high-level helper
from transformers import pipeline

# NOTE(review): `logger` was used below but never defined in this snippet —
# define it here (or reuse the app's existing logger if one already exists).
logger = logging.getLogger(__name__)

# NOTE(review): constructing the pipeline at import time downloads/loads the
# 8B model as a module-level side effect; in the real app, move this demo
# behind `if __name__ == "__main__":` or into a lazy initializer.
pipe = pipeline("text-generation", model="ibm-granite/granite-3.3-8b-instruct")
messages = [
    {"role": "user", "content": "Who are you?"},
]
pipe(messages)

# Optional Granite integration: degrade gracefully when the module is absent.
try:
    from granite_model import GraniteModelIntegration
    GRANITE_AVAILABLE = True
except ImportError:
    GRANITE_AVAILABLE = False
    logger.warning("granite_model.py not found. Granite features will be disabled.")
# In your AdvancedDocumentSummarizer.__init__ method, add:
def __init__(self):
    """Initialize summarizer state and optional AI / Granite / NLTK backends.

    Each backend is optional: initialization failures are logged and the
    corresponding attribute stays None, so features degrade gracefully.
    """
    self.summarizer = None           # BART/T5 pipeline, set by _initialize_ai_models
    self.sentiment_analyzer = None   # NLTK VADER analyzer, if available
    self.granite_integration = None  # GraniteModelIntegration instance, if available
    self.cache = {}                  # per-instance result cache

    # Initialize AI models only when transformers was importable.
    if TRANSFORMERS_AVAILABLE:
        self._initialize_ai_models()

    # Initialize Granite integration; failure is non-fatal.
    if GRANITE_AVAILABLE:
        try:
            self.granite_integration = GraniteModelIntegration()
            logger.info(f"Granite integration status: {'Available' if self.granite_integration.is_available() else 'Not Available'}")
        except Exception as e:
            logger.warning(f"Failed to initialize Granite integration: {e}")

    # Initialize sentiment analyzer (NLTK VADER); failure is non-fatal.
    if NLTK_AVAILABLE:
        try:
            self.sentiment_analyzer = SentimentIntensityAnalyzer()
        except Exception as e:
            logger.warning(f"Failed to initialize sentiment analyzer: {e}")
# Add these methods to your AdvancedDocumentSummarizer class:
def granite_enhanced_summary(self, text: str, summary_type: str = "medium") -> str:
    """Generate an enhanced summary using the Granite model.

    Falls back to the extractive summarizer when the Granite integration is
    missing or reports itself unavailable.
    """
    if not (self.granite_integration and self.granite_integration.is_available()):
        return self.advanced_extractive_summary(text)
    return self.granite_integration.generate_summary(text, summary_type)
def granite_analyze_document(self, text: str) -> Dict:
    """Use the Granite model for advanced document analysis.

    Returns a dict with 'analysis_available' always present; the remaining
    keys are filled from the integration's result when Granite is usable.
    """
    if not (self.granite_integration and self.granite_integration.is_available()):
        return {'analysis_available': False}
    result = self.granite_integration.analyze_document(text)
    return {
        'granite_analysis': result.get('analysis', 'Analysis failed'),
        'analysis_available': result.get('success', False),
        'model_used': result.get('model_used', 'Unknown'),
    }
def granite_generate_questions(self, text: str, num_questions: int = 5) -> list:
    """Generate comprehension questions using Granite.

    Returns an empty list when the Granite integration is missing or
    unavailable, so callers never need a special case.
    """
    if not (self.granite_integration and self.granite_integration.is_available()):
        return []
    return self.granite_integration.generate_questions(text, num_questions)
# In your process_document method, update the summary generation part:
# (fragment — assumes `summary_type`, `summary_length`, `params`, `text`,
# and `self` are in scope inside process_document)

# Generate summary — prioritize Granite if available for AI mode.
if summary_type == "ai":
    if self.granite_integration and self.granite_integration.is_available():
        summary = self.granite_enhanced_summary(text, summary_length)
    elif self.summarizer:
        summary = self.ai_summary(text, params["max_length"], params["min_length"])
    else:
        summary = self.advanced_extractive_summary(text, params["sentences"])
else:
    summary = self.advanced_extractive_summary(text, params["sentences"])

# Get Granite analysis and questions if available (both degrade gracefully
# to {'analysis_available': False} / [] when Granite is unavailable).
granite_analysis = self.granite_analyze_document(text)
granite_questions = self.granite_generate_questions(text, 5)
# Add to result dictionary:
# (fragment — assumes `text`, `summary`, `key_points`, `outline`, `stats`,
# `readability_score`, `file_path`, `summary_type`, `summary_length`,
# `granite_analysis`, and `granite_questions` are in scope)
result = {
    'original_text': text[:2000] + "..." if len(text) > 2000 else text,  # preview only
    'full_text_length': len(text),
    'summary': summary,
    'key_points': key_points,
    'outline': outline,
    'stats': stats,
    'granite_analysis': granite_analysis,
    'granite_questions': granite_questions,  # Add this
    'readability_score': readability_score,
    'file_name': Path(file_path).name,
    'file_size': os.path.getsize(file_path),
    'processing_time': datetime.now().isoformat(),
    'summary_type': summary_type,
    'summary_length': summary_length,
    # NOTE(review): label says "Granite 3.2 8B" but the demo pipeline above
    # loads granite-3.3-8b-instruct — confirm which model the integration uses.
    'model_used': 'Granite 3.2 8B' if (summary_type == "ai" and self.granite_integration and self.granite_integration.is_available()) else ('AI (BART/T5)' if self.summarizer else 'Extractive')
}
# In your UI section, add the questions display:
# (fragment — assumes `result` is the dict built in process_document)

# Add Granite questions if available.
granite_questions_html = ""
if result.get("granite_questions"):
    questions_list = "".join(
        [f"<li style='margin-bottom: 10px; padding: 8px; background: rgba(255,255,255,0.1); border-radius: 6px;'>{q}</li>"
         for q in result["granite_questions"]]
    )
    granite_questions_html = f'''
    <div style="background: linear-gradient(135deg, #11998e 0%, #38ef7d 100%); color: white; padding: 20px; border-radius: 12px; margin: 15px 0; box-shadow: 0 6px 20px rgba(0,0,0,0.1);">
        <h3>AI-Generated Questions</h3>
        <p style="margin-bottom: 15px; opacity: 0.9;">Test your understanding with these Granite-generated questions:</p>
        <ol style="padding-left: 20px; line-height: 1.6;">
            {questions_list}
        </ol>
    </div>
    '''
# Update your system status display (this line belongs inside the status
# markdown f-string; it is not standalone Python):
**Granite 3.2 8B:** {"✅ Available" if (GRANITE_AVAILABLE and hasattr(summarizer, 'granite_integration') and summarizer.granite_integration and summarizer.granite_integration.is_available()) else "❌ Not Available"}