import gradio as gr
from transformers import AutoTokenizer, AutoModelForQuestionAnswering
import torch
import logging
import warnings
from typing import List, Tuple, Dict
import random
import hashlib
from datetime import datetime
from dataclasses import dataclass
from enum import Enum
import json
import re
from pathlib import Path

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
warnings.filterwarnings('ignore')

class ThemeType(Enum):
    MARRIAGE = "casamento"
    FAMILY = "familia"
    SPIRITUAL = "vida_espiritual"
    WORK = "trabalho"
    RELATIONSHIPS = "relacionamentos"
    GENERAL = "geral"

@dataclass
class BiblicalExample:
    question: str
    passage: str
    text: str
    base_response: str
    application: str
    keywords: List[str]
    sentiment: str  # Added for emotional context

class ResponseGenerator:
    def __init__(self):
        self.nlp_model = None  # Placeholder for sentiment analysis
        
    def analyze_sentiment(self, text: str) -> str:
        # Basic sentiment analysis based on keywords
        positive_words = {'alegria', 'esperança', 'paz', 'amor', 'gratidão'}
        negative_words = {'tristeza', 'medo', 'ansiedade', 'preocupação', 'angústia'}
        
        text_words = set(text.lower().split())
        pos_count = len(text_words.intersection(positive_words))
        neg_count = len(text_words.intersection(negative_words))
        
        return 'positive' if pos_count > neg_count else 'negative' if neg_count > pos_count else 'neutral'

    def personalize_response(self, example: BiblicalExample, question: str) -> str:
        sentiment = self.analyze_sentiment(question)
        
        # Customize response based on sentiment
        intro = {
            'positive': "Que bom que você está buscando orientação! ",
            'negative': "Entendo que você possa estar passando por um momento difícil. ",
            'neutral': "Agradeço sua busca por sabedoria. "
        }
        
        return f"{intro[sentiment]}{example.base_response}"

class EnhancedBiblicalCounselor:
    def __init__(self):
        logger.info("Inicializando conselheiro bíblico aprimorado...")
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model_name = "pierreguillou/bert-base-cased-squad-v1.1-portuguese"
        self.session_history = []
        self.response_generator = ResponseGenerator()
        self.load_examples()
        self.setup_model()
        
    def load_examples(self):
        # Load examples from a JSON file (you would need to create this).
        # Entries are assumed to mirror the BiblicalExample fields, grouped by theme.
        examples_path = Path("biblical_examples.json")
        if examples_path.exists():
            with open(examples_path, encoding="utf-8") as f:
                raw = json.load(f)
            self.biblical_examples = {
                theme: [BiblicalExample(**item) for item in items]
                for theme, items in raw.items()
            }
        else:
            # Fall back to the built-in default examples
            self.biblical_examples = self._get_default_examples()
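
    # --- Hypothetical fallback data (not in the original file) ---------------
    # load_examples() above falls back to _get_default_examples(), which was not
    # defined here. A minimal sketch with a single "geral" entry keeps the app
    # runnable without biblical_examples.json.
    def _get_default_examples(self) -> Dict[str, List[BiblicalExample]]:
        return {
            "geral": [
                BiblicalExample(
                    question="Como encontrar direção para minha vida?",
                    passage="Provérbios 3:5-6",
                    text="Confia no Senhor de todo o teu coração e não te estribes no teu próprio entendimento.",
                    base_response="A Bíblia nos convida a confiar em Deus em cada decisão, buscando Sua direção em oração.",
                    application="Antes de decidir, apresente a questão a Deus em oração e busque conselho na Palavra.",
                    keywords=["direção", "decisão", "confiança", "sabedoria"],
                    sentiment="neutral",
                )
            ],
        }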
    
    def setup_model(self):
        try:
            self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
            self.model = AutoModelForQuestionAnswering.from_pretrained(self.model_name)
            self.model.to(self.device)
            logger.info(f"Modelo carregado com sucesso no dispositivo: {self.device}")
        except Exception as e:
            logger.error(f"Erro ao carregar modelo: {str(e)}")
            raise
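
    # --- Hypothetical helpers (not in the original file) ---------------------
    # get_unique_response() below calls find_best_theme() and
    # _select_best_example(), which were not defined here. The sketches below
    # use simple keyword overlap, assuming self.biblical_examples maps theme
    # names to lists of BiblicalExample.
    def find_best_theme(self, question: str) -> str:
        question_words = set(question.lower().split())
        best_theme, best_score = "geral", 0
        for theme_name, examples in self.biblical_examples.items():
            score = sum(
                len(question_words & {kw.lower() for kw in ex.keywords})
                for ex in examples
            )
            if score > best_score:
                best_theme, best_score = theme_name, score
        return best_theme

    def _select_best_example(self, question: str, theme: str) -> BiblicalExample:
        candidates = self.biblical_examples.get(theme)
        if not candidates:
            # Fall back to every available example if the theme has none
            candidates = [ex for group in self.biblical_examples.values() for ex in group]
        question_words = set(question.lower().split())
        return max(
            candidates,
            key=lambda ex: len(question_words & {kw.lower() for kw in ex.keywords}),
        )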

    def get_unique_response(self, question: str, theme: str = None) -> Tuple[str, Dict, str]:
        if not theme:
            theme = self.find_best_theme(question)
        
        example = self._select_best_example(question, theme)
        personalized_response = self.response_generator.personalize_response(example, question)
        
        # Generate the AI reflection with the QA model, using the example's passage text
        model_response = self._generate_model_response(question, example.text)
        
        # Create enhanced response with better formatting and structure
        response = self._format_enhanced_response(
            personalized_response,
            example,
            model_response,
            self._get_additional_resources(theme)
        )
        
        metadata = self._create_metadata(example, theme)
        history = self.save_to_history(question, theme, response, metadata)
        
        return response, metadata, history
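
    # --- Hypothetical model inference (not in the original file) -------------
    # One plausible implementation of _generate_model_response(): extractive QA
    # with the loaded Portuguese BERT model, returning the highest-scoring span
    # from the passage text and falling back to the full passage if no span is found.
    def _generate_model_response(self, question: str, context: str) -> str:
        inputs = self.tokenizer(
            question,
            context,
            return_tensors="pt",
            truncation=True,
            max_length=384,
        ).to(self.device)
        with torch.no_grad():
            outputs = self.model(**inputs)
        start_idx = int(torch.argmax(outputs.start_logits))
        end_idx = int(torch.argmax(outputs.end_logits)) + 1
        if end_idx <= start_idx:
            return context
        answer = self.tokenizer.decode(
            inputs["input_ids"][0][start_idx:end_idx], skip_special_tokens=True
        )
        return answer.strip() or context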

    def _format_enhanced_response(self, personalized_response, example, model_response, resources):
        return f"""
🌟 Orientação Personalizada:
{personalized_response}

📖 Passagem Bíblica:
{example.passage}: {example.text}

✨ Aplicação Prática:
{example.application}

💭 Reflexão Gerada por IA:
{model_response}

📚 Recursos Adicionais:
{resources}

🙏 Observação: Esta orientação é baseada em princípios bíblicos. Para questões específicas, 
considere consultar sua liderança espiritual local.
"""

    def _get_additional_resources(self, theme):
        # Add relevant books, articles, or other resources based on the theme
        resources = {
            "casamento": ["'O Significado do Casamento' - Timothy Keller", 
                         "'Casamento Blindado' - Renato e Cristiane Cardoso"],
            "familia": ["'Criando Filhos' - Tim Kimmel",
                       "'Limites' - Henry Cloud e John Townsend"]
            # Add more resources for other themes
        }
        return "\n".join(resources.get(theme, ["Bíblia de Estudo"]))

def create_enhanced_interface():
    counselor = EnhancedBiblicalCounselor()
    
    custom_theme = gr.themes.Soft().set(
        button_primary_background_fill="indigo",
        button_primary_background_fill_dark="darkblue",
    )

    with gr.Blocks(theme=custom_theme) as demo:
        gr.Markdown("""
        # 🕊️ Conselheiro Bíblico Plus
        ### Orientação Bíblica Personalizada com Inteligência Artificial
        """)

        with gr.Row():
            with gr.Column(scale=1):
                verse_of_day = gr.Textbox(
                    label="🌟 Versículo do Dia",
                    value=counselor.get_verse_of_the_day(),
                    lines=4,
                    interactive=False
                )
            
            with gr.Column(scale=1):
                prayer_focus = gr.Textbox(
                    label="🙏 Foco de Oração do Dia",
                    value=counselor.get_daily_prayer_focus(),
                    lines=4,
                    interactive=False
                )

        with gr.Tabs():
            with gr.TabItem("📝 Nova Consulta", id=1):
                with gr.Row():
                    with gr.Column():
                        theme = gr.Dropdown(
                            choices=counselor.get_themes(),
                            label="🎯 Tema",
                            value="geral",
                            info="Selecione um tema ou deixe em automático"
                        )
                        
                        question = gr.Textbox(
                            label="❓ Sua Pergunta",
                            placeholder="Digite sua pergunta sobre qualquer tema bíblico...",
                            lines=3
                        )
                        
                        with gr.Row():
                            submit_btn = gr.Button("🙏 Buscar Orientação", variant="primary")
                            clear_btn = gr.Button("🔄 Limpar", variant="secondary")

                    with gr.Column():
                        answer_output = gr.Textbox(
                            label="✨ Orientação",
                            lines=12
                        )
                        
                        with gr.Accordion("📚 Detalhes e Referências"):
                            metadata_output = gr.JSON(
                                label="📋 Informações Detalhadas"
                            )
                            
                        feedback = gr.Radio(
                            ["👍 Útil", "👎 Precisa Melhorar"],
                            label="📢 Sua Opinião"
                        )

            # Add more tabs and features...
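
        # Hypothetical event wiring (not in the original file): connect the
        # buttons to the counselor so a submitted question produces an answer
        # and its metadata; the feedback radio is left unwired here.
        def handle_question(user_question: str, selected_theme: str):
            if not user_question or not user_question.strip():
                return "Por favor, digite sua pergunta.", {}
            answer, metadata, _history = counselor.get_unique_response(user_question, selected_theme)
            return answer, metadata

        submit_btn.click(
            fn=handle_question,
            inputs=[question, theme],
            outputs=[answer_output, metadata_output],
        )
        clear_btn.click(
            fn=lambda: ("", "geral", "", None),
            inputs=None,
            outputs=[question, theme, answer_output, metadata_output],
        )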

    return demo

if __name__ == "__main__":
    try:
        logger.info("Iniciando aplicação aprimorada...")
        demo = create_enhanced_interface()
        demo.launch(
            server_name="0.0.0.0",
            share=True,
            show_error=True,
            server_port=7860
        )
    except Exception as e:
        logger.error(f"Erro ao iniciar aplicação: {str(e)}")
        raise