import anthropic
import os
import logging
from typing import Generator

logger = logging.getLogger(__name__)

class ChatProcessor:
    def __init__(self):
        """
        Initialize the chat processor with the Claude API client.

        Raises:
            ValueError: If the API key is not found
        """
        api_key = os.environ.get("ANTHROPIC_API_KEY")
        if not api_key:
            raise ValueError("ANTHROPIC_API_KEY was not found in the environment variables")

        self.client = anthropic.Anthropic(api_key=api_key)
        self.conversation_history = []

    def process_chat_input(self, message: str, lang_code: str) -> Generator[str, None, None]:
        """
        Process the chat input and stream the response in chunks.

        Args:
            message: The user's message
            lang_code: Language code used to set the response language

        Yields:
            str: Chunks of the response
        """
        try:
            self.conversation_history.append(f"Human: {message}")

            system_prompt = f"You are an AI assistant for AIdeaText. Respond in {lang_code}."

            # Note: only the current message is sent to the API; the
            # conversation history is kept locally for later retrieval.
            response = self.client.messages.create(
                model="claude-3-opus-20240229",
                max_tokens=300,
                temperature=0.7,
                system=system_prompt,
                messages=[
                    {
                        "role": "user",
                        "content": message
                    }
                ],
                stream=True
            )

            full_response = ""
            try:
                # The raw stream yields several event types (message_start,
                # content_block_start, ...); only content_block_delta events
                # carry text.
                for chunk in response:
                    if chunk.type == "content_block_delta" and hasattr(chunk.delta, "text"):
                        chunk_text = chunk.delta.text
                        yield chunk_text
                        full_response += chunk_text

                if full_response:
                    self.conversation_history.append(f"Assistant: {full_response}")

            except Exception as e:
                logger.error(f"Error while streaming the response: {str(e)}")
                yield f"Communication error: {str(e)}"

        except Exception as e:
            logger.error(f"Error in process_chat_input: {str(e)}")
            yield f"Error: {str(e)}"

    def get_conversation_history(self) -> list:
        """
        Return the conversation history.

        Returns:
            list: List of messages
        """
        return self.conversation_history

    def clear_history(self):
        """
        Clear the conversation history.
        """
        self.conversation_history = []
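

# A minimal usage sketch (an addition, not part of the original module) showing
# how the streaming generator can be consumed. It assumes ANTHROPIC_API_KEY is
# set in the environment and simply prints chunks as they arrive.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    processor = ChatProcessor()
    for chunk in processor.process_chat_input("Hello, what can you do?", "en"):
        print(chunk, end="", flush=True)
    print()
    print(processor.get_conversation_history())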