import requests
import json
from typing import Generator, Optional
import os
from dotenv import load_dotenv
import re
from fastapi import FastAPI, HTTPException, Query
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
# Load environment variables from .env file
load_dotenv()
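# The .env file is expected to provide CHATv1, the URL of the upstream chat
# API endpoint (read in CHATv1.__init__ below).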
app = FastAPI()
class ChatRequest(BaseModel):
    user_prompt: str
    system_prompt: Optional[str] = "You are a helpful AI assistant."
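# A hypothetical example body for the model above:
# {"user_prompt": "Hi there", "system_prompt": "You are a helpful AI assistant."}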
class CHATv1:
    """
    A class to interact with the CHATv1.info API.
    """
    def __init__(
        self,
        timeout: int = 300,
        proxies: Optional[dict] = None,
    ):
        """
        Initializes the CHATv1.info API client with the given parameters.

        Args:
            timeout (int, optional): HTTP request timeout in seconds. Defaults to 300.
            proxies (dict, optional): HTTP request proxies. Defaults to None.
        """
        self.session = requests.Session()
        # Read the API endpoint from the environment; fail fast if it is missing.
        self.api_endpoint = os.getenv("CHATv1")
        if not self.api_endpoint:
            raise ValueError("The CHATv1 environment variable is not set.")
        self.timeout = timeout
        self.headers = {
            "content-type": "application/json",
        }
        self.session.headers.update(self.headers)
        self.session.proxies = proxies or {}
    def ask(self, user_prompt: str, system_prompt: str) -> Generator[str, None, None]:
        """
        Chat with AI
        Args:
            user_prompt (str): User's prompt to be sent.
            system_prompt (str): System prompt to set the AI's behavior.
        Yields:
            str: Incremental text responses.
        """
        payload = {
            "messages": [
                {
                    "role": "system",
                    "content": system_prompt
                },
                {
                    "role": "user",
                    "content": user_prompt
                }
            ]
        }
        response = self.session.post(self.api_endpoint, json=payload, stream=True, timeout=self.timeout)
        
        if not response.ok:
            raise HTTPException(
                status_code=response.status_code,
                detail=f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
            )
        buffer = ""
        for line in response.iter_lines(decode_unicode=True):
            if line:
                if line.startswith("data: "):
                    data_str = line[6:]
                    try:
                        data_json = json.loads(data_str)
                        content = data_json.get("data", "")
                        if content:
                            buffer += content
                            lines = buffer.split('\n')
                            if len(lines) > 1:
                                for complete_line in lines[:-1]:
                                    yield self.format_text(complete_line) + '\n'
                                buffer = lines[-1]
                    except json.JSONDecodeError:
                        pass
        # Flush any remaining partial line, then signal end of stream.
        if buffer:
            yield self.format_text(buffer)
        yield "[DONE]"
    def format_text(self, text: str) -> str:
        """Convert *emphasis* markers in the streamed text to HTML <i> tags."""
        text = re.sub(r'\*(.*?)\*', r'<i>\1</i>', text)
        return text
    def chat(self, user_prompt: str, system_prompt: str) -> Generator[str, None, None]:
        """Stream responses as string chunks"""
        return self.ask(user_prompt, system_prompt)
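# Example FastAPI wiring for the client above. A minimal sketch: the "/chat"
# route path, the streaming media type, and the uvicorn host/port below are
# assumptions, not taken from the original file.
chat_client = CHATv1()

@app.post("/chat")
def chat_endpoint(request: ChatRequest):
    """Stream the AI's reply for a JSON body shaped like ChatRequest."""
    return StreamingResponse(
        chat_client.chat(request.user_prompt, request.system_prompt),
        media_type="text/plain",
    )

@app.get("/chat")
def chat_query_endpoint(
    user_prompt: str = Query(..., description="User's prompt to be sent."),
    system_prompt: str = Query(
        "You are a helpful AI assistant.",
        description="System prompt to set the AI's behavior.",
    ),
):
    """Stream the AI's reply for prompts passed as query parameters."""
    return StreamingResponse(
        chat_client.chat(user_prompt, system_prompt),
        media_type="text/plain",
    )

if __name__ == "__main__":
    # Local development entry point; host and port are assumptions.
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)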
