File size: 2,139 Bytes
646e683
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
import logging
import random

from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity
from core.model_runtime.errors.invoke import InvokeBadRequestError
from core.model_runtime.model_providers.openai.moderation.moderation import OpenAIModerationModel
from extensions.ext_hosting_provider import hosting_configuration
from models.provider import ProviderType

logger = logging.getLogger(__name__)


def check_moderation(model_config: ModelConfigWithCredentialsEntity, text: str) -> bool:
    """Check *text* against the hosted OpenAI moderation endpoint.

    Moderation only applies when the hosting moderation config is enabled,
    the hosted "openai" provider is enabled, and the request uses the SYSTEM
    provider type with a provider listed in the moderation config. To bound
    cost, a single random 2000-character chunk of *text* is sampled and sent
    to the moderation model instead of the full text.

    Args:
        model_config: Model configuration carrying the provider in use.
        text: The text to moderate.

    Returns:
        True if the sampled chunk was flagged by moderation (or if *text* is
        empty while moderation applies); False if moderation does not apply
        or the chunk passed.

    Raises:
        InvokeBadRequestError: if the moderation API call itself fails.
    """
    moderation_config = hosting_configuration.moderation_config
    openai_provider = hosting_configuration.provider_map.get("openai")

    # Guard clauses: bail out early when hosted moderation does not apply.
    if not (moderation_config and moderation_config.enabled is True):
        return False
    if openai_provider is None or openai_provider.enabled is not True:
        return False

    using_provider_type = model_config.provider_model_bundle.configuration.using_provider_type
    provider_name = model_config.provider
    if using_provider_type != ProviderType.SYSTEM or provider_name not in moderation_config.providers:
        return False

    # Split into 2000-character chunks and sample one at random to keep the
    # moderation call cheap for long inputs.
    chunk_size = 2000
    text_chunks = [text[i : i + chunk_size] for i in range(0, len(text), chunk_size)]

    if not text_chunks:
        # NOTE(review): empty text returns True (i.e. "flagged") — preserved
        # from the original implementation; confirm this is intentional.
        return True

    text_chunk = random.choice(text_chunks)

    try:
        model_type_instance = OpenAIModerationModel()
        moderation_result = model_type_instance.invoke(
            model="text-moderation-stable",
            credentials=openai_provider.credentials or {},
            text=text_chunk,
        )
    except Exception as ex:
        # Lazy %-args: the message is only formatted when actually emitted.
        logger.exception("Fails to check moderation, provider_name: %s", provider_name)
        # Chain the original failure so the root cause survives in tracebacks.
        raise InvokeBadRequestError("Rate limit exceeded, please try again later.") from ex

    return moderation_result is True