hqms committed
Commit 8578816 · 1 Parent(s): 04f526c

initial commit
Dockerfile ADDED
@@ -0,0 +1,53 @@
+ # syntax=docker/dockerfile:1
+
+ # Comments are provided throughout this file to help you get started.
+ # If you need more help, visit the Dockerfile reference guide at
+ # https://docs.docker.com/engine/reference/builder/
+
+ ARG PYTHON_VERSION=3.11.9
+ FROM python:${PYTHON_VERSION}-slim AS base
+
+ # Prevents Python from writing pyc files.
+ ENV PYTHONDONTWRITEBYTECODE=1
+
+ # Keeps Python from buffering stdout and stderr to avoid situations where
+ # the application crashes without emitting any logs due to buffering.
+ ENV PYTHONUNBUFFERED=1
+
+ WORKDIR /app
+
+ # Create a non-privileged user that the app will run under.
+ # See https://docs.docker.com/develop/develop-images/dockerfile_best-practices/#user
+ ARG UID=10001
+ RUN adduser \
+     --disabled-password \
+     --gecos "" \
+     --home "/nonexistent" \
+     --shell "/sbin/nologin" \
+     --no-create-home \
+     --uid "${UID}" \
+     appuser
+
+ # Download dependencies as a separate step to take advantage of Docker's caching.
+ # Leverage a cache mount to /root/.cache/pip to speed up subsequent builds.
+ # Leverage a bind mount to requirements.txt to avoid having to copy it into
+ # this layer.
+ RUN --mount=type=cache,target=/root/.cache/pip \
+     --mount=type=bind,source=requirements.txt,target=requirements.txt \
+     python -m pip install -r requirements.txt
+
+ # Create the non-privileged user's home directory and make it writable so
+ # downloaded model files have somewhere to cache.
+ RUN mkdir /nonexistent && chmod -cR 777 /nonexistent
+
+ # Switch to the non-privileged user to run the application.
+ USER appuser
+
+ # Copy the source code into the container.
+ COPY . .
+
+ # Expose the port that the application listens on (must match the CMD below).
+ EXPOSE 7860
+
+ # Run the application.
+ CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
__init__.py ADDED
File without changes
compose.yaml ADDED
@@ -0,0 +1,50 @@
+ # Comments are provided throughout this file to help you get started.
+ # If you need more help, visit the Docker compose reference guide at
+ # https://docs.docker.com/compose/compose-file/
+
+ # Here the instructions define your application as a service called "server".
+ # This service is built from the Dockerfile in the current directory.
+ # You can add other services your application may depend on here, such as a
+ # database or a cache. For examples, see the Awesome Compose repository:
+ # https://github.com/docker/awesome-compose
+ services:
+   server:
+     build:
+       context: .
+     ports:
+       # Host port 8001 maps to container port 7860, where uvicorn listens.
+       - 8001:7860
+
+ # The commented-out section below is an example of how to define a PostgreSQL
+ # database that your application can use. `depends_on` tells Docker Compose to
+ # start the database before your application. The `db-data` volume persists the
+ # database data between container restarts. The `db-password` secret is used
+ # to set the database password. You must create `db/password.txt` and add
+ # a password of your choosing to it before running `docker compose up`.
+ #     depends_on:
+ #       db:
+ #         condition: service_healthy
+ #   db:
+ #     image: postgres
+ #     restart: always
+ #     user: postgres
+ #     secrets:
+ #       - db-password
+ #     volumes:
+ #       - db-data:/var/lib/postgresql/data
+ #     environment:
+ #       - POSTGRES_DB=example
+ #       - POSTGRES_PASSWORD_FILE=/run/secrets/db-password
+ #     expose:
+ #       - 5432
+ #     healthcheck:
+ #       test: [ "CMD", "pg_isready" ]
+ #       interval: 10s
+ #       timeout: 5s
+ #       retries: 5
+ # volumes:
+ #   db-data:
+ # secrets:
+ #   db-password:
+ #     file: db/password.txt
+
main.py ADDED
@@ -0,0 +1,16 @@
+ from fastapi import FastAPI
+ from fastapi.middleware.cors import CORSMiddleware
+ from routers import router
+ app = FastAPI()
+
+ app.include_router(router=router)
+
+ # Set all CORS enabled origins
+ app.add_middleware(
+     CORSMiddleware,
+     allow_origins=["*"],
+     allow_credentials=True,
+     allow_methods=["*"],
+     allow_headers=["*"],
+     expose_headers=["*"],
+ )
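For local development, a minimal sketch of serving `main:app` without Docker (this runner script is an assumption, mirroring the container's CMD; the port is taken from the Dockerfile):

    # run_dev.py -- hypothetical helper, equivalent to:
    #   uvicorn main:app --host 0.0.0.0 --port 7860
    import uvicorn
    from main import app

    if __name__ == "__main__":
        uvicorn.run(app, host="0.0.0.0", port=7860)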
model/__init__.py ADDED
@@ -0,0 +1,4 @@
+ label = {
+     "emotion": ["sedih", "marah", "takut", "cinta", "senang", "netral"],
+     "sentiment": ["positif", "netral", "negatif"]
+ }
model/emotion.py ADDED
@@ -0,0 +1,18 @@
+ from model.model import Model
+
+
+ class EmotionAnalysis(Model):
+     def __init__(self) -> None:
+         self.model_name = "thoriqfy/indobert-emotion-classification"
+         self.tasks = "emotion"
+         self.load_model(model_name=self.model_name, tasks=self.tasks)
+
+     def predict(self, sentences):
+         outputs = super().predict(sentences, self.tasks)
+
+         return {
+             "result": outputs["label"],
+             "score": outputs["score"]
+         }
+
+ emotion = EmotionAnalysis()
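For reference, a minimal usage sketch of the `emotion` singleton defined above (the sentence and the printed output are illustrative assumptions, not from this commit):

    from model.emotion import emotion

    # Predicts one of the labels declared in model/__init__.py:
    # ["sedih", "marah", "takut", "cinta", "senang", "netral"]
    print(emotion.predict("Saya sangat senang hari ini"))
    # e.g. {"result": "senang", "score": 0.97}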
model/llm.py ADDED
@@ -0,0 +1,50 @@
+ # from transformers import AutoTokenizer, pipeline, logging
+ # from auto_gptq import AutoGPTQForCausalLM, BaseQuantizeConfig
+
+ # model_name_or_path = "asyafiqe/Merak-7B-v3-Mini-Orca-Indo-GPTQ"
+ # model_basename = "Merak-7B-v3-Mini-Orca-Indo-GPTQ"
+
+ # use_triton = False
+
+ # tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True)
+
+ # model = AutoGPTQForCausalLM.from_quantized(model_name_or_path,
+ #                                            model_basename=model_basename,
+ #                                            use_safetensors=True,
+ #                                            trust_remote_code=True,
+ #                                            device="cuda:0",
+ #                                            use_triton=use_triton,
+ #                                            quantize_config=None)
+
+ # def predict(prompt):
+ #     # prompt = "Buat rencana untuk menghemat listrik di rumah"
+ #     system_message = "Anda adalah asisten AI. Anda akan diberi tugas. Anda harus menghasilkan jawaban yang rinci dan panjang.\n"
+ #     prompt_template = f'''SYSTEM: {system_message}
+ #     USER: {prompt}
+ #     ASSISTANT: '''
+
+ #     print("\n\n*** Generate:")
+
+ #     input_ids = tokenizer(prompt_template, return_tensors='pt').input_ids.cuda()
+ #     output = model.generate(inputs=input_ids, temperature=0.7, max_new_tokens=512)
+ #     print(tokenizer.decode(output[0]))
+
+ #     # Inference can also be done using transformers' pipeline
+
+ #     # Prevent printing spurious transformers error when using pipeline with AutoGPTQ
+ #     logging.set_verbosity(logging.CRITICAL)
+
+ #     print("*** Pipeline:")
+ #     pipe = pipeline(
+ #         "text-generation",
+ #         model=model,
+ #         tokenizer=tokenizer,
+ #         max_new_tokens=512,
+ #         temperature=0.7,
+ #         top_p=0.95,
+ #         repetition_penalty=1.15
+ #     )
+
+ #     result = pipe(prompt_template)[0]['generated_text']
+
+ #     return result
model/llm_agent.py ADDED
@@ -0,0 +1,25 @@
+ from model.tools import llm
+ from model.tools.wikipedia import wikipedia_tool
+ # from model.tools.sql_tools import sql_tool
+ from model.tools.predictor import word_problem_tool
+ from langchain.agents.agent_types import AgentType
+ from langchain.agents import initialize_agent
+
+ class LLMAgent(object):
+     def __init__(self) -> None:
+         self.agent = initialize_agent(
+             tools=[wikipedia_tool, word_problem_tool],  # sql_tool
+             llm=llm,
+             agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
+             verbose=False,
+             handle_parsing_errors=True,
+             max_execution_time=3600,  # Set the maximum execution time (in seconds)
+             max_iterations=15  # Set the maximum number of iterations
+         )
+
+     def prompt(self, text):
+         result = self.agent.invoke(text)
+
+         return result
+
+ llm_agent = LLMAgent()
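A hedged sketch of driving the agent above (the question is illustrative; with LangChain 0.1.x, `AgentExecutor.invoke` returns a dict with "input" and "output" keys):

    from model.llm_agent import llm_agent

    # The agent decides whether to call the Wikipedia or reasoning tool.
    result = llm_agent.prompt("When was the city of Jakarta founded?")
    print(result["output"])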
model/model.py ADDED
@@ -0,0 +1,68 @@
+ import torch
+ from transformers import pipeline
+ from transformers import AutoTokenizer, AutoModelForTokenClassification, AutoModelForSequenceClassification
+ from transformers import BertForSequenceClassification, BertTokenizer, BertConfig
+ from math import exp
+ from . import label
+
+
+ class Model(object):
+     def __init__(self) -> None:
+         self.model_name = "indolem/indobert-base-uncased"
+         self.tokenizer = None
+         self.model = None
+         self.config = None
+
+     def load_model(self, model_name: str = None, tasks: str = None):
+         if tasks == "emotion":
+             self.config = BertConfig.from_pretrained(model_name)
+
+         self.tokenizer = BertTokenizer.from_pretrained(model_name) \
+             if tasks == "emotion" else \
+             AutoTokenizer.from_pretrained(model_name)
+
+         if tasks == "emotion":
+             self.model = BertForSequenceClassification.from_pretrained(model_name, config=self.config)
+         elif tasks in ["ner", "pos-tagging"]:
+             # Both NER and POS tagging are token-classification models.
+             self.model = AutoModelForTokenClassification.from_pretrained(model_name)
+         else:
+             self.model = AutoModelForSequenceClassification.from_pretrained(model_name)
+
+     def predict(self, sentences, tasks: str = None):
+         encoded_input = self.tokenizer(sentences,
+                                        return_tensors="pt",
+                                        padding=True,
+                                        truncation=True)
+
+         with torch.no_grad():
+             if tasks in ["emotion", "sentiment"]:
+                 outputs = self.model(**encoded_input)
+                 predicted_class = torch.argmax(outputs.logits, dim=1).item()
+                 logits = outputs.logits.numpy()
+                 # Per-class sigmoid scores (not a softmax over all classes).
+                 probability = [exp(output) / (1 + exp(output)) for output in logits[0]]
+             else:
+                 recognizer = pipeline("token-classification", model=self.model, tokenizer=self.tokenizer)
+                 outputs = recognizer(sentences)
+
+         if tasks in ["emotion", "sentiment"]:
+             result = {"label": label[tasks][predicted_class],
+                       "score": probability[predicted_class]}
+         elif tasks in ["ner", "pos-tagging"]:
+             result = []
+             for output in outputs:
+                 result.append(
+                     {
+                         "entity": output["entity"],
+                         "score": float(output["score"]),
+                         "index": int(output["index"]),
+                         "word": output["word"],
+                         "start": int(output["start"]),
+                         "end": int(output["end"])
+                     }
+                 )
+         else:
+             result = ""
+
+         return result
model/ner.py ADDED
@@ -0,0 +1,13 @@
+ from model.model import Model
+
+ class NER(Model):
+     def __init__(self) -> None:
+         self.model_name = "syafiqfaray/indobert-model-ner"
+         self.tasks = "ner"
+         self.load_model(model_name=self.model_name, tasks=self.tasks)
+
+     def predict(self, sentences):
+         outputs = super().predict(sentences, self.tasks)
+         return {"result": outputs}
+
+ name_entity = NER()
model/pos_tagging.py ADDED
@@ -0,0 +1,13 @@
+ from model.model import Model
+
+ class POSTagging(Model):
+     def __init__(self) -> None:
+         self.model_name = "w11wo/indonesian-roberta-base-posp-tagger"
+         self.tasks = "pos-tagging"
+         self.load_model(model_name=self.model_name, tasks=self.tasks)
+
+     def predict(self, sentences):
+         outputs = super().predict(sentences, self.tasks)
+         return {"result": outputs}
+
+ pos_tagging = POSTagging()
model/sentiment.py ADDED
@@ -0,0 +1,17 @@
+ from model.model import Model
+
+ class SentimentAnalysis(Model):
+     def __init__(self) -> None:
+         self.model_name = "crypter70/IndoBERT-Sentiment-Analysis"
+         self.tasks = "sentiment"
+         self.load_model(model_name=self.model_name, tasks=self.tasks)
+
+     def predict(self, sentences):
+         outputs = super().predict(sentences, self.tasks)
+
+         return {
+             "result": outputs["label"],
+             "score": outputs["score"]
+         }
+
+ sentiment = SentimentAnalysis()
model/tools/__init__.py ADDED
@@ -0,0 +1,5 @@
+ from langchain.chains import LLMChain
+ from langchain.agents import Tool
+ from langchain_community.llms import HuggingFaceHub
+
+ llm = HuggingFaceHub(repo_id="microsoft/Phi-3-mini-128k-instruct")
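Note that `HuggingFaceHub` calls the hosted Hugging Face Inference API, so an API token must be available when this module is imported. A sketch of the assumed setup (the token value is a placeholder):

    import os

    # LangChain's community integration reads this environment variable.
    os.environ["HUGGINGFACEHUB_API_TOKEN"] = "hf_..."  # hypothetical placeholder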
model/tools/predictor.py ADDED
@@ -0,0 +1,24 @@
+ from . import Tool, LLMChain, llm
+ from langchain.memory import ConversationBufferMemory
+ from langchain_core.messages import SystemMessage
+ from langchain_core.prompts.chat import (
+     ChatPromptTemplate,
+     HumanMessagePromptTemplate,
+     MessagesPlaceholder,
+ )
+
+ system_message = "You are an AI Assistant. You need to give a crystal clear answer.\n"
+ template_messages = [
+     SystemMessage(content=system_message),
+     MessagesPlaceholder(variable_name="chat_history"),
+     HumanMessagePromptTemplate.from_template("{text}"),
+ ]
+ prompt_template = ChatPromptTemplate.from_messages(template_messages)
+ memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
+
+ word_problem_chain = LLMChain(llm=llm, prompt=prompt_template, memory=memory)
+ word_problem_tool = Tool.from_function(
+     name="Reasoning Tool",
+     func=word_problem_chain.run,
+     description="Useful for when you need to answer logic-based/reasoning questions.",
+ )
model/tools/sql_tools.py ADDED
@@ -0,0 +1,20 @@
+ # from llama_index.core.tools import QueryEngineTool
+ # from sqlalchemy import create_engine
+
+
+ # username = "test"
+ # password = "test"
+ # host = "localhost"
+ # port = "5432"
+ # mydatabase = "database"
+ # # Initialize your query engine (replace `engine` with your actual query engine)
+ # pg_uri = f"postgresql+psycopg2://{username}:{password}@{host}:{port}/{mydatabase}"
+ # engine = create_engine(pg_uri)
+
+ # # Create a tool configuration
+ # sql_tool = QueryEngineTool.from_defaults(
+ #     engine,
+ #     name="SQLTool",
+ #     description="Searching from DB",
+ #     return_direct=True,
+ # )
model/tools/wikipedia.py ADDED
@@ -0,0 +1,11 @@
+ from . import Tool
+ from langchain_community.utilities import WikipediaAPIWrapper
+
+ wikipedia = WikipediaAPIWrapper()
+ # Wikipedia Tool
+ wikipedia_tool = Tool(
+     name="Wikipedia",
+     func=wikipedia.run,
+     description="A useful tool for searching Wikipedia to find information on world events, issues, dates, "
+                 "years, etc. Worth using for general topics. Use precise questions.",
+ )
model/topic_modeling.py ADDED
File without changes
requirements.txt ADDED
@@ -0,0 +1,120 @@
+ accelerate==0.30.1
+ aiohttp==3.9.5
+ aiosignal==1.3.1
+ annotated-types==0.6.0
+ anyio==4.3.0
+ attrs==23.2.0
+ auto-gptq==0.7.1
+ beautifulsoup4==4.12.3
+ certifi==2024.2.2
+ charset-normalizer==3.3.2
+ click==8.1.7
+ dataclasses-json==0.6.6
+ datasets==2.19.1
+ Deprecated==1.2.14
+ dill==0.3.8
+ dirtyjson==1.0.8
+ distro==1.9.0
+ dnspython==2.6.1
+ email_validator==2.1.1
+ fastapi==0.111.0
+ fastapi-cli==0.0.3
+ filelock==3.14.0
+ frozenlist==1.4.1
+ fsspec==2024.3.1
+ gekko==1.1.1
+ greenlet==3.0.3
+ h11==0.14.0
+ httpcore==1.0.5
+ httptools==0.6.1
+ httpx==0.27.0
+ huggingface-hub==0.23.0
+ idna==3.7
+ Jinja2==3.1.4
+ joblib==1.4.2
+ jsonpatch==1.33
+ jsonpointer==2.4
+ langchain==0.1.20
+ langchain-community==0.0.38
+ langchain-core==0.1.52
+ langchain-text-splitters==0.0.1
+ langsmith==0.1.57
+ llama-index==0.10.36
+ llama-index-agent-openai==0.2.4
+ llama-index-cli==0.1.12
+ llama-index-core==0.10.36
+ llama-index-embeddings-openai==0.1.9
+ llama-index-indices-managed-llama-cloud==0.1.6
+ llama-index-legacy==0.9.48
+ llama-index-llms-openai==0.1.18
+ llama-index-multi-modal-llms-openai==0.1.5
+ llama-index-program-openai==0.1.6
+ llama-index-question-gen-openai==0.1.3
+ llama-index-readers-file==0.1.22
+ llama-index-readers-llama-parse==0.1.4
+ llama-parse==0.4.2
+ llamaindex-py-client==0.1.19
+ markdown-it-py==3.0.0
+ MarkupSafe==2.1.5
+ marshmallow==3.21.2
+ mdurl==0.1.2
+ mpmath==1.3.0
+ multidict==6.0.5
+ multiprocess==0.70.16
+ mypy-extensions==1.0.0
+ nest-asyncio==1.6.0
+ networkx==3.3
+ nltk==3.8.1
+ numpy==1.26.4
+ openai==1.28.1
+ orjson==3.10.3
+ packaging==23.2
+ pandas==2.2.2
+ peft==0.10.0
+ pillow==10.3.0
+ psutil==5.9.8
+ pyarrow==16.0.0
+ pyarrow-hotfix==0.6
+ pydantic==2.7.1
+ pydantic_core==2.18.2
+ Pygments==2.18.0
+ pypdf==4.2.0
+ python-dateutil==2.9.0.post0
+ python-dotenv==1.0.1
+ python-multipart==0.0.9
+ pytz==2024.1
+ PyYAML==6.0.1
+ regex==2024.5.10
+ requests==2.31.0
+ rich==13.7.1
+ rouge==1.0.1
+ safetensors==0.4.3
+ sentencepiece==0.2.0
+ shellingham==1.5.4
+ six==1.16.0
+ sniffio==1.3.1
+ soupsieve==2.5
+ SQLAlchemy==2.0.30
+ starlette==0.37.2
+ striprtf==0.0.26
+ sympy==1.12
+ tenacity==8.3.0
+ tiktoken==0.6.0
+ tokenizers==0.19.1
+ torch==2.3.0
+ tqdm==4.66.4
+ transformers==4.40.2
+ typer==0.12.3
+ typing-inspect==0.9.0
+ typing_extensions==4.11.0
+ tzdata==2024.1
+ ujson==5.9.0
+ urllib3==2.2.1
+ uvicorn==0.29.0
+ uvloop==0.19.0
+ watchfiles==0.21.0
+ websockets==12.0
+ wikipedia==1.4.0
+ wrapt==1.16.0
+ xxhash==3.4.1
+ yarl==1.9.4
routers/__init__.py ADDED
@@ -0,0 +1,15 @@
+ from .emotion import emotion_analysis
+ from .ner import ner
+ from .prompt import llm_prompt
+ from .sentiment import sentiment_analysis
+ from .pos_tagging import pos
+ from .topic_modeling import topic_modeling
+ from fastapi import APIRouter
+
+ router = APIRouter()
+ router.add_api_route("/emotion-analysis", emotion_analysis.predict, methods=["POST"])
+ router.add_api_route("/ner", ner.predict, methods=["POST"])
+ router.add_api_route("/pos-tagging", pos.predict, methods=["POST"])
+ router.add_api_route("/prompt", llm_prompt.prompt, methods=["POST"])
+ router.add_api_route("/sentiment-analysis", sentiment_analysis.predict, methods=["POST"])
+ # router.add_api_route("/topic-modeling", topic_modeling.predict, methods=["POST"])
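Once the server is up, each route accepts a JSON body with a "text" field (every handler reads `req.get("text")`). A client example, assuming the host port 8001 mapped in compose.yaml (the sentence and the printed result are illustrative):

    import requests

    resp = requests.post(
        "http://localhost:8001/sentiment-analysis",
        json={"text": "Pelayanan restoran ini sangat baik"},
    )
    print(resp.json())  # e.g. {"result": "positif", "score": 0.99}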
routers/emotion.py ADDED
@@ -0,0 +1,13 @@
+ from model.emotion import emotion
+
+ class EmotionAnalysis(object):
+     def __init__(self):
+         pass
+
+     def predict(self, req: dict):
+         text = req.get("text")
+         result = emotion.predict(text)
+
+         return result
+
+ emotion_analysis = EmotionAnalysis()
routers/ner.py ADDED
@@ -0,0 +1,13 @@
+ from model.ner import name_entity
+
+ class NER(object):
+     def __init__(self):
+         pass
+
+     def predict(self, req: dict):
+         text = req.get("text")
+         result = name_entity.predict(text)
+
+         return result
+
+ ner = NER()
routers/pos_tagging.py ADDED
@@ -0,0 +1,14 @@
+ from model.pos_tagging import pos_tagging
+
+ class POSTagging(object):
+     def __init__(self):
+         pass
+
+     def predict(self, req: dict):
+         text = req.get("text")
+         # Call the model singleton, not this wrapper's own predict (which would recurse).
+         result = pos_tagging.predict(text)
+
+         return result
+
+ pos = POSTagging()
routers/prompt.py ADDED
@@ -0,0 +1,14 @@
+ from model.llm_agent import llm_agent
+
+
+ class LLMPrompt(object):
+     def __init__(self):
+         pass
+
+     def prompt(self, req: dict):
+         text = req.get("text")
+         result = llm_agent.prompt(text)
+
+         return {"result": result}
+
+ llm_prompt = LLMPrompt()
routers/sentiment.py ADDED
@@ -0,0 +1,13 @@
+ from model.sentiment import sentiment
+
+ class SentimentAnalysis(object):
+     def __init__(self):
+         pass
+
+     def predict(self, req: dict):
+         text = req.get("text")
+         result = sentiment.predict(text)
+
+         return result
+
+ sentiment_analysis = SentimentAnalysis()
routers/topic_modeling.py ADDED
@@ -0,0 +1,12 @@
+ class TopicModeling(object):
+     def __init__(self):
+         pass
+
+     def predict(self, req: dict):
+         text = req.get("text")
+
+         # Placeholder: topic modeling is not implemented yet (model/topic_modeling.py
+         # is empty), so a fixed dummy result is returned regardless of the input text.
+         return {"result": "Jakarta", "score": 0.0}
+
+ topic_modeling = TopicModeling()