avfranco commited on
Commit
36e511f
·
1 Parent(s): 0484f6a

ea4all-gradio-mcp-hackathon-submission

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitignore +179 -0
  2. Dockerfile +22 -0
  3. README.md +116 -7
  4. app.py +32 -0
  5. ea4all/.gitattributes +1 -0
  6. ea4all/__main__.py +21 -0
  7. ea4all/ea4all_mcp.py +386 -0
  8. ea4all/ea4all_store/APM-ea4all (test-split).xlsx +0 -0
  9. ea4all/ea4all_store/apm_qna_mock.txt +4 -0
  10. ea4all/ea4all_store/dbr.txt +32 -0
  11. ea4all/ea4all_store/ea4all-portfolio-management.csv +31 -0
  12. ea4all/ea4all_store/ea4all_overview.txt +36 -0
  13. ea4all/ea4all_store/pmo_qna_mock.txt +3 -0
  14. ea4all/ea4all_store/reference_architecture_dbr_assistant.txt +9 -0
  15. ea4all/ea4all_store/reference_architecture_dbr_demo.txt +43 -0
  16. ea4all/ea4all_store/strategic_principles.txt +40 -0
  17. ea4all/main.py +6 -0
  18. ea4all/packages.txt +1 -0
  19. ea4all/src/__init__.py +4 -0
  20. ea4all/src/ea4all_apm/configuration.py +35 -0
  21. ea4all/src/ea4all_apm/graph.py +906 -0
  22. ea4all/src/ea4all_apm/prompts.py +292 -0
  23. ea4all/src/ea4all_apm/state.py +59 -0
  24. ea4all/src/ea4all_gra/configuration.py +46 -0
  25. ea4all/src/ea4all_gra/data.py +131 -0
  26. ea4all/src/ea4all_gra/graph.py +410 -0
  27. ea4all/src/ea4all_gra/state.py +85 -0
  28. ea4all/src/ea4all_gra/togaf_task1/graph.py +125 -0
  29. ea4all/src/ea4all_gra/togaf_task1/state.py +53 -0
  30. ea4all/src/ea4all_gra/togaf_task2/graph.py +457 -0
  31. ea4all/src/ea4all_gra/togaf_task2/state.py +50 -0
  32. ea4all/src/ea4all_gra/togaf_task3/graph.py +246 -0
  33. ea4all/src/ea4all_gra/togaf_task3/state.py +65 -0
  34. ea4all/src/ea4all_gra/utils.py +125 -0
  35. ea4all/src/ea4all_indexer/__init__.py +5 -0
  36. ea4all/src/ea4all_indexer/configuration.py +22 -0
  37. ea4all/src/ea4all_indexer/graph.py +57 -0
  38. ea4all/src/ea4all_indexer/state.py +44 -0
  39. ea4all/src/ea4all_vqa/configuration.py +42 -0
  40. ea4all/src/ea4all_vqa/graph.py +401 -0
  41. ea4all/src/ea4all_vqa/state.py +64 -0
  42. ea4all/src/graph.py +254 -0
  43. ea4all/src/shared/__init__.py +1 -0
  44. ea4all/src/shared/configuration.py +165 -0
  45. ea4all/src/shared/prompts.py +393 -0
  46. ea4all/src/shared/state.py +84 -0
  47. ea4all/src/shared/utils.py +487 -0
  48. ea4all/src/shared/vectorstore.py +196 -0
  49. ea4all/src/tools/tools.py +111 -0
  50. ea4all/utils/utils.py +182 -0
.gitignore ADDED
@@ -0,0 +1,179 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ # C extensions
7
+ *.so
8
+ .DS_Store
9
+
10
+ # Projects hosted @Hugging Face
11
+ ../ea4all-agentic-live/ # EA4ALL Agentic Live
12
+ ../ea4all-agentic-system/ # EA4ALL Agentic System
13
+ ../ea4all-agentic-staging/ # EA4ALL Agentic Build/Test
14
+
15
+ # EA4ALL artifacts
16
+ *.wav
17
+ *.png
18
+ *.faiss
19
+ *.pkl
20
+ togaf_runway_*
21
+
22
+ # Langchain / Langgraph
23
+ .langgraph_api/
24
+ lgs-dev-start
25
+
26
+
27
+ # Distribution / packaging
28
+ .Python
29
+ build/
30
+ develop-eggs/
31
+ dist/
32
+ downloads/
33
+ eggs/
34
+ .eggs/
35
+ lib/
36
+ lib64/
37
+ parts/
38
+ sdist/
39
+ var/
40
+ wheels/
41
+ share/python-wheels/
42
+ *.egg-info/
43
+ .installed.cfg
44
+ *.egg
45
+ MANIFEST
46
+
47
+ # PyInstaller
48
+ # Usually these files are written by a python script from a template
49
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
50
+ *.manifest
51
+ *.spec
52
+
53
+ # Installer logs
54
+ pip-log.txt
55
+ pip-delete-this-directory.txt
56
+
57
+ # Unit test / coverage reports
58
+ htmlcov/
59
+ .tox/
60
+ .nox/
61
+ .coverage
62
+ .coverage.*
63
+ .cache
64
+ nosetests.xml
65
+ coverage.xml
66
+ *.cover
67
+ *.py,cover
68
+ .hypothesis/
69
+ .pytest_cache/
70
+ cover/
71
+
72
+ # Translations
73
+ *.mo
74
+ *.pot
75
+
76
+ # Django stuff:
77
+ *.log
78
+ local_settings.py
79
+ db.sqlite3
80
+ db.sqlite3-journal
81
+
82
+ # Flask stuff:
83
+ instance/
84
+ .webassets-cache
85
+
86
+ # Scrapy stuff:
87
+ .scrapy
88
+
89
+ # Sphinx documentation
90
+ docs/_build/
91
+
92
+ # PyBuilder
93
+ .pybuilder/
94
+ target/
95
+
96
+ # Jupyter Notebook
97
+ .ipynb_checkpoints
98
+
99
+ # IPython
100
+ profile_default/
101
+ ipython_config.py
102
+
103
+ # pyenv
104
+ # For a library or package, you might want to ignore these files since the code is
105
+ # intended to run in multiple environments; otherwise, check them in:
106
+ # .python-version
107
+
108
+ # pipenv
109
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
110
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
111
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
112
+ # install all needed dependencies.
113
+ #Pipfile.lock
114
+
115
+ # poetry
116
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
117
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
118
+ # commonly ignored for libraries.
119
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
120
+ #poetry.lock
121
+
122
+ # pdm
123
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
124
+ #pdm.lock
125
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
126
+ # in version control.
127
+ # https://pdm.fming.dev/#use-with-ide
128
+ .pdm.toml
129
+
130
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
131
+ __pypackages__/
132
+
133
+ # Celery stuff
134
+ celerybeat-schedule
135
+ celerybeat.pid
136
+
137
+ # SageMath parsed files
138
+ *.sage.py
139
+
140
+ # Environments
141
+ .envrc
142
+ .env
143
+ .venv
144
+ env/
145
+ venv/
146
+ ENV/
147
+ env.bak/
148
+ venv.bak/
149
+
150
+ # Spyder project settings
151
+ .spyderproject
152
+ .spyproject
153
+
154
+ # Rope project settings
155
+ .ropeproject
156
+
157
+ # mkdocs documentation
158
+ /site
159
+
160
+ # mypy
161
+ .mypy_cache/
162
+ .dmypy.json
163
+ dmypy.json
164
+
165
+ # Pyre type checker
166
+ .pyre/
167
+
168
+ # pytype static type analyzer
169
+ .pytype/
170
+
171
+ # Cython debug symbols
172
+ cython_debug/
173
+
174
+ # PyCharm
175
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
176
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
177
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
178
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
179
+ #.idea/
Dockerfile ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ FROM python:3.12.10
2
+
3
+ # Set up a new user named "user" with user ID 1000
4
+ RUN useradd -m -u 1000 user
5
+
6
+ # Switch to the "user" user
7
+ USER user
8
+
9
+ # Set home to the user's home directory
10
+ ENV HOME=/home/user \
11
+ PATH=/home/user/.local/bin:/tmp/gradio:$PATH
12
+
13
+ # Set the working directory to the user's home directory
14
+ WORKDIR $HOME
15
+
16
+ # Install graphviz dependency
17
+ USER root
18
+ RUN apt-get clean
19
+ RUN apt-get update --fix-missing \
20
+ && xargs -a packages.txt apt-get install -y \
21
+ && apt-get clean
22
+ USER user
README.md CHANGED
@@ -1,14 +1,123 @@
1
  ---
2
- title: Enterprise Architecture For All
3
- emoji: 🐨
4
- colorFrom: yellow
5
- colorTo: green
6
  sdk: gradio
7
- sdk_version: 5.33.1
 
8
  app_file: app.py
9
  pinned: false
10
- license: mit
11
  short_description: EA4ALL Gradio MCP Server
 
 
12
  ---
13
 
14
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
+ title: Enterprise Architecture for All
3
+ emoji: 👁
4
+ colorFrom: green
5
+ colorTo: blue
6
  sdk: gradio
7
+ sdk_version: 5.32.1
8
+ python_version: 3.12.10
9
  app_file: app.py
10
  pinned: false
11
+ license: apache-2.0
12
  short_description: EA4ALL Gradio MCP Server
13
+ tags:
14
+ - mcp-server-track
15
  ---
16
 
17
+ ## Architect Agentic Companion
18
+
19
+ ![Agent System Container](ea4all/images/ea4all_architecture.png)
20
+
21
+
22
+ ## Background
23
+
24
+ - `Trigger`: How disruptive may Generative AI be for Enterprise Architecture Capability (People, Process and Tools)?
25
+ - `Motivation`: Master GenAI while disrupting Enterprise Architecture to empower individuals and organisations with ability to harness EA value and make people lives better, safer and more efficient.
26
+ - `Ability`: Exploit my carrer background and skillset across system development, business accumen, innovation and architecture to accelerate GenAI exploration while learning new things.
27
+
28
+ > That's how the `EA4ALL-Agentic system` was born and ever since continuously evolving to build an ecosystem of **Architects Agent partners**.
29
+
30
+ ## Benefits
31
+
32
+ - `Empower individuals with Knowledge`: understand and talk about Business and Technology strategy, IT landscape, Architectue Artefacts in a single click of button.
33
+ - `Increase efficiency and productivity`: generate a documented architecture with diagram, model and descriptions. Accelerate Business Requirement identification and translation to Target Reference Architecture. Automated steps and reduced times for task execution.
34
+ - `Improve agility`: plan, execute, review and iterate over EA inputs and outputs. Increase the ability to adapt, transform and execute at pace and scale in response to changes in strategy, threats and opportunities.
35
+ - `Increase collaboration`: democratise architecture work and knowledge with anyone using natural language.
36
+ - `Cost optimisation`: intelligent allocation of architects time for valuable business tasks.
37
+ - `Business Growth`: create / re-use of (new) products and services, and people experience enhancements.
38
+ - `Resilience`: assess solution are secured by design, poses any risk and how to mitigate, apply best-practices.
39
+ - `Streamline`: the process of managing and utilizsng architectural knowledge and tools in a user-friendly way.
40
+
41
+ ## Knowledge context
42
+
43
+ Synthetic datasets are used to exemplify the Agentic System capabilities.
44
+
45
+ ### IT Landscape Question and Answering
46
+
47
+ - Application name
48
+ - Business fit: appropriate, inadequate, perfect
49
+ - Technical fit: adequate, insufficient, perfect
50
+ - Business_criticality: operational, medium, high, critical
51
+ - Roadmap: maintain, invest, divers
52
+ - Architect responsible
53
+ - Hosting: user device, on-premise, IaaS, SaaS
54
+ - Business capability
55
+ - Business domain
56
+ - Description
57
+
58
+ - Bring Your Own Data: upload your own IT landscape data
59
+ - Application Portfolio Management
60
+ - xlsx tabular format
61
+ - first row (header) with fields name (colums)
62
+
63
+ ### Architecture Diagram Visual Question and Answering
64
+
65
+ - Architecture Visual Artefacts
66
+ - jpeg, png
67
+
68
+ **Disclaimer**
69
+ - Your data & image are not accessible or shared with anyone else nor used for training purpose.
70
+ - EA4ALL-VQA Agent should be used ONLY FOR Architecture Diagram images.
71
+ - This feature should NOT BE USED to process inappropriate content.
72
+
73
+ ### Reference Architecture Generation
74
+
75
+ - Clock in/out Use-case
76
+
77
+ ## Log / Traceability
78
+
79
+ For purpose of continuous improvement, agentic workflows are logged in.
80
+
81
+ ## Architecture
82
+
83
+ <italic>Core architecture built upon Python, Langchain, Langgraph, Langsmith, and Gradio.<italic>
84
+
85
+ - Python
86
+ - Pandas
87
+ - Langchain
88
+ - Langgraph
89
+ - Huggingface
90
+ - CrewAI
91
+
92
+ - RAG (Retrieval Augmented Generation)
93
+ - Vectorstore
94
+
95
+ - Prompt Engineering
96
+ - Strategy & tactics: Task / Sub-tasks
97
+ - Agentic Workflow
98
+
99
+ - Models:
100
+ - OpenAI
101
+ - Meta/Llama
102
+ - Google Gemini
103
+
104
+ - Hierarchical-Agent-Teams:
105
+ - Tabular-question-answering over your own document
106
+ - Supervisor
107
+ - Visual Questions Answering
108
+ - Diagram Component Analysis
109
+ - Risk & Vulnerability and Mitigation options
110
+ - Well-Architecture Design Assessment
111
+ - Vision and Target Architecture
112
+ - Architect Demand Management
113
+
114
+ - User Interface
115
+ - Gradio
116
+
117
+ - Observability & Evaluation
118
+ - Langsmith
119
+
120
+ - Hosting
121
+ - Huggingface Space
122
+
123
+ Check out the configuration reference at [spaces-config-reference](https://huggingface.co/docs/hub/spaces-config-reference)
app.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #import gradio as gr
2
+
3
+ #def greet(name):
4
+ # return "Hello " + name + "!!"
5
+
6
+ #demo = gr.Interface(fn=greet, inputs="text", outputs="text")
7
+ #demo.launch()
8
+
9
+ ##version 2025-05-17
10
+ # LangChain environment variables
11
+ from pathlib import Path
12
+ import sys, os
13
+
14
+ if __name__ == '__main__':
15
+
16
+ current_path = Path.cwd()
17
+ sys.path.append(os.path.join(str(current_path), 'ea4all', 'src'))
18
+
19
+ print (f"Current path: {current_path} \n Parent {current_path.parent} \n Root path: {str(Path.cwd())}")
20
+
21
+ #Set environment variables for build deployment (local run)
22
+ ea4all_stage = os.environ["EA4ALL_ENV"]
23
+ if ea4all_stage in ('MCP'):
24
+ project_name = "ea4all-gradio-agent-mcp-hackathon"
25
+ runname = "ea4all-gradio-agent-mcp-hackathon-run"
26
+ os.environ["LANGCHAIN_PROJECT"] = project_name # Optional: "default" is used if not set
27
+ os.environ['LANGCHAIN_RUNNAME'] = runname
28
+ os.environ['EA4ALL_ENV'] = ea4all_stage
29
+
30
+ #ea4all-agent-entry-point
31
+ from ea4all.__main__ import main
32
+ main()
ea4all/.gitattributes ADDED
@@ -0,0 +1 @@
 
 
1
+ *.png filter=lfs diff=lfs merge=lfs -text
ea4all/__main__.py ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from ea4all import ea4all_mcp as e4m
2
+ import os
3
+
4
+ def main() -> None:
5
+ #Launch UI
6
+ try:
7
+ e4m.ea4all_mcp.launch(
8
+ server_name=os.getenv("GRADIO_SERVER_NAME","0.0.0.0"),
9
+ server_port=None,
10
+ debug=os.getenv("GRADIO_DEBUG", "True").lower() in ("true", "1", "yes"),
11
+ ssr_mode=False,
12
+ mcp_server=True,
13
+ inbrowser=os.getenv("GRADIO_INBROWSER", "True").lower() in ("true", "1", "yes"),
14
+ #auth=("ea4all", "ea4a@@"),
15
+ auth_message="Please login with your credentials. Under development, will be public soon.",
16
+ )
17
+ except Exception as e:
18
+ print(f"Error loading: {e}")
19
+
20
+ if __name__ == "__main__":
21
+ main()
ea4all/ea4all_mcp.py ADDED
@@ -0,0 +1,386 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #CHANGELOG: 2025-06-04
2
+ ## Gradio Agents MCP Hackathon: retrofit to expose EA4ALL Agentic System Agents only
3
+ ## Greetings message not working
4
+ ## UI exposing too much tools, need to be refactored
5
+ from langchain.callbacks.tracers import LangChainTracer
6
+ from langchain.callbacks.tracers.langchain import wait_for_all_tracers
7
+ from langchain_core.messages import HumanMessage
8
+ from langchain_core.runnables import RunnableConfig
9
+
10
+ from ea4all.src.shared.configuration import BaseConfiguration, APM_MOCK_QNA, PMO_MOCK_QNA
11
+ from ea4all.src.ea4all_gra.configuration import AgentConfiguration as gra
12
+ from ea4all.src.ea4all_apm.graph import apm_graph
13
+ from ea4all.src.ea4all_vqa.graph import diagram_graph
14
+ from ea4all.src.ea4all_gra.graph import togaf_graph
15
+ from ea4all.src.ea4all_indexer.graph import indexer_graph
16
+ from ea4all.src.shared.utils import (
17
+ get_relevant_questions,
18
+ get_vqa_examples,
19
+ _join_paths,
20
+ EA4ALL_ARCHITECTURE,
21
+ EA4ALL_PODCAST,
22
+ )
23
+
24
+ #from ea4all.src.pmo_crew.crew_runner import run_pmo_crew
25
+
26
+ from typing import AsyncGenerator
27
+ import gradio as gr
28
+ from gradio import ChatMessage
29
+ import os
30
+ import uuid
31
+ import time
32
+ from PIL import Image
33
+
34
+ from ea4all.utils.utils import (
35
+ UIUtils,
36
+ ea4all_agent_init, get_image,
37
+ get_question_diagram_from_example,
38
+ on_image_update
39
+ )
40
+
41
+ TITLE = """
42
+ # Title
43
+
44
+ **Explore, Share, Together:** harness the value of `Enterprise Architecture in the era of Generative AI` with ready-to-use MCP Tools.\n
45
+
46
+ ## Overview
47
+ """
48
+
49
+ #Set LangSmith project
50
+ tracer = LangChainTracer(project_name=os.getenv('LANGCHAIN_PROJECT'))
51
+
52
+ config = RunnableConfig(
53
+ run_name = os.getenv('LANGCHAIN_RUNNAME', "ea4all-gradio-agent-mcp-hackathon-run"),
54
+ tags = [os.getenv('EA4ALL_ENV', "MCP")],
55
+ callbacks = [tracer],
56
+ recursion_limit = 25,
57
+ configurable = {"thread_id": uuid.uuid4()},
58
+ #stream_mode = "messages"
59
+ )
60
+
61
+ async def call_indexer_apm(config: RunnableConfig):
62
+ response = await indexer_graph.ainvoke(input={"docs":[]}, config=config)
63
+ return response
64
+
65
+ #ea4all-qna-agent-conversational-with-memory
66
+ async def run_qna_agentic_system(question: str) -> AsyncGenerator[list, None]:
67
+ """
68
+ description:
69
+ Handles conversational Q&A for the Application Landscape using an agentic system.
70
+ Args:
71
+ question (str): The user's question or message.
72
+ request (gr.Request): The Gradio request object for user identification.
73
+ Returns:
74
+ reponse: Response to user's architectural question.
75
+ """
76
+
77
+ format_response = ""
78
+ chat_memory = []
79
+ if not question:
80
+ format_response = "Hi, how are you today? To start using the EA4ALL MCP Tool, provide the required Inputs!"
81
+ chat_memory.append(ChatMessage(role="assistant", content=format_response))
82
+ else:
83
+ index = await call_indexer_apm(config) #call indexer to update the index
84
+ response = await apm_graph.ainvoke({"question": question}, config=config)
85
+ chat_memory.append(ChatMessage(role="assistant", content=response['generation']))
86
+
87
+ yield chat_memory
88
+
89
+ #Trigger Solution Architecture Diagram QnA
90
+ async def run_vqa_agentic_system(question: str, diagram: str, request: gr.Request) -> AsyncGenerator[list, None]:
91
+ """
92
+ description:
93
+ Handles Visual Question Answering (VQA) for uploaded architecture diagrams.
94
+ Args:
95
+ question (str): User's question about the Architecture Diagram.
96
+ diagram (str): Path to the diagram file.
97
+ Returns:
98
+ response: Response to user's question.
99
+ """
100
+
101
+ #capture user ip
102
+ #ea4all_user = e4u.get_user_identification(request)
103
+
104
+ """Handle file uploads and validate their types."""
105
+ allowed_file_types = ('JPEG', 'PNG')
106
+
107
+ message = {
108
+ 'text': question,
109
+ 'files': [diagram] if isinstance(diagram, str) else diagram
110
+ }
111
+
112
+ print("---CALLING VISUAL QUESTION ANSWERING AGENTIC SYSTEM---")
113
+ print(f"Prompt: {message}")
114
+
115
+ chat_memory = []
116
+ if message['files'] == []:
117
+ chat_memory.append(ChatMessage(role="assistant", content="Please upload an Architecture PNG, JPEG diagram to start!"))
118
+ yield chat_memory
119
+ else:
120
+ diagram = message['files'][-1] ##chat_memory[-1]['content'][-1]
121
+ msg = message['text'] ##chat_memory[-2]['content']
122
+ print(f"---DIAGRAM: {diagram}---")
123
+ try:
124
+ if msg == "":
125
+ msg = "Please describe this diagram."
126
+
127
+ with Image.open(diagram) as diagram_:
128
+ if diagram_.format not in allowed_file_types:
129
+ #chat_memory.append(ChatMessage(role="assistant", content="Invalid file type. Allowed file types are JPEG and PNG."))
130
+ print(f"---DIAGRAM: {diagram.format} is not a valid file type. Allowed file types are JPEG and PNG.---")
131
+ #else:
132
+ #'vqa_image = e4u.get_raw_image(diagram) #MOVED into Graph
133
+
134
+ vqa_image = diagram
135
+ response = await diagram_graph.ainvoke({"question":msg, "image": vqa_image}, config)
136
+ chat_memory.append(ChatMessage(role="assistant", content=response['messages'][-1].content if len(response['messages']) else response['safety_status']['description']))
137
+
138
+ yield chat_memory
139
+
140
+ except Exception as e:
141
+ yield (e.args[-1])
142
+
143
+ #Run Togaf Agentic System
144
+ async def run_reference_architecture_agentic_system(business_query: str) -> AsyncGenerator[list, str]:
145
+ """
146
+ description:
147
+ Generates a reference architecture blueprint based on a business requirement using the TOGAF agentic system.
148
+ Args:
149
+ business_query (str): Description of a business problem / requirement.
150
+ Returns:
151
+ response: High-level architecture blueprint and target diagram.
152
+ """
153
+
154
+ if len(business_query) < 20:
155
+ agent_response = "Please provide a valid Business Requirement content to start!"
156
+ yield([agent_response, None])
157
+ else:
158
+ inputs = {"business_query": [{"role": "user", "content": business_query}]} #user response
159
+ index = await call_indexer_apm(config) #call indexer to update the index
160
+ response = await togaf_graph.ainvoke(
161
+ input=inputs,
162
+ config=config
163
+ ) #astream not loading the graph
164
+ vision_target = response['vision_target']
165
+ architecture_runway = response['architecture_runway']
166
+ yield [vision_target, architecture_runway]
167
+
168
+ async def run_pmo_agentic_system(question:str) -> AsyncGenerator[list, None]:
169
+ """
170
+ description:
171
+ Answers questions about Project Portfolio Management and Architect Demand Management.
172
+ Args:
173
+ question (str): The user's question about project portfolio or resource management.
174
+ chat_memory: The conversation history.
175
+ Returns:
176
+ response: Architect Demand Allocation Report
177
+ """
178
+
179
+ format_response = ""
180
+ chat_memory = []
181
+ if not question:
182
+ format_response = "Hi, how are you today? To start our conversation, please chat your message!"
183
+ chat_memory.append(ChatMessage(role="assistant", content=format_response))
184
+ yield chat_memory
185
+
186
+ if not chat_memory:
187
+ chat_memory.append(ChatMessage(role="user", content=question))
188
+ yield chat_memory
189
+
190
+ inputs = {
191
+ "question": question,
192
+ "verbose": True, # optional flags
193
+ }
194
+
195
+ #yield run_pmo_crew(inputs)
196
+
197
+ #Blocks w/ ChatInterface, BYOD, About
198
+ with gr.Blocks(title="Your ArchitectGPT",fill_height=True, fill_width=True) as ea4all_mcp:
199
+
200
+ agentic_pmo_desc="""
201
+ Hi,
202
+ Provide project resource estimation for architecture work based on business requirements, skillset,
203
+ architects allocation, and any other relevant information to enable successful project solution delivery."""
204
+
205
+ agentic_qna_desc="""
206
+ Hi,
207
+ Improve Architect's ability to share knowledge, and provide valuable insights from IT landscape using natural language answering questions related to Enterprise Architecture, Technology, plus the following IT Landscape sample dataset: """
208
+
209
+ agentic_vqa_desc="""
210
+ Hi,
211
+ Gain rapid knowledge and insights translating image to meaningful description.
212
+ """
213
+
214
+ agentic_togaf_desc="""
215
+ Hi,
216
+ in a click of button create a reference architecture that serves as a blueprint for designing and implementing IT solutions.
217
+ Standardise, increase efficiency and productivity to architecture solution development.
218
+ Generate context-specific reference and minimal viable architectures to support business and IT strategy and digital transformation.
219
+ Streamline the architecture operating model, taking the best of agentic workflows and architects working together.
220
+ """
221
+
222
+ #Wrapper for functions not to be exposed by the MCP Server
223
+ wrapper = gr.Button(visible=False) #wrapper.click(UIUtils.ea4all_about, show_api=False,)
224
+ wrapper1 = gr.Button(visible=False) #wrapper1.click(init_dbr, show_api=False,)
225
+
226
+ #EA4ALL-Agentic system menu
227
+ with gr.Tabs(selected="how_to") as tabs:
228
+ with gr.Tab(label="Architect Demand Management", visible=False):
229
+ with gr.Tab(label="Architect Project Planning", id="pmo_qna_1"):
230
+ ea4all_pmo_description = gr.Markdown(value=agentic_pmo_desc)
231
+ pmo_chatbot = gr.Chatbot(
232
+ label="EA4ALL your AI Demand Management Architect Companion", type="messages",
233
+ max_height=160,
234
+ layout="bubble",
235
+ )
236
+ pmo_prompt = gr.Textbox(lines=1, show_label=False, max_lines=1, submit_btn=True, stop_btn=True,autofocus=True, placeholder="Type your message here or select an example...")
237
+ with gr.Accordion("Open for question examples", open=False):
238
+ pmo_examples = gr.Dropdown(get_relevant_questions(PMO_MOCK_QNA), value=None,label="Questions", interactive=True)
239
+ gr.ClearButton([pmo_chatbot,pmo_prompt], value="Clear", size="sm", visible=False)
240
+ with gr.Tab(label="Project Portfolio Sample Dataset", id="id_pmo_ds"):
241
+ pmo_df = gr.Dataframe()
242
+ with gr.Tab(label="Application Landscape QnA"):
243
+ with gr.Tabs() as tabs_apm_qna:
244
+ with gr.Tab(label="Connect, Explore, Together", id="app_qna_1"):
245
+ ea4all_agent_metadata = gr.Markdown(value=agentic_qna_desc)
246
+ ea4all_chatbot = gr.Chatbot(
247
+ label="EA4ALL your AI Landscape Architect Companion", type="messages",
248
+ max_height=160,
249
+ layout="bubble",
250
+ )
251
+ qna_prompt = gr.Textbox(lines=1, show_label=False, max_lines=1, submit_btn=True, autofocus=True, placeholder="Type your message here or select an example...")
252
+ with gr.Accordion("Open for question examples", open=False):
253
+ qna_examples = gr.Dropdown(get_relevant_questions(APM_MOCK_QNA),label="Questions", interactive=True)
254
+ gr.ClearButton([ea4all_chatbot,qna_prompt, qna_examples], value="Clear", size="sm", visible=True)
255
+ with gr.Tab(label="Sample Dataset", id="id_apm_ds"):
256
+ apm_df = gr.Dataframe()
257
+ with gr.Tab(label="Diagram Question and Answering"):
258
+ gr.Markdown(value=agentic_vqa_desc)
259
+ ea4all_vqa = gr.Chatbot(
260
+ label="EA4ALL your AI Multimodal Architect Companion", type="messages",
261
+ max_height=160,
262
+ layout="bubble",
263
+ )
264
+ vqa_prompt = gr.Textbox(lines=1, show_label=False, max_lines=1, submit_btn=True, stop_btn=True,autofocus=True, placeholder="Type your message here and upload your diagram...")
265
+ vqa_image = gr.Image(
266
+ label="Architecture Diagram",
267
+ type="filepath",
268
+ format="jpeg, png",
269
+ interactive=True,
270
+ show_download_button=False,
271
+ show_share_button=False,
272
+ visible=True,
273
+ )
274
+ #vqa_prompt = gr.MultimodalTextbox(interactive=True, show_label=False, submit_btn=True, stop_btn=True, autofocus=True, placeholder="Upload your diagram and type your message or select an example...")
275
+ with gr.Accordion("Open for question examples", open=False):
276
+ vqa_examples = gr.Dropdown(get_vqa_examples(), value=0,label="Diagram and Questions", interactive=True)
277
+ gr.ClearButton([ea4all_vqa,vqa_prompt,vqa_image, vqa_examples], value="Clear", size="sm", visible=True)
278
+ with gr.Tab(label="Reference Architecture", id="id_refarch"):
279
+ dbr_text=gr.TextArea(label="Business Problem Sample", value="Provide a Business Problem / Requirement Specification or select an example provided.", lines=14, interactive=True)
280
+ togaf_vision=gr.Markdown(value='### Reference Architecture: Vision and Target')
281
+ architecture_runway=gr.Image(label="Target Architecture Runway",interactive=False,visible=False)
282
+ with gr.Row():
283
+ dbr_file=gr.File(
284
+ value=_join_paths(BaseConfiguration.ea4all_store, gra.dbr_mock),
285
+ label="Business Requirement",
286
+ height=35,
287
+ show_label=False,
288
+ file_count="single",
289
+ file_types=['text'],
290
+ interactive=True,
291
+ type='binary',
292
+ visible=False
293
+ )
294
+ dbr_run=gr.Button(scale=None,value="Run Reference Architecture")
295
+ dbr_cls=gr.ClearButton([togaf_vision, architecture_runway])
296
+ with gr.Tab(label="Overview", id="how_to"):
297
+ gr.Markdown(value=TITLE)
298
+ gr.Image(
299
+ get_image(EA4ALL_ARCHITECTURE),
300
+ show_download_button=False,
301
+ container=False,
302
+ show_share_button=False,
303
+ )
304
+ gr.Markdown(
305
+ """
306
+ - `Empower individuals with Knowledge`: understand and talk about Business and Technology strategy, IT landscape, Architectue Artefacts in a single click of button.
307
+ - `Increase efficiency and productivity`: generate a documented architecture with diagram, model and descriptions. Accelerate Business Requirement identification and translation to Target Reference Architecture. Automated steps and reduced times for task execution.
308
+ - `Improve agility`: plan, execute, review and iterate over EA inputs and outputs. Increase the ability to adapt, transform and execute at pace and scale in response to changes in strategy, threats and opportunities.
309
+ - `Increase collaboration`: democratise architecture work and knowledge with anyone using natural language.
310
+
311
+ ### Knowledge Context
312
+
313
+ Synthetic datasets are used to exemplify the Agentic System capabilities.
314
+
315
+ ### IT Landscape Question and Answering
316
+
317
+ - Application name
318
+ - Business fit: appropriate, inadequate, perfect
319
+ - Technical fit: adequate, insufficient, perfect
320
+ - Business_criticality: operational, medium, high, critical
321
+ - Roadmap: maintain, invest, divers
322
+ - Architect responsible
323
+ - Hosting: user device, on-premise, IaaS, SaaS
324
+ - Business capability
325
+ - Business domain
326
+ - Description
327
+
328
+
329
+ ### Architecture Diagram Visual Question and Answering
330
+
331
+ - Architecture Visual Artefacts
332
+ - jpeg, png
333
+
334
+ **Disclaimer**
335
+ - Your data & image are not accessible or shared with anyone else nor used for training purpose.
336
+ - EA4ALL-VQA Agent should be used ONLY FOR Architecture Diagram images.
337
+ - This feature should NOT BE USED to process inappropriate content.
338
+
339
+ ### Reference Architecture Generation
340
+
341
+ - Clock in/out Use-case
342
+ """
343
+ )
344
+
345
+ #Avoid exposing API /Dependency?
346
+ #dbr_text.change(wrapper1.click(init_dbr,show_api=False)) NOT working
347
+
348
+ #Togaf upload file
349
+ #dbr_file.clear(unload_dbr,outputs=dbr_text)
350
+ #dbr_file.change(on_dbrtext,inputs=dbr_file,outputs=dbr_text)
351
+ dbr_file.change(UIUtils.load_dbr,inputs=dbr_file, outputs=dbr_text, show_api=False)
352
+ #dbr_cls.click(off_dbrtext,outputs=[dbr_text, tabs_togaf, tab_diagram])
353
+
354
+ #Refactored ea4all_chatbot / vqa_chatbot (ChatInterface -> Chatbot)
355
+ qna_prompt.submit(run_qna_agentic_system,[qna_prompt],ea4all_chatbot, api_name="landscape_answering_agent")
356
+ #qna_prompt.submit(lambda: "", None, [qna_prompt])
357
+ #ea4all_chatbot.like(fn=get_user_feedback)
358
+ qna_examples.input(lambda value: value, qna_examples, qna_prompt, show_api=False)
359
+
360
+ #Execute Reference Architecture
361
+ dbr_run.click(run_reference_architecture_agentic_system,show_progress='full',inputs=[dbr_text],outputs=[togaf_vision, architecture_runway], api_name="togaf_blueprint_generation")
362
+ architecture_runway.change(on_image_update, inputs=architecture_runway, outputs=architecture_runway, show_api=False)
363
+
364
+ #chat_msg = vqa_prompt.submit(UIUtils.add_message, [vqa_prompt, vqa_image], [vqa_prompt, ea4all_vqa], show_api=False)
365
+ #bot_msg = chat_msg.then(run_vqa_agentic_system, [vqa_prompt, vqa_image], ea4all_vqa, api_name="diagram_answering_agent")
366
+ vqa_prompt.submit(run_vqa_agentic_system,[vqa_prompt, vqa_image], ea4all_vqa, api_name="diagram_answering_agent")
367
+
368
+ #ea4all_vqa.like(fn=get_user_feedback)
369
+ vqa_examples.input(get_question_diagram_from_example, vqa_examples, outputs=[vqa_prompt, vqa_image], show_api=False)
370
+
371
+ #Invoke CrewAI PMO Agentic System
372
+ pmo_prompt.submit(run_pmo_agentic_system,[pmo_prompt],pmo_chatbot, api_name="architect_demand_agent", show_api=False)
373
+ pmo_prompt.submit(lambda: "", None, [pmo_prompt], show_api=False)
374
+ #pmo_examples.input(lambda value: value, pmo_examples, pmo_prompt)
375
+
376
+ #Set initial state of apm and llm
377
+ ea4all_mcp.load(ea4all_agent_init, outputs=[
378
+ ea4all_agent_metadata,
379
+ ea4all_chatbot,
380
+ ea4all_vqa,
381
+ pmo_chatbot,
382
+ apm_df,
383
+ pmo_df,
384
+ dbr_text
385
+ ],
386
+ show_api=False)
ea4all/ea4all_store/APM-ea4all (test-split).xlsx ADDED
Binary file (16.4 kB). View file
 
ea4all/ea4all_store/apm_qna_mock.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ What are the simplification opportunities within the collaboration landscape?
2
+ Who can I talk to about innovation?
3
+ What applications support marketing domain?
4
+ How can Cloud Assessment Framework increase cloud-based landscape benefits?
ea4all/ea4all_store/dbr.txt ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Purpose of this document
2
+
3
+ The purpose of this document is to provide an overview of the project and specifically detail the business requirements for the project across the relevant business and market areas.
4
+
5
+ The requirements and solution will be agreed with the project sponsor (s) through formal review and sign off of this document.
6
+ Once signed off it will provide input to the subsequent design and development phases of the project.
7
+
8
+ Context
9
+ (These areas can be taken from the Brief/ PID as appropriate)
10
+ Background
11
+ • Change in external scenario - more integrated supervisory organs;
12
+
13
+ Why we need this project
14
+ To make it possible to control the schedule of work on employees that are legally required to.
15
+
16
+ Expected Business Outcome / Objective (Goal)
17
+ To implement the Electronic Timecard in all company business units to the public that are subject to the schedule of work and by that, reduce the number and impact of worktime related lawsuits
18
+
19
+ Project Objectives
20
+ Be compliance with current regulation regarding Timestamp with all employees with work schedule.
21
+
22
+ Ref,Feature,Description,MoSCoW
23
+ A,Input,Registration of ins/outs of employees at the system,M
24
+ G,New Worktime,Creation of new Time schedules for employees,M
25
+
26
+
27
+ Actor Catalogue
28
+
29
+ Name, Description,Goals
30
+ Employees,Employee of company under time control,To register ins and outs
31
+ Manager,Employees first manager,To approve JMLs and monthly activities regarding Time management of employees
32
+ HRSS,Key users of Shared Services of Human Resources,To manage the back end of time system
ea4all/ea4all_store/ea4all-portfolio-management.csv ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Project Name,Problem Statement,Requestor,Project Type,Complexity_Risk,Value_Benefit,Delivery Manager,Business Analyst,Project Sponsor,Line of Business,Timeline - Start,Timeline - End,Architect,Estimated Effort (Days/Month),Status
2
+ Procurement Target Operating Model,"The proposed project seeks to analyze the current procurement processes of our company and identify areas of inefficiency, particularly in the vendor management life cycle. This analysis will involve a comprehensive review of our existing supplier relationships, evaluation of vendor performance metrics, and comparison of industry benchmarks. The objective of this initiative is to streamline our procurement processes, reduce costs, and enhance our partnerships with key suppliers.",Michael Scott,Enhance,medium,medium_high,Kelly Kapoor,Creed Bratton,Jo Bennett,Procurement,2024-06-17,2024-09-15,alexandre.procurement,10,Closed
3
+ IT Comms Channels,"This document outlines the stakeholder identification process for a new IT project, including the roles and responsibilities of project sponsors, customers, developers, and end-users. It details the communication channels and expectations for each stakeholder group, ensuring that all parties are informed and engaged throughout the project lifecycle.",Pam Beesly,New Build,low,medium,Kelly Kapoor,Phyllis Vance,Jo Bennett,IT,2024-09-01,2024-11-30,alexandre.it,5,Closed
4
+ Customer-centric PMO Framework,"To develop a customer-centric project management framework that aligns business objectives with stakeholder expectations and employee capabilities, we will assess current processes, identify areas of inefficiency, and implement process improvements that enhance collaboration and data-driven decision making.",Michael Scott,New Build,low,medium,Ryan Howard,Phyllis Vance,Holly Flax,Project Management,2024-09-19,2024-12-18,alexandre.pmo,9,Closed
5
+ Market Channel Optimisation,"The project will involve gathering data on customer preferences and analyzing sales trends to determine the most effective marketing channels. This will be achieved by conducting market research, interviewing key stakeholders, and utilizing data analytics tools. The results will be presented to the project team and executive management to inform future business decisions.",Dwight Schrute,Enhance,low,low_medium,Angela Martin,Toby Flenderson,Jo Bennett,Marketing,2024-10-15,2025-01-13,alexandre.mkt,3,Closed
6
+ Brain Activity Deep Learning Model,"The proposed project aims to investigate the efficacy of a novel deep learning algorithm for the detection of subtle changes in brain activity patterns associated with the early stages of Alzheimer's disease, utilizing fMRI data and integrating insights from graph theory.",Stanley Hudson,New Service,high,high,Ryan Howard,Toby Flenderson,Jan Levinson,AI COE,2024-11-01,2025-01-30,alexandre.ai,13,Closed
7
+ Customer Satisfaction and Experience,"The goal of this project is to improve customer satisfaction by enhancing the user experience of our e-commerce platform. This will involve gathering feedback from customers, identifying pain points, and implementing changes to our website and mobile app.",Jim Halpert,Enhance,medium,medium,Oscar Martinez,Toby Flenderson,David Wallace,Customer Service,2024-11-22,2025-02-20,alexandre.csm,20,In Flight
8
+ Customer Onboarding Optimisation,"Our company aims to redesign the customer onboarding process by improving the user experience, reducing the time it takes to complete, and increasing customer satisfaction. The new process will involve creating a self-service portal where customers can easily find the necessary information, submit required documents, and track their application status in real-time. This will not only enhance the customer's experience but also reduce the workload of our support team.",Michael Scott,Enhance,low,medium_high,Ryan Howard,Toby Flenderson,Holly Flax,Customer Service,2024-12-03,2025-03-03,alexandre.d2c,10,In Flight
9
+ E-Commerce Website,"This document outlines the customer journey map for our new e-commerce website. It includes a detailed description of the customer's needs, pain points, and expectations at each stage of the purchasing process. This information will be used to identify opportunities for improvement and inform the design of the user interface.",Dwight Schrute,New Service,medium,high,Angela Martin,Creed Bratton,Jo Bennett,Digital to Consumer,2024-12-12,2025-03-12,alexandre.workplace,3,In Flight
10
+ Booking Room optimisation,"The company aims to improve the customer experience by streamlining the process of booking rooms and making payments. The new system will integrate with existing CRM software, enabling staff to view customer history and preferences, and allowing for personalized marketing campaigns. The primary objective is to increase revenue through enhanced customer satisfaction and retention.",Dwight Schrute,Enhance,medium,medium_high,Kelly Kapoor,Darryl Philbin,Robert California,Digital Workplace,2025-01-16,2025-04-16,alexandre.ops,7,Discovery
11
+ Blockchain Supply Chain solution,"The overarching goal of this project is to revolutionize the existing supply chain management system by implementing a decentralized blockchain-based solution. This system will enable real-time tracking of goods, reduce counterfeiting, and enhance transparency throughout the entire supply chain. Our primary objective is to create a more efficient and secure system, thereby improving customer satisfaction and business competitiveness.",Stanley Hudson,New Service,high,high,Ryan Howard,Creed Bratton,Jo Bennett,Operations,2025-01-16,2025-04-16,alexandre.ops,5,Discovery
12
+ Order Delivery Optimisation,"The primary goal of this project is to increase customer satisfaction by reducing the time it takes for customers to receive their orders. This will involve analyzing the current order fulfillment process and identifying areas for improvement, such as streamlining production, optimizing shipping routes, and implementing a more efficient inventory management system.",Stanley Hudson,Enhance,low,medium,Ryan Howard,Meredith Palmer,Jo Bennett,Operations,2025-01-16,2025-05-16,alexandre.ops,5,Discovery
13
+ Carbon Footprint Reduction,"Our company aims to reduce carbon footprint by implementing a smart grid system that integrates solar panels, wind turbines, and energy storage systems to provide a stable and efficient renewable energy supply to commercial and residential areas. This project will not only decrease our reliance on fossil fuels but also reduce energy costs for our customers. We will work with local authorities to ensure compliance with environmental regulations and partner with energy experts to optimize system performance.",Dwight Schrute,New Service,high,high,Kevin Malone,Meredith Palmer,Robert California,Digital Workplace,2025-01-17,2025-07-16,alexandre.finance,20,In Flight
14
+ Claim Process Journey Map,"The goal of this project is to develop a comprehensive customer journey map that highlights the pain points experienced by policyholders during the claims process. The objective is to identify areas for improvement to enhance the customer experience and reduce the average claims resolution time. Key stakeholders include claims adjusters, policyholders, and underwriters. The project will involve analyzing customer feedback, claims data, and industry benchmarks to inform the journey map and recommendations for improvement.",Michael Scott,Enhance,medium,medium,Ryan Howard,Darryl Philbin,David Wallace,Finance,2025-02-06,2025-08-05,alexandre.finance,5,In Flight
15
+ University Onboarding program,"The client is a prominent University that offers a range of undergraduate and postgraduate programs. To enhance student engagement and academic success, the University aims to implement a new student information system that integrates with existing student records and learning management systems. The system will enable students to track their academic progress, access course materials, and communicate with instructors in a seamless manner. The project objective is to increase student retention and satisfaction, improve academic performance, and reduce administrative burdens for the academic staff.",Stanley Hudson,New Build,low,medium_high,Oscar Martinez,Creed Bratton,Jo Bennett,Human Resources,2025-02-08,2025-06-08,alexandre.hr,15,In Flight
16
+ Customer Support Optimisation,"The project aims to enhance the efficiency of the customer support service by creating a centralized knowledge base that captures key information and business process flows. The team must identify the most common issues experienced by customers and the current solutions provided by the support team. Once this information is gathered, the team will create a comprehensive list of stakeholders, including their roles and responsibilities. This will be the foundation for the design of the knowledge base.",Stanley Hudson,New Build,low,medium,Kevin Malone,Meredith Palmer,David Wallace,Customer Service,2025-02-21,2025-05-22,alexandre.csm,,In Flight
17
+ Customer Experience Digital Platform,"Our company aims to develop a digital platform to enhance the customer experience for buying and selling second-hand electronics. The platform will include features such as product listing, price comparison, and customer feedback. Our target audience is environmentally conscious consumers who prefer to buy second-hand products. The platform will be integrated with popular social media channels to increase brand visibility and reach a wider audience. We plan to partner with local waste management organizations to promote sustainable practices and reduce electronic waste. Key stakeholders include product suppliers, customers, and waste management experts. Our goal is to reduce the average sale time of second-hand electronics by 30% and increase sales revenue by 25% within the first year.",Stanley Hudson,New Service,high,high,Kevin Malone,Meredith Palmer,Holly Flax,Digital Workplace,2025-04-01,2025-09-28,,2,Business Case
18
+ Renewable Energy solution,"An energy storage system that utilizes advanced battery technology to optimize renewable energy output and mitigate intermittency of solar and wind power, ensuring a stable power supply to the grid and reducing carbon footprint in the hospitality sector.",Dwight Schrute,New Buy,high,high,Angela Martin,Meredith Palmer,Jan Levinson,Digital Workplace,2025-04-04,2025-07-03,,1,Business Case
19
+ Insurance Claims Optimisation,"The insurance company needs to streamline the process of handling claims for customers who have suffered property damage due to natural disasters. The company aims to reduce the average processing time from 10 days to 3 days while maintaining a high level of customer satisfaction. Stakeholders involved include insurance agents, adjusters, and claims examiners. The journey map should facilitate seamless communication and efficient decision-making among these stakeholders.",Stanley Hudson,Enhance,low,medium,Oscar Martinez,Toby Flenderson,Holly Flax,Finance,2025-04-15,2025-07-14,,,Business Case
20
+ Track and Trace programme,"The company aims to enhance its supply chain efficiency by automating the tracking and monitoring of shipments. This involves integrating data from various stakeholders, including carriers, warehouses, and delivery personnel, to provide real-time updates on shipment status and location. The project objective is to reduce delivery times, increase transparency, and improve customer satisfaction.",Pam Beesly,New Build,high,high,Oscar Martinez,Creed Bratton,David Wallace,Operations,2025-04-30,2026-03-31,,1,Business Case
21
+ Retail Digital Transformation,"The objective of this project is to design and implement a digital transformation journey map for a large retail corporation, enabling them to improve customer engagement and drive sales through omnichannel experiences. The stakeholder list includes marketing, sales, and IT teams. The problem description reveals inefficiencies in the current supply chain process, and the project's primary objective is to enhance customer satisfaction through streamlined operations and data-driven decision making.",Jim Halpert,Enhance,high,medium_high,Kevin Malone,Meredith Palmer,Jan Levinson,Marketing,2025-05-05,2026-03-31,,2,Business Case
22
+ Invest Management Platform,"The project aims to develop a digital platform for streamlined investment management and portfolio optimization, catering to high-net-worth individuals and institutions. The platform will integrate advanced data analytics and machine learning algorithms to provide personalized investment recommendations, risk assessments, and real-time portfolio performance tracking.",Michael Scott,New Service,high,medium_high,Angela Martin,Darryl Philbin,Jan Levinson,Finance,2025-05-30,2025-12-16,,1,Business Case
23
+ Manufacturing Optimisation,"The proposed plan aims to increase the efficiency of the production line by reducing the time spent on quality control checks from 15 minutes to 5 minutes per product unit. Additionally, the new manufacturing process will involve the implementation of robotics to minimize human error and streamline the assembly process.",Dwight Schrute,Enhance,medium,medium,Kelly Kapoor,Toby Flenderson,Jo Bennett,Operations,2025-05-30,2026-03-01,,10,Business Case
24
+ AI Research Platform,"The proposed development of the AI-powered research platform seeks to bridge the gap between theoretical models and practical applications in machine learning. By integrating cutting-edge algorithms with real-world data, the system aims to provide actionable insights for data scientists and researchers. This project will focus on optimizing the workflow and automating routine tasks, ultimately enhancing the productivity of the research team.",Pam Beesly,New Service,high,high,Kelly Kapoor,Toby Flenderson,David Wallace,AI COE,,,,1,New Request
25
+ Customer Feedback Machine Learning Model,"The proposed system utilizes a combination of machine learning algorithms and natural language processing techniques to facilitate the analysis of customer feedback. By integrating sentiment analysis, topic modeling, and entity recognition, the system aims to provide a comprehensive understanding of customer sentiment and preferences.",Dwight Schrute,Enhance,high,high,Ryan Howard,Phyllis Vance,Holly Flax,Customer Service,,,,1,New Request
26
+ Real Estate Machine Learning Model,"The goal of our project is to develop a machine learning model that can accurately predict house prices based on features such as the number of bedrooms, square footage, and location. We will collect data from various sources, preprocess it, and train a regression model to make predictions.",Michael Scott,New Build,high,medium_high,Ryan Howard,Phyllis Vance,Jan Levinson,Finance,,,,1,New Request
27
+ Stock Market Chaos Theory,"Recent studies on the econophysics of complex financial systems have highlighted the potential benefits of incorporating chaos theory into predictive modeling of stock market fluctuations. By examining the fractal structure of stock price dynamics, researchers have been able to identify patterns that may be indicative of future market trends. However, the applicability of these findings to real-world investment strategies remains uncertain due to the complexity of market interactions and the presence of nonlinear feedback loops.",Michael Scott,Enhance,high,high,Oscar Martinez,Phyllis Vance,David Wallace,Finance,,,,1,New Request
28
+ Sustainable Products Review,"The recent decline in sales can be attributed to the strategic shift in consumer behavior, driven by the increasing awareness of sustainable and eco-friendly products. This change in consumer preferences has resulted in a significant decrease in demand for our company's traditional products.",Dwight Schrute,Enhance,high,high,Ryan Howard,Toby Flenderson,Jan Levinson,Procurement,,,,1,New Request
29
+ Social Media Sentiment Analysis,"The proposed AI-powered system utilizes a hybrid approach combining traditional machine learning algorithms with deep learning architectures to improve the accuracy of sentiment analysis in social media posts. By integrating various natural language processing techniques, the system can effectively capture nuances in human language and provide more accurate emotional intelligence.",Jim Halpert,New Buy,low,medium,Oscar Martinez,Creed Bratton,Robert California,Marketing,,,,,New Request
30
+ Thermodynamics Engineer Novel,This experiment aims to investigate the feasibility of leveraging non-equilibrium thermodynamics to engineer novel materials with tailored mechanical properties by exploiting the relationships between entropy and free energy in nanoscale systems.,Dwight Schrute,New Service,high,medium_high,Kevin Malone,Phyllis Vance,Jan Levinson,Research and Development,,,,1,New Request
31
+ High-End Flows Novel Exploration,"The proposed methodology for turbulence modeling in high-speed flows employs a novel combination of Direct Numerical Simulation (DNS) and Large Eddy Simulation (LES) techniques to capture the complex interactions between turbulence and mean flow. The approach involves a two-stage process: first, a DNS is performed to resolve the small-scale turbulence structures, and then the results are used to inform the LES simulation, which captures the larger-scale eddies. This hybrid approach enables the simulation of turbulent flows with high Reynolds numbers, thus providing valuable insights into the underlying physical mechanisms.",Jim Halpert,New Service,high,medium_high,Oscar Martinez,Creed Bratton,Jan Levinson,Research and Development,,,,1,New Request
ea4all/ea4all_store/ea4all_overview.txt ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ - `Empower individuals with Knowledge`: understand and talk about Business and Technology strategy, IT landscape, Architectue Artefacts in a single click of button.
2
+ - `Increase efficiency and productivity`: generate a documented architecture with diagram, model and descriptions. Accelerate Business Requirement identification and translation to Target Reference Architecture. Automated steps and reduced times for task execution.
3
+ - `Improve agility`: plan, execute, review and iterate over EA inputs and outputs. Increase the ability to adapt, transform and execute at pace and scale in response to changes in strategy, threats and opportunities.
4
+ - `Increase collaboration`: democratise architecture work and knowledge with anyone using natural language.
5
+
6
+ Knowledge Context
7
+
8
+ Synthetic datasets are used to exemplify the Agentic System capabilities.
9
+
10
+ IT Landscape Question and Answering
11
+
12
+ - Application name
13
+ - Business fit: appropriate, inadequate, perfect
14
+ - Technical fit: adequate, insufficient, perfect
15
+ - Business_criticality: operational, medium, high, critical
16
+ - Roadmap: maintain, invest, divers
17
+ - Architect responsible
18
+ - Hosting: user device, on-premise, IaaS, SaaS
19
+ - Business capability
20
+ - Business domain
21
+ - Description
22
+
23
+
24
+ Architecture Diagram Visual Question and Answering
25
+
26
+ - Architecture Visual Artefacts
27
+ - jpeg, png
28
+
29
+ **Disclaimer**
30
+ - Your data & image are not accessible or shared with anyone else nor used for training purpose.
31
+ - EA4ALL-VQA Agent should be used ONLY FOR Architecture Diagram images.
32
+ - This feature should NOT BE USED to process inappropriate content.
33
+
34
+ Reference Architecture Generation
35
+
36
+ - Clock in/out Use-case
ea4all/ea4all_store/pmo_qna_mock.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ What architects are working on in flight projects?
2
+ List all new projects without an architect.
3
+ List all projects from the AI COE without an architect.
ea4all/ea4all_store/reference_architecture_dbr_assistant.txt ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ Purpose of this document
2
+
3
+ The purpose of this document is to provide an overview of the project and specifically detail the business requirements for the project across the relevant business and market areas.
4
+
5
+ The requirements and solution will be agreed with the project sponsor (s) through formal review and sign off of this document.
6
+ Once signed off it will provide input to the subsequent design and development phases of the project.
7
+
8
+ Why we need this project
9
+ I want an assistant to take notes during a workshop and translate that into a pseudo process and generate a visual representation that I can then refine in a focused session.
ea4all/ea4all_store/reference_architecture_dbr_demo.txt ADDED
@@ -0,0 +1,43 @@
1
+ Purpose of this document
2
+
3
+ The purpose of this document is to provide an overview of the project and specifically detail the business requirements for the project across the relevant business and market areas.
4
+
5
+ The requirements and solution will be agreed with the project sponsor(s) through formal review and sign-off of this document.
6
+ Once signed off it will provide input to the subsequent design and development phases of the project.
7
+
8
+ Context
9
+ (These areas can be taken from the Brief/ PID as appropriate)
10
+ Background
11
+ • Change in external scenario - more integrated supervisory organs;
12
+ • Validity of the adopted exception model is being questioned (number of inquiries)
13
+ • The average labor lawsuit cost (ticket) is very high (number of lawsuits)
14
+
15
+ Why we need this project
16
+ To make it possible to control the work schedule of employees that are legally required to have one.
17
+
18
+ Expected Business Outcome / Objective (Goal)
19
+ To implement the Electronic Timecard across all company business units for the employees subject to a work schedule and, by that, reduce the number and impact of worktime-related lawsuits
20
+
21
+ Project Objectives
22
+ Be compliant with current regulation regarding Timestamp for all employees with a work schedule.
23
+
24
+ Ref,Feature,Description,MoSCoW
25
+ A,Input,Registration of ins/outs of employees at the system,M
26
+ B,Joiner,Registration of new employees considering the new system,M
27
+ C,Workplace Change,Changes of a workplace of a given employee,M
28
+ D,Employee time,Change of an employee from subject to worktime to not subject or vice versa,M
29
+ E,New Equipment,New equipment installation on facilities,M
30
+ F,Calendar change,Change of holidays of a given workplace,M
31
+ G,New Worktime,Creation of new Time schedules for employees,M
32
+ H,New balance rule,Creation of new Time balance rules for employees,M
33
+
34
+
35
+ Actor Catalogue
36
+
37
+ Name, Description,Goals
38
+ Employees,Employee of company under time control,To register ins and outs
39
+ Coordinator,Immediate superior of non-computer user employee,To register daily activities regarding Time management of non-computer user employees subject to them
40
+ Immediate superior,Immediate superior of employee,To approve daily activities regarding Time management of employees
41
+ Manager,Employees first manager,To approve JMLs and monthly activities regarding Time management of employees
42
+ Local Medical Service,Business unit Doctor,To record absences regarding sick leave
43
+ HRSS,Key users of Shared Services of Human Resources,To manage the back end of the time system
ea4all/ea4all_store/strategic_principles.txt ADDED
@@ -0,0 +1,40 @@
1
+ # Strategic Principles
2
+ architecture_principles = """
3
+ | Architecture Principle | Description |
4
+ | --- | --- |
5
+ | **Business Continuity** | The architecture must ensure that critical business functions can continue to operate during and after a disaster or unexpected downtime. |
6
+ | **Interoperability** | Systems and data must be able to interact with each other, both within and across organizational boundaries. |
7
+ | **Modularity** | The architecture should be composed of modular components that can be independently updated or replaced. |
8
+ | **Scalability** | The architecture should be designed to handle increasing amounts of work in a graceful manner. |
9
+ | **Secure by Design** | The architecture must protect information and systems from unauthorized access and provide confidentiality, integrity, and availability. |
10
+ | **Simplicity** | The architecture should be as simple as possible, while still meeting business needs. Avoid unnecessary complexity. |
11
+ | **Standardization** | Use industry standards where they exist and are appropriate for the business. |
12
+ | **Sustainability** | The architecture should be sustainable and consider the environmental impact of IT decisions. |
13
+ | **User-Centric** | The architecture should focus on the user experience, and be designed with the needs and behaviors of the user in mind.
14
+ """
15
+
16
+ business_principles = """
17
+ | Business Principle | Description |
18
+ | --- | --- |
19
+ | **Customer Focus** | The interests of the customer must be at the center of all decisions and operations. |
20
+ | **Value Creation** | Every initiative and operation should aim to create value for the customers and the business. |
21
+ | **Continuous Improvement** | The business should always strive for better ways to deliver value, through innovation and improvement. |
22
+ | **Integrity** | The business should operate in an ethical and transparent manner. |
23
+ | **Collaboration** | Working together across teams and departments is essential for delivering value. |
24
+ | **Agility** | The business should be able to quickly respond to changes in the market or environment. |
25
+ | **Sustainability** | Decisions should consider their long-term impact on the environment and society. |
26
+ | **Accountability** | Every team and individual in the business should take responsibility for their actions and decisions. |
27
+ | **Data-Driven Decision Making** | Decisions should be based on data and factual information.
28
+ """
29
+
30
+ technology_principles = """
31
+ | Technology Principle | Description |
32
+ | --- | --- |
33
+ | **Reliability** | Systems should be dependable and perform consistently under all conditions. |
34
+ | **Maintainability** | Technology should be easy to update and improve over time. |
35
+ | **Efficiency** | Systems and processes should be designed to minimize waste and maximize productivity. |
36
+ | **User-Centric Design** | Technology should be designed with the end user in mind, ensuring it is easy to use and meets user needs. |
37
+ | **Data Integrity** | Ensuring the accuracy and consistency of data over its entire lifecycle. |
38
+ | **Sustainability** | Technology decisions should consider their impact on the environment. |
39
+ | **Innovation** | Embracing new technologies and ideas to stay competitive and meet evolving business needs.
40
+ """
ea4all/main.py ADDED
@@ -0,0 +1,6 @@
1
+ ##version 2025-06-04
2
+ #ea4all-gradio-agent-mcp-entry-point
3
+ from ea4all.__main__ import main
4
+
5
+ if __name__ == '__main__':
6
+ main()
ea4all/packages.txt ADDED
@@ -0,0 +1 @@
1
+ graphviz
ea4all/src/__init__.py ADDED
@@ -0,0 +1,4 @@
1
+ """Shared utilities module."""
2
+
3
+ #from ea4all.src.graph import super_graph
4
+ #__all__ = ["super_graph"]
ea4all/src/ea4all_apm/configuration.py ADDED
@@ -0,0 +1,35 @@
1
+ """Define the configurable parameters for the APM agent."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from dataclasses import dataclass, field
6
+ from typing import Annotated, Literal
7
+
8
+ import ea4all.src.ea4all_apm.prompts as prompts
9
+ from ea4all.src.shared.configuration import BaseConfiguration
10
+
11
+ @dataclass(kw_only=True)
12
+ class AgentConfiguration(BaseConfiguration):
13
+ """The configuration for the agent."""
14
+
15
+ # prompts
16
+ router_system_prompt: str = field(
17
+ default=prompts.ROUTER_SYSTEM_PROMPT,
18
+ metadata={
19
+ "description": "The system prompt used for classifying user questions to route them to the correct node."
20
+ },
21
+ )
22
+
23
+ query_model: Annotated[str, {"__template_metadata__": {"kind": "llm"}}] = field(
24
+ default="meta-llama/Llama-3.3-70B-Instruct",
25
+ metadata={
26
+ "description": "The language model used for processing and refining queries. Should be in the form: provider/model-name."
27
+ },
28
+ )
29
+
30
+ response_model: Annotated[str, {"__template_metadata__": {"kind": "llm"}}] = field(
31
+ default="meta-llama/Llama-3.3-70B-Instruct",
32
+ metadata={
33
+ "description": "The language model used for generating responses. Should be in the form: provider/model-name."
34
+ },
35
+ )
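A minimal usage sketch (not part of this commit) for the configuration above. It assumes `BaseConfiguration.from_runnable_config` follows the usual LangGraph pattern of reading overrides from the `configurable` mapping of a `RunnableConfig`:

```python
# Sketch: overriding AgentConfiguration defaults at invocation time.
# The "configurable" convention is assumed to come from BaseConfiguration.
from langchain_core.runnables import RunnableConfig

from ea4all.src.ea4all_apm.configuration import AgentConfiguration

config: RunnableConfig = {
    "configurable": {
        "query_model": "meta-llama/Llama-3.3-70B-Instruct",
        "response_model": "meta-llama/Llama-3.3-70B-Instruct",
    }
}

agent_config = AgentConfiguration.from_runnable_config(config)
print(agent_config.query_model)  # meta-llama/Llama-3.3-70B-Instruct
```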
ea4all/src/ea4all_apm/graph.py ADDED
@@ -0,0 +1,906 @@
1
+ """Main entrypoint for the conversational APM graph.
2
+
3
+ This module defines the core structure and functionality of the conversational
4
+ APM graph. It includes the main graph definition, state management,
5
+ and key functions for processing & routing user queries, and generating answers to
6
+ Enterprise Architecture related user questions
7
+ about an IT Landscape, using retrieval or web search.
8
+ """
9
+
10
+ #CHANGELOG: 2025-06-08
11
+ # Refactored to use tools.websearch (changes State, removed web_search)
12
+
13
+ import os
14
+
15
+ from langgraph.graph import END, StateGraph
16
+
17
+ #core libraries
18
+ from langchain_core.runnables import RunnableConfig
19
+ from langchain_core.prompts.chat import ChatPromptTemplate
20
+ from langchain_core.prompts import PromptTemplate, FewShotChatMessagePromptTemplate
21
+ from langchain_core.prompts import ChatPromptTemplate
22
+ from langchain_core.output_parsers.json import JsonOutputParser
23
+ from langchain_core.output_parsers import StrOutputParser
24
+ from langchain_core.runnables import RunnableLambda
25
+ from langchain_core.runnables import RunnablePassthrough, RunnableConfig
26
+ from langchain_core.runnables import RunnableGenerator
27
+ from langchain_core.documents import Document
28
+
29
+ from langchain.load import dumps, loads
30
+ from langchain.hub import pull
31
+
32
+ from operator import itemgetter
33
+ from typing import AsyncGenerator, AsyncIterator
34
+
35
+ #compute amount of tokens used
36
+ import tiktoken
37
+
38
+ #import APMGraph packages
39
+ from ea4all.src.ea4all_apm.configuration import AgentConfiguration
40
+ from ea4all.src.ea4all_apm.state import InputState, OutputState, OverallState
41
+ import ea4all.src.ea4all_apm.prompts as e4p
42
+ from ea4all.src.shared.utils import (
43
+ load_mock_content,
44
+ get_llm_client,
45
+ get_history_gradio,
46
+ extract_structured_output,
47
+ extract_topic_from_business_input,
48
+ _join_paths,
49
+ )
50
+ from ea4all.src.shared import vectorstore
51
+ from ea4all.src.tools.tools import (
52
+ websearch,
53
+ )
54
+
55
+ # This file contains sample APM QUESTIONS
56
+ APM_MOCK_QNA = "apm_qna_mock.txt"
57
+
58
+ async def retrieve_documents(
59
+ state: OverallState, *, config: RunnableConfig
60
+ ) -> dict[str, list[Document]]:
61
+ """Retrieve documents based on a given query.
62
+
63
+ This function uses a retriever to fetch relevant documents for a given query.
64
+
65
+ Args:
66
+ state (QueryState): The current state containing the query string.
67
+ config (RunnableConfig): Configuration with the retriever used to fetch documents.
68
+
69
+ Returns:
70
+ dict[str, list[Document]]: A dictionary with a 'documents' key containing the list of retrieved documents.
71
+ """
72
+ with vectorstore.make_retriever(config) as retriever:
73
+ response = await retriever.ainvoke(state.question, config)
74
+ return {"messages": response}
75
+
76
+ async def apm_retriever(config: RunnableConfig):
77
+ with vectorstore.make_retriever(config) as retriever:
78
+ response = retriever
79
+
80
+ return response
81
+
82
+ # Few Shot Examples
83
+ few_shot_step_back_examples = [
84
+ {
85
+ "input": "Who can I talk to about innovation?",
86
+ "output": '{"datasource": "vectorstore", "topic": "who can I talk to"}',
87
+ },
88
+ {
89
+ "input": "Describe the finance landscape.",
90
+ "output": '{"datasource": "vectorstore", "topic": "line of business landscape"}',
91
+ },
92
+ {
93
+ "input": "What applications support the marketing landscape?",
94
+ "output": '{"datasource": "vectorstore", "topic": "line of business landscape"}',
95
+ },
96
+ {
97
+ "input": "List the simplification opportunities for the collaboration space.",
98
+ "output": '{"datasource": "vectorstore", "topic": "line of business landscape"}',
99
+ },
100
+ {
101
+ "input": "What are the available patterns to deploy AI applications into AWS?",
102
+ "output": '{"datasource": "websearch", "topic": "design patterns"}',
103
+ },
104
+ {
105
+ "input": "What is a Well-Architected Framework?",
106
+ "output": '{"datasource": "websearch", "topic": "architecture framework"}',
107
+ },
108
+ {
109
+ "input": "What is a Cloud Assessment Framework?",
110
+ "output": '{"datasource": "websearch", "topic": "cloud assessment framework"}',
111
+ },
112
+ {
113
+ "input": "What are the main architecture frameworks?",
114
+ "output": '{"datasource": "websearch", "topic": "architecture framework"}',
115
+ },
116
+ ]
117
+
118
+ # We now transform these to example messages
119
+ few_shot_step_back_examples_prompt = ChatPromptTemplate.from_messages(
120
+ [
121
+ ("human", "{input}"),
122
+ ("ai", "{output}"),
123
+ ]
124
+ )
125
+
126
+ few_shot_prompt = FewShotChatMessagePromptTemplate(
127
+ input_variables=["user_question"],
128
+ example_prompt=few_shot_step_back_examples_prompt,
129
+ examples=few_shot_step_back_examples,
130
+ )
131
+
132
+ ## RAG from scratch: Query Translations functions
133
+ def get_unique_union(documents: list[list]):
134
+ """ Unique union of retrieved docs """
135
+ # Flatten list of lists, and convert each Document to string
136
+ flattened_docs = [dumps(doc) for sublist in documents for doc in sublist]
137
+ # Get unique documents
138
+ unique_docs = list(set(flattened_docs))
139
+ # Return
140
+ return [loads(doc) for doc in unique_docs]
141
+
142
+ def reciprocal_rank_fusion(results: list[list], k=60):
143
+ """ Reciprocal_rank_fusion that takes multiple lists of ranked documents
144
+ and an optional parameter k used in the RRF formula """
145
+
146
+ # Initialize a dictionary to hold fused scores for each unique document
147
+ fused_scores = {}
148
+
149
+ # Iterate through each list of ranked documents
150
+ for docs in results:
151
+ # Iterate through each document in the list, with its rank (position in the list)
152
+ for rank, doc in enumerate(docs):
153
+ # Convert the document to a string format to use as a key (assumes documents can be serialized to JSON)
154
+ doc_str = doc.metadata['source']
155
+ # If the document is not yet in the fused_scores dictionary, add it with an initial score of 0
156
+ if doc_str not in fused_scores:
157
+ fused_scores[doc_str] = [doc,0]
158
+ # Retrieve the current score of the document, if any
159
+ #previous_score = fused_scores[doc_str]
160
+ # Update the score of the document using the RRF formula: 1 / (rank + k)
161
+ fused_scores[doc_str][1] += 1 / (rank + k)
162
+
163
+ # Sort the documents based on their fused scores in descending order to get the final reranked results
164
+ reranked_results = [
165
+ doc[0]
166
+ for source, doc in sorted(fused_scores.items(), key=lambda x: x[1][1], reverse=True)
167
+ ]
168
+
169
+ # Return the reranked results as a list of tuples, each containing the document and its fused score
170
+ return reranked_results
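A short usage sketch for the fusion above (illustration only; it assumes the package and its dependencies are importable, and that documents carry the `source` metadata key the function expects):

```python
# Two retrievers rank the same documents differently; RRF fuses the
# 1 / (rank + k) contributions from each list into a single ranking.
from langchain_core.documents import Document

from ea4all.src.ea4all_apm.graph import reciprocal_rank_fusion

a = Document(page_content="App A", metadata={"source": "apm.csv#1"})
b = Document(page_content="App B", metadata={"source": "apm.csv#2"})

fused = reciprocal_rank_fusion([[a, b], [b, a]], k=60)
print([doc.metadata["source"] for doc in fused])
```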
171
+
172
+ def format_qa_pair(question, answer):
173
+ """Format Q and A pair"""
174
+
175
+ formatted_string = ""
176
+ formatted_string += f"Question: {question}\nAnswer: {answer}\n\n"
177
+ return formatted_string.strip()
178
+
179
+ async def get_retrieval_chain(rag_input, ea4all_user, question, retriever, config: RunnableConfig):
180
+
181
+ configuration = AgentConfiguration.from_runnable_config(config)
182
+ llm = get_llm_client(configuration.query_model, api_base_url=configuration.api_base_url)
183
+
184
+ #retriever = retriever_faiss(db, ea4all_user)
185
+ #CHANGE: Receive as parameter originer
186
+ #retriever = await apm_retriever(config) #NEEDS retrofit to add user_login
187
+
188
+ if rag_input == 1: # Multi-query
189
+ ## RAG Query Transformation: Multi query
190
+ prompt_perspectives = ChatPromptTemplate.from_template(e4p.multiquery_template)
191
+ generate_queries = (
192
+ prompt_perspectives
193
+ | llm
194
+ | StrOutputParser()
195
+ | (lambda x: x.split("\n"))
196
+ )
197
+ # Retrieve chain
198
+ retrieval_chain = generate_queries | retriever.map() | get_unique_union
199
+
200
+ elif rag_input == 2: # RAG Fusion
201
+ # Prompt
202
+ prompt_rag_fusion = ChatPromptTemplate.from_template(e4p.rag_fusion_questions_template)
203
+ generate_queries = (
204
+ prompt_rag_fusion
205
+ | llm
206
+ | StrOutputParser()
207
+ | (lambda x: x.split("\n"))
208
+ )
209
+ # Retrieval chain
210
+ retrieval_chain = generate_queries | retriever.map() | reciprocal_rank_fusion
211
+
212
+ elif rag_input == 3: # Decomposition
213
+ # Build prompt
214
+ prompt_decomposition = ChatPromptTemplate.from_template(e4p.decomposition_template)
215
+ # Chain
216
+ generate_queries_decomposition = ( prompt_decomposition | llm | StrOutputParser() | (lambda x: x.split("\n")))
217
+
218
+ # Return new set of questions
219
+ questions = generate_queries_decomposition.invoke(
220
+ {"question": question},
221
+ {"tags": [os.environ['EA4ALL_ENV']], "metadata": {"ea4all_user": ea4all_user, "rag_input": rag_input}}
222
+ )
223
+ # Prompt: Answer recursively
224
+ decomposition_prompt = ChatPromptTemplate.from_template(e4p.decomposition_answer_recursevely_template)
225
+
226
+ # Answer each question and return final answer
227
+ answer = ""
228
+ q_a_pairs = ""
229
+ for q in questions:
230
+ rag_chain = (
231
+ {"context": itemgetter("question") | retriever,
232
+ "question": itemgetter("question"),
233
+ "q_a_pairs": itemgetter("q_a_pairs")}
234
+ | decomposition_prompt
235
+ | llm
236
+ | StrOutputParser())
237
+
238
+ answer = rag_chain.invoke(
239
+ {"question":q,"q_a_pairs":q_a_pairs},
240
+ {"tags": [os.environ['EA4ALL_ENV']], "metadata": {"ea4all_user": ea4all_user, "rag_input": rag_input}}
241
+ )
242
+
243
+ q_a_pair = format_qa_pair(q,answer)
244
+ q_a_pairs = q_a_pairs + "\n---\n" + q_a_pair
245
+
246
+ return answer # Final response to user inquiry
247
+
248
+ elif rag_input == 4: # RAG Step-back
249
+
250
+ generate_queries_step_back = e4p.few_shot_step_back_prompt | llm | StrOutputParser()
251
+
252
+ generate_queries_step_back.invoke(
253
+ {"standalone_question": lambda x: x["standalone_question"]},
254
+ {"tags": [os.environ['EA4ALL_ENV']], "metadata": {"ea4all_user": ea4all_user, "rag_input": rag_input}}
255
+ )
256
+
257
+ response_prompt = ChatPromptTemplate.from_template(e4p.step_back_response_prompt_template)
258
+
259
+ retrieval_chain = (
260
+ {
261
+ # Retrieve context using the normal question
262
+ "normal_context": RunnableLambda(lambda x: getattr(x, "standalone_question")) | retriever,
263
+ # Retrieve context using the step-back question
264
+ "step_back_context": generate_queries_step_back | retriever,
265
+ # Pass on the question
266
+ "standalone_question": lambda x: x["standalone_question"],
267
+ }
268
+ | response_prompt
269
+ | llm
270
+ | StrOutputParser()
271
+ )
272
+
273
+ elif rag_input == 5: # RAG HyDE
274
+ # Prompt
275
+ prompt_hyde = ChatPromptTemplate.from_template(e4p.hyde_template)
276
+ generate_docs_for_retrieval = (
277
+ prompt_hyde |
278
+ llm |
279
+ StrOutputParser()
280
+ )
281
+
282
+ retrieval_chain = generate_docs_for_retrieval | retriever
283
+
284
+ else:
285
+ # Standard RAG approach - user query
286
+ retrieval_chain = itemgetter("standalone_question") | retriever
287
+
288
+ return retrieval_chain
289
+
290
+ #Get relevant answers to user query
291
+ ##get_relevant_documents "deprecated" - replaced by invoke : 2024-06-07
292
+ async def get_relevant_answers(state: OverallState, query, config: RunnableConfig):
293
+
294
+ if query != "":
295
+ #retriever.vectorstore.index.ntotal
296
+ #retriever = retriever_faiss(user_ip)
297
+ #response = retriever.invoke({"standalone_question": query})
298
+
299
+ response = await retrieve_documents(state, config=config)
300
+ return response
301
+ else:
302
+ return []
303
+
304
+ #Return LLM answer to user inquiry
305
+ def rag_llm(llm, chat_prompt, query, response):
306
+ answers = llm.invoke(
307
+ chat_prompt.format_prompt(
308
+ cdocs=response, query=query,
309
+ )
310
+ )
311
+
312
+ try:
313
+ return answers.content
314
+ except AttributeError:
315
+ return answers
316
+
317
+ #Save user apm to disk
318
+ def ea4all_serialize(apm_file, user_ip):
319
+ import pickle
320
+
321
+ # Specify the target filename
322
+ filename = _join_paths(AgentConfiguration.ea4all_store, f"apm_{user_ip}.pkl")
323
+
324
+ # Serialize and save the binary data to a file
325
+ try:
326
+ with open(filename, 'wb') as file:
327
+ pickle.dump(apm_file, file)
328
+ return True
329
+ # Some code that might raise an exception
330
+ except Exception:
331
+ # Handle the exception
332
+ return False
333
+
334
+ #number of tokens consumed
335
+ def num_tokens_from_string(string: str, encoding_name: str) -> int:
336
+ """Returns the number of tokens in a text string."""
337
+ encoding = tiktoken.get_encoding(encoding_name)
338
+ num_tokens = len(encoding.encode(string))
339
+ return num_tokens
340
+
341
+ #retrieve relevant questions based on user interaction
342
+ def get_relevant_questions():
343
+ relevant_questions = []
344
+ mock = load_mock_content(APM_MOCK_QNA)
345
+ for line in mock.splitlines(): relevant_questions += [line]
346
+
347
+ return relevant_questions
348
+
349
+ #Rephrase the original user question based on the system prompt to lead to a better LLM answer
350
+ def user_query_rephrasing(
351
+ state: OverallState, _prompt=None, *, config: RunnableConfig
352
+ ) -> dict[str,str]:
353
+
354
+ question = getattr(state,'question')
355
+
356
+ configuration = AgentConfiguration.from_runnable_config(config)
357
+ # 'model = load_chat_model(configuration.query_model)
358
+ model = get_llm_client(model=configuration.query_model, api_base_url=configuration.api_base_url)
359
+
360
+ if _prompt:
361
+ rewrite_prompt = pull("learn-it-all-do-it-all/ea4all_user_question_rephrase")
362
+ inputs = {"user_question": question} #, "ai_output": e4p.LLAMA31_PROMPT_FORMAT}
363
+ else:
364
+ rewrite_prompt = pull("learn-it-all-do-it-all/ea4all_question_rewriter")
365
+ inputs = {"user_question": question, "target":"web search"}
366
+
367
+ rewrite_chain = rewrite_prompt | model | JsonOutputParser()
368
+
369
+ result = rewrite_chain.invoke(
370
+ input=inputs
371
+ )
372
+
373
+ try:
374
+ question = result['rephrased']
375
+ except Exception:
376
+ question = state.question
377
+
378
+ return {"question": question}
379
+
380
+ # Post-processing
381
+ def format_docs(docs):
382
+ return "\n".join(doc.page_content for doc in docs)
383
+
384
+ def identify_task_category(
385
+ question,chat_memory,config: RunnableConfig
386
+ ):
387
+ configuration = AgentConfiguration.from_runnable_config(config)
388
+
389
+ prompt = pull("learn-it-all-do-it-all/apm_task_router")
390
+ llm = get_llm_client(model=configuration.query_model, api_base_url=configuration.api_base_url)
391
+
392
+ try:
393
+ # x=get_history_gradio(x) extract Human / AI
394
+ # fake gradio chat memory
395
+ x={"chat_memory":[]}
396
+ x['chat_memory'] = chat_memory
397
+ # extract human message only
398
+ memory=""
399
+ for human, ai in x['chat_memory']: memory += human + ";"
400
+
401
+ chain_one = prompt | llm | JsonOutputParser()
402
+ result = chain_one.invoke({"user_question": memory + question if x else question})
403
+
404
+ #parse response and pass on to next chain2/prompt2
405
+ response = extract_topic_from_business_input(result)
406
+
407
+ return response
408
+ except Exception:
409
+ return {'primary': 'General Inquiry'}
410
+
411
+ def retrieval_grader(model):
412
+ prompt = PromptTemplate(
413
+ template="""<|begin_of_text|><|start_header_id|>system<|end_header_id|> You are a grader assessing relevance
414
+ of a retrieved document to a user question. If the document contains keywords related to the user question,
415
+ grade it as relevant. It does not need to be a stringent test. The goal is to filter out erroneous retrievals. \n
416
+ Give a binary score 'yes' or 'no' score to indicate whether the document is relevant to the question. \n
417
+ Provide the binary score as a JSON with a single key 'score' and no preamble or explanation.
418
+ <|eot_id|><|start_header_id|>user<|end_header_id|>
419
+ Here is the retrieved document: \n\n {document} \n\n
420
+ Here is the user question: {question} \n <|eot_id|><|start_header_id|>assistant<|end_header_id|>
421
+ """,
422
+ input_variables=["user_question", "document"],
423
+ )
424
+
425
+ retrieval_grader = prompt | model | JsonOutputParser()
426
+
427
+ return retrieval_grader
428
+
429
+ def hallucination_grader(model):
430
+ # Prompt
431
+ prompt = pull("learn-it-all-do-it-all/ea4all_apm_hallucination_grader")
432
+ hallucination_grader = prompt | model | JsonOutputParser()
433
+
434
+ return hallucination_grader
435
+
436
+ def grade_answer(model):
437
+ # Prompt
438
+ prompt = PromptTemplate(
439
+ template="""<|begin_of_text|><|start_header_id|>system<|end_header_id|> You are a grader assistant and your task is to assess the answer relevance to address a user question.\n
440
+ Give a binary score 'yes' to indicate that the answer is relevant or 'no' otherwise.\n
441
+ Provide the binary score as a JSON with a single key 'score' and nothing else.\n
442
+ <|eot_id|><|start_header_id|>user<|end_header_id|> Here is the answer:
443
+ \n ------- \n
444
+ {generation}
445
+ \n ------- \n
446
+ Here is the question: {user_question} <|eot_id|><|start_header_id|>assistant<|end_header_id|>""",
447
+ input_variables=["generation", "user_question"],
448
+ )
449
+
450
+ answer_grader = prompt | model | JsonOutputParser()
451
+
452
+ return answer_grader
453
+
454
+ async def grade_documents(state, config: RunnableConfig):
455
+ """
456
+ Determines whether the retrieved documents are relevant to the question
457
+ If any document is not relevant, we will set a flag to run web search
458
+
459
+ Args:
460
+ state (dict): The current graph state
461
+
462
+ Returns:
463
+ state (dict): Filtered out irrelevant documents and updated web_search state
464
+ """
465
+
466
+ configuration = AgentConfiguration.from_runnable_config(config)
467
+
468
+ print("---CHECK DOCUMENT RELEVANCE TO QUESTION---")
469
+ question = state.question
470
+ documents = state.messages
471
+ source = state.source
472
+ llm = get_llm_client(model=configuration.query_model, api_base_url=configuration.api_base_url)
473
+
474
+ # Score each doc
475
+ filtered_docs = []
476
+ for d in documents:
477
+ score = await retrieval_grader(llm).ainvoke(
478
+ {"user_question": question, "document": d.page_content}
479
+ )
480
+ grade = score.get("score", "no")
481
+ # Document relevant
482
+ if grade.lower() == "yes":
483
+ print("---GRADE: DOCUMENT RELEVANT---")
484
+ filtered_docs.append(d)
485
+ # Document not relevant
486
+ else:
487
+ print("---GRADE: DOCUMENT NOT RELEVANT---")
488
+ # We do not include the document in filtered_docs
489
+ # We set a flag to indicate that we want to run web search
490
+ #web_search = "Yes"
491
+ source = "websearch"
492
+
493
+ return {"documents": filtered_docs, "question": question, "source": source}
494
+
495
+ def decide_to_generate(state):
496
+ """
497
+ Determines whether to generate an answer, or add web search
498
+
499
+ Args:
500
+ state (dict): The current graph state
501
+
502
+ Returns:
503
+ str: Binary decision for next node to call
504
+ """
505
+
506
+ print("---ASSESS GRADED DOCUMENTS---")
507
+ state.question
508
+ source = state.source
509
+ getattr(state,'documents')
510
+
511
+ if source == "websearch":
512
+ # All documents have been filtered check_relevance
513
+ # We will re-generate a new query
514
+ print(
515
+ "---DECISION: ALL DOCUMENTS ARE NOT RELEVANT TO QUESTION, INCLUDE WEB SEARCH---"
516
+ )
517
+ return "websearch"
518
+ else:
519
+ # We have relevant documents, so generate answer
520
+ print("---DECISION: GENERATE---")
521
+ return "generate"
522
+
523
+ def grade_generation_v_documents_and_question(
524
+ state:OverallState, config: RunnableConfig) -> str:
525
+ """
526
+ Determines whether the generation is grounded in the document and answers question.
527
+
528
+ Args:
529
+ state (dict): The current graph state
530
+
531
+ Returns:
532
+ str: Decision for next node to call
533
+ """
534
+
535
+ configuration = AgentConfiguration.from_runnable_config(config)
536
+
537
+ question = getattr(state,'question')
538
+ documents = getattr(state,'messages')
539
+ generation = getattr(state,'generation')
540
+ llm = get_llm_client(model=configuration.query_model, api_base_url=configuration.api_base_url)
541
+
542
+ if getattr(state,'source') == "websearch":
543
+ #print("---CHECK HALLUCINATIONS---")
544
+ hallucination_grader_instance = hallucination_grader(llm)
545
+ #for output in hallucination_grader_instance.stream(
546
+ output = hallucination_grader_instance.invoke(
547
+ {"documents": documents, "generation": generation},
548
+ config={"tags":["stream_hallucination"]})
549
+ #yield(output)
550
+ grade = output["score"]
551
+ if grade == "yes": print("---DECISION: GENERATION IS GROUNDED IN DOCUMENTS---")
552
+ else:
553
+ grade = 'yes'
554
+
555
+ # Check hallucination
556
+ if grade == "yes":
557
+ #Check question-answering
558
+ print("---GRADE GENERATION vs QUESTION---")
559
+ grade_answer_instance = grade_answer(llm)
560
+ #for output in grade_answer_instance.stream(
561
+ output = grade_answer_instance.invoke(
562
+ {"user_question": question, "generation": generation},
563
+ config={"tags":["stream_grade_answer"]})
564
+ #yield(output)
565
+ grade = output["score"]
566
+ if grade == "yes":
567
+ print("---DECISION: GENERATION ADDRESSES QUESTION---")
568
+ return "useful"
569
+ else:
570
+ print("---DECISION: GENERATION DOES NOT ADDRESS QUESTION---")
571
+ return "not useful"
572
+ else:
573
+ print("---DECISION: GENERATION IS NOT GROUNDED IN DOCUMENTS, RE-TRY---")
574
+ return "not supported"
575
+
576
+ async def apm_query_router(
577
+ state: OverallState, config: RunnableConfig
578
+ ) -> str:
579
+
580
+ configuration = AgentConfiguration.from_runnable_config(config)
581
+
582
+ routing_prompt = pull('learn-it-all-do-it-all/ea4all-apm-user-question-routing')
583
+
584
+ #update prompt with few-shot-examples
585
+ updated_prompt = routing_prompt.from_messages([routing_prompt.messages[0], few_shot_prompt, routing_prompt.messages[1], routing_prompt.messages[2]])
586
+ # Apply partial variables to the created template
587
+ updated_prompt = updated_prompt.partial(
588
+ metadata=e4p.TEMPLATE_APM_QNA_ROUTING,
589
+ )
590
+
591
+ model = get_llm_client(model=configuration.query_model, api_base_url=configuration.api_base_url)
592
+
593
+ route = updated_prompt | model
594
+
595
+ ##Rephrase user question to lead to a better LLM response
596
+ #PROMPT as context NOT WORKING AS EXPECTED 2024-09-23
597
+ user_query = user_query_rephrasing(state=state, _prompt=updated_prompt, config=config)['question']
598
+
599
+ response = await route.ainvoke({"user_question": user_query})
600
+
601
+ extracted = extract_structured_output(response.content)
602
+ if extracted is not None:
603
+ datasource = extracted.get('datasource', 'vectorstore')
604
+ else:
605
+ datasource = 'vectorstore'
606
+
607
+ return datasource
608
+
609
+ async def retrieve(
610
+ state: OverallState, config: RunnableConfig
611
+ ):
612
+ """
613
+ Retrieve documents
614
+
615
+ Args:
616
+ state (dict): The current graph state
617
+
618
+ Returns:
619
+ state (dict): New key added to state, documents, that contains retrieved documents
620
+ """
621
+
622
+ configuration = AgentConfiguration.from_runnable_config(config)
623
+
624
+ #print("---RETRIEVE---")
625
+ question = getattr(state,'question')
626
+
627
+ llm = get_llm_client(model=configuration.query_model, api_base_url=configuration.api_base_url)
628
+
629
+ with vectorstore.make_retriever(config) as _retriever:
630
+ retriever = _retriever
631
+
632
+ # First we add a step to load memory from gr.ChatInterface.history_chat
633
+ # This adds a "memory" key to the input object
634
+ loaded_memory = RunnablePassthrough.assign(
635
+ chat_history = RunnableLambda(get_history_gradio) | itemgetter("history"))
636
+
637
+ # Now we calculate the standalone question <= Original Question + ChatHistory
638
+ standalone_question = {
639
+ "standalone_question": {
640
+ "chat_history": lambda x: str(x["chat_history"]),
641
+ "user_question": lambda x: x['user_question']
642
+ }
643
+ | e4p.CONDENSE_QUESTION_PROMPT
644
+ | llm
645
+ | StrOutputParser()
646
+ }
647
+
648
+ # Retrieval
649
+ rag_input = int(getattr(state,'rag'))
650
+ retrieval_chain = await get_retrieval_chain(rag_input,"ea4all_agent",question,retriever, config=config)
651
+
652
+ retrieved_documents = {
653
+ "cdocs": retrieval_chain,
654
+ "user_question": itemgetter("standalone_question")
655
+ }
656
+
657
+ # And now we put it all together!
658
+ final_chain = loaded_memory | standalone_question | retrieved_documents
659
+
660
+ documents = await final_chain.ainvoke({"user_question": question, "chat_memory":[]})
661
+
662
+ return {"messages": format_docs(documents['cdocs']), "question": question, "rag":getattr(state,'rag')}
663
+
664
+ ### Edges ###
665
+ def route_to_node(state:OverallState):
666
+
667
+ if state.source == "websearch":
668
+ #print("---ROUTE QUESTION TO WEB SEARCH---")
669
+ return "websearch"
670
+ elif state.source == "vectorstore":
671
+ #print("---ROUTE QUESTION TO RAG---")
672
+ return "vectorstore"
673
+
674
+ async def route_question(
675
+ state: OverallState, config: RunnableConfig
676
+ ) -> dict[str, str]:
677
+ """
678
+ Route question to web search or RAG.
679
+
680
+ Args:
681
+ state (dict): The current graph state
682
+
683
+ Returns:
684
+ str: Next node to call
685
+ """
686
+
687
+ #print("---ROUTE QUESTION---")
688
+ source = await apm_query_router(state, config)
689
+
690
+ return {"source":source}
691
+
692
+ async def stream_generation(
693
+ state: OverallState, config: RunnableConfig
694
+ ) -> AsyncGenerator[str, None]:
695
+ configuration = AgentConfiguration.from_runnable_config(config)
696
+
697
+ llm = get_llm_client(model=configuration.query_model, api_base_url=configuration.api_base_url,streaming=configuration.streaming)
698
+
699
+ documents = None
700
+ question = None
701
+ source = None
702
+ chat_memory = None
703
+ async for s in state:
704
+ documents = getattr(s,"messages")
705
+ question = getattr(s,"question")
706
+ source = getattr(s,"source")
707
+ chat_memory = getattr(s,"chat_memory")
708
+
709
+ # Prompt Web Search generation
710
+ if source == "websearch":
711
+ prompt = PromptTemplate(
712
+ template="""<|begin_of_text|><|start_header_id|>system<|end_header_id|> You are an enterprise architect assistant for question-answering tasks.
713
+ Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know.
714
+ Keep the answer concise <|eot_id|><|start_header_id|>user<|end_header_id|>
715
+ Question: {user_question}
716
+ Context: {cdocs}
717
+ Answer: <|eot_id|><|start_header_id|>assistant<|end_header_id|>""",
718
+ input_variables=["user_question", "cdocs"],
719
+ )
720
+ else:
721
+ # Now we construct the inputs for the final prompt
722
+ # identify primary, second category
723
+ tc = identify_task_category(question,chat_memory,config)
724
+ prompt = e4p.ea4ll_prompt_selector(tc['primary'])
725
+
726
+ rag_chain = prompt | llm | StrOutputParser()
727
+
728
+ async for output in rag_chain.astream({"cdocs": documents, "user_question": question}):
729
+ yield(output)
730
+
731
+ async def generate(
732
+ state: OverallState, config: RunnableConfig
733
+ ) -> dict[str, str]:
734
+ """
735
+ Generate answer
736
+
737
+ Args:
738
+ state (dict): The current graph state
739
+ config (RunnableConfig): Configuration with the model used for query analysis.
740
+
741
+ Returns:
742
+ state (dict): New key added to state, generation, that contains LLM generation
743
+ """
744
+ #print("---GENERATE---")
745
+
746
+ #documents = getattr(state,'messages')[-1].content #documents
747
+ source = getattr(state,'source')
748
+ #question = getattr(state,'question')
749
+
750
+ ##Triggered by hallucination_grade? 2025-02-21 - NOT USED, being edged to END atm
751
+ #2025-02-21: it's being triggered by super_graph supervisor as well - need to review as calling web_search twice
752
+ #if getattr(state,'generation') is None:
753
+ # if getattr(state,'web_search') == "Yes":
754
+ # await websearch(state, config)
755
+ # else:
756
+ # state.rag = "1"
757
+ # await retrieve(state, config)
758
+
759
+ # Generate answer
760
+ tags = ["websearch_stream"] if source == "websearch" else ["apm_stream"]
761
+ gen = RunnableGenerator(stream_generation).with_config(tags=tags)
762
+ generation=""
763
+ async for message in gen.astream(state):
764
+ generation = ''.join([generation,message])
765
+
766
+ #return {"messages": documents.content, "question": question, "generation": generation, "web_search": web_search}
767
+ return {"generation": generation}
768
+
769
+ #ea4all-qna-agent-conversational-with-memory
770
+ async def apm_agentic_qna(
771
+ state:OverallState, config: RunnableConfig):
772
+
773
+ configuration = AgentConfiguration.from_runnable_config(config)
774
+
775
+ question = getattr(state,'question')
776
+ chat_memory = getattr(state,'chat_memory')
777
+
778
+ llm = get_llm_client(model=configuration.query_model, api_base_url=configuration.api_base_url)
779
+
780
+ retriever = await apm_retriever(config)
781
+
782
+ # First we add a step to load memory from gr.ChatInterface.history_chat
783
+ # This adds a "memory" key to the input object
784
+ loaded_memory = RunnablePassthrough.assign(
785
+ chat_history = itemgetter("chat_memory"))
786
+
787
+ # Now we calculate the standalone question <= Original Question + ChatHistory
788
+ standalone_question = {
789
+ "standalone_question": {
790
+ "chat_history": lambda x: str(x["chat_history"]),
791
+ "user_question": lambda x: x["user_question"]
792
+ }
793
+ | e4p.CONDENSE_QUESTION_PROMPT
794
+ | llm
795
+ | StrOutputParser()
796
+ }
797
+
798
+ # Start with Hyde
799
+ prompt_hyde = ChatPromptTemplate.from_template(e4p.hyde_template)
800
+ generate_docs_for_retrieval = (
801
+ prompt_hyde |
802
+ llm |
803
+ StrOutputParser()
804
+ )
805
+ retrieval_chain = generate_docs_for_retrieval | retriever
806
+
807
+ retrieved_documents = {
808
+ "cdocs": retrieval_chain,
809
+ "query": itemgetter("standalone_question")
810
+ }
811
+
812
+ # And now we put it all together!
813
+ final_chain = loaded_memory | standalone_question | retrieved_documents
814
+
815
+ documents = await final_chain.ainvoke({"user_question": question, "chat_memory":chat_memory})
816
+
817
+ #return {"documents": format_docs(documents['cdocs']), "question": question, "rag":5, "generation": None}
818
+ return {"messages": format_docs(documents['cdocs']), "rag":5}
819
+
820
+ async def final(state: OverallState):
821
+ return {"safety_status": state}
822
+
823
+ async def choose_next(state: OverallState):
824
+ if state.safety_status is not None and len(state.safety_status) > 0 and state.safety_status[0] == 'no':
825
+ return "exit"
826
+ else:
827
+ return "route"
828
+
829
+ class SafetyCheck:
830
+ def apm_safety_check(self,state: OverallState, config: RunnableConfig):
831
+
832
+ configuration = AgentConfiguration.from_runnable_config(config)
833
+ question = state.question
834
+
835
+ safety_prompt = pull('learn-it-all-do-it-all/ea4all_apm_safety_check')
836
+
837
+ llm = get_llm_client(model=configuration.query_model, api_base_url=configuration.api_base_url)
838
+
839
+ route = safety_prompt | llm | JsonOutputParser()
840
+
841
+ response = route.invoke({"user_question": question})
842
+
843
+ try:
844
+ score = response['score']
845
+ explain = response['response']
846
+ except (KeyError, TypeError):
847
+ score = 'no'
848
+ explain = 'I cannot answer your question at moment!'
849
+
850
+ return {"safety_status": [score, explain, question]}
851
+
852
+ def __init__(self):
853
+ self._safety_run = self.apm_safety_check
854
+
855
+ def __call__(self, state: OverallState, config: RunnableConfig) -> dict[str, list]:
856
+ try:
857
+ response = getattr(self, '_safety_run')(state, config)
858
+ return {"safety_status": [response['safety_status'][0], "", state.question]}
859
+ except Exception as e:
860
+ return {"safety_status": ['no', e, state.question]}
861
+
862
+ ##BUILD APM Graph
863
+ # Build graph
864
+ workflow = StateGraph(OverallState, input=InputState, output=OutputState, config_schema=AgentConfiguration)
865
+
866
+ # Define the nodes
867
+ workflow.add_node("safety_check",SafetyCheck())
868
+ workflow.add_node("route_question", route_question) # route to vectorstore or websearch
869
+ workflow.add_node("retrieve", apm_agentic_qna) # retrieve
870
+ workflow.add_node("websearch", websearch) # web search
871
+ workflow.add_node("generate", generate) # generate web search based answer
872
+ workflow.add_node("final", final)
873
+
874
+ workflow.set_entry_point("safety_check")
875
+ workflow.add_conditional_edges(
876
+ "safety_check",
877
+ choose_next,
878
+ {
879
+ "exit": "final",
880
+ "route": "route_question"
881
+ }
882
+ )
883
+ workflow.add_conditional_edges(
884
+ "route_question",
885
+ route_to_node,
886
+ {
887
+ "websearch": "websearch",
888
+ "vectorstore": "retrieve",
889
+ },
890
+ )
891
+ workflow.add_edge("retrieve", "generate")
892
+ workflow.add_edge("websearch", "generate")
893
+ workflow.add_conditional_edges( #2025-02-27: Conditional edges expect sync function only
894
+ "generate",
895
+ grade_generation_v_documents_and_question,
896
+ {
897
+ "not supported": "route_question",
898
+ "useful": END,
899
+ "not useful": END, ##2025-02-21: need to review THIS to try again and respond to user with a better answer
900
+ },
901
+ )
902
+ workflow.add_edge("final", END)
903
+
904
+ # Compile
905
+ apm_graph = workflow.compile()
906
+ apm_graph.name = "APMGraph"
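For orientation, a hedged end-to-end sketch of calling the compiled graph. The `question` field comes from `InputState`; the model override and the sample question are placeholders, and running it requires the project's environment (API keys, LangChain Hub access, vector store):

```python
# Sketch: invoking the APM graph end-to-end (illustrative, not part of the commit).
import asyncio

from ea4all.src.ea4all_apm.graph import apm_graph

async def ask(question: str):
    result = await apm_graph.ainvoke(
        {"question": question},
        config={"configurable": {"query_model": "meta-llama/Llama-3.3-70B-Instruct"}},
    )
    # OutputState exposes the final answer under "generation".
    return result.get("generation")

if __name__ == "__main__":
    print(asyncio.run(ask("What applications support the marketing landscape?")))
```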
ea4all/src/ea4all_apm/prompts.py ADDED
@@ -0,0 +1,292 @@
1
+ """Default prompts and support functions."""
2
+
3
+ #prompt libraries
4
+ from langchain_core.prompts.chat import (ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate,)
5
+ from langchain_core.prompts import PromptTemplate, FewShotChatMessagePromptTemplate
6
+ from langchain_core.prompts import format_document
7
+ from langchain_core.prompts import ChatPromptTemplate
8
+ from langchain.chains.prompt_selector import ConditionalPromptSelector
9
+
10
+ from langchain_core.output_parsers.json import JsonOutputParser
11
+
12
+ ##return a prompt-template class with informed user inquiry
13
+ def ea4all_prompt(query):
14
+ prompt_template = PromptTemplate(
15
+ input_variables=["query", "answer"],
16
+ template=TEMPLATE_QUERY_ANSWER)
17
+
18
+ prompt = prompt_template.format(
19
+ query=query,
20
+ answer="")
21
+
22
+ return prompt
23
+
24
+ ##return a chat-prompt-template class from the informed template
25
+ def ea4all_chat_prompt(template):
26
+ system_message_prompt = SystemMessagePromptTemplate.from_template(template)
27
+ human_template = "{user_question}"
28
+ human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
29
+
30
+ ea4all_prompt = ChatPromptTemplate.from_messages(
31
+ messages=[
32
+ system_message_prompt,
33
+ ## MessagesPlaceholder(variable_name="history"),
34
+ human_message_prompt],
35
+ )
36
+ ea4all_prompt.output_parser=JsonOutputParser()
37
+
38
+ return ea4all_prompt
39
+
40
+ ##select best prompt based on user inquiry's category
41
+ def ea4ll_prompt_selector(category):
42
+ QUESTION_PROMPT_SELECTOR = ConditionalPromptSelector(
43
+ default_prompt = ea4all_chat_prompt(GENERAL_TEMPLATE),
44
+ conditionals=[
45
+ (lambda category: True if category == "Strategy" else False, ea4all_chat_prompt(STRATEGY_TEMPLATE)),
46
+ (lambda category: True if category == "Application" else False, ea4all_chat_prompt(APPLICATION_TEMPLATE)),
47
+ (lambda category: True if category == "Recommendation" else False, ea4all_chat_prompt(RECOMMENDATION_TEMPLATE)),
48
+ (lambda category: True if category not in ("Strategy","Application", "Recommendation") else False, ea4all_chat_prompt(GENERAL_TEMPLATE))
49
+ ]
50
+ )
51
+
52
+ prompt = QUESTION_PROMPT_SELECTOR.get_prompt(category)
53
+
54
+ return(prompt)
55
+
56
+
57
+ ##Template-basic instruction + context
58
+ TEMPLATE_CONTEXT = """You are a helpful Enterprise Architect with knowledge on enterprises IT landscapes.
59
+ Use only the context delimited by triple backticks to answer questions. Return the answer formatted as a text paragraph.
60
+ If you don't know the answer return I could not find the information.
61
+ Don't make up the response.
62
+ Context: ```{cdocs}```
63
+ Help answer: ""
64
+ """
65
+
66
+ ##Template-basic instruction + question + answer
67
+ TEMPLATE_QUERY_ANSWER = """You are an Enterprise Architect highly knowledgeable on the IT landscape. \
68
+ Answer the question that is delimited by triple backticks into a style that is bullet list. \
69
+ If the question cannot be answered using the information provided answer with "I don't know". \
70
+
71
+ Always say "thanks for asking!" at the end of the answer.
72
+
73
+ Question: ```{user_question}```
74
+ Answer: {answer}
75
+ """
76
+
77
+ TEMPLATE_APM_QNA_ROUTING = """application portfolio assessment, application/IT landscape rationalisation, simplification or optimisation, business capability assessment, line of business landscape, who can I talk to, assistance from architecture team."""
78
+
79
+ ##Template-break-into-simpler-tasks
80
+ #https://platform.openai.com/docs/guides/prompt-engineering/strategy-split-complex-tasks-into-simpler-subtasks
81
+ TEMPLATE_HEADER = """You are a helpful enterprise architect assistant. """
82
+ TEMPLATE_HEADER += """Your goal is to provide accurate and detailed responses to user inquiry. """
83
+ TEMPLATE_HEADER += """You have access to a vast amount of enterprise architecture knowledge, """
84
+ TEMPLATE_HEADER += """and you can understand and generate language fluently. """
85
+ TEMPLATE_HEADER += """You can assist with a wide range of architectural topics, including but not limited to """
86
+ TEMPLATE_HEADER += """business, application, data and technology architectures. """
87
+ TEMPLATE_HEADER += """You should always strive to promote a positive and respectful conversation.
88
+ """
89
+
90
+ TEMPLATE_TASKS = ""
91
+ TEMPLATE_TASKS += """You will be provided with a user inquiry. """
92
+ TEMPLATE_TASKS += """Classify the inquiry into primary category and secondary category. """
93
+ TEMPLATE_TASKS += """Primary categories: Strategy, Application, Recommendation or General Inquiry. """
94
+ TEMPLATE_TASKS += """Strategy secondary categories:
95
+ - Architecture and Technology Strategy
96
+ - Vision
97
+ - Architecture Principles
98
+ """
99
+ TEMPLATE_TASKS += """Application secondary categories:
100
+ - Meet business and technical need
101
+ - Business criticality
102
+ - Roadmap
103
+ - Business Capability
104
+ - Hosting
105
+ """
106
+ TEMPLATE_TASKS += """Recommendation secondary categories:
107
+ - Application rationalisation
108
+ - Landscape simplification
109
+ - Reuse existent invested application
110
+ - Business capability with overlapping applications
111
+ - Opportunities and innovation
112
+ """
113
+ TEMPLATE_TASKS += """General inquiry:
114
+ - Speak to an architect
115
+ """
116
+ TEMPLATE_TASKS += """You may also revise the original inquiry if you think that revising \
117
+ it will ultimately lead to a better response from the language model """
118
+ TEMPLATE_TASKS += """Provide your output in JSON format with the keys: primary, secondary, question.
119
+ """
120
+
121
+ #Template-break-into-specific-prompt-by-category
122
+ strategy_template = """You will be provided with inquiry about architecture strategy.
123
+ Follow these steps to answer user inquiry:
124
+ STEP 1 - Using only the context delimited by triple backticks.
125
+ STEP 2 - Look at applications with a roadmap to invest.
126
+ STEP 3 - Extract the information that is only relevant to help answer the user inquiry
127
+ """
128
+
129
+ application_template = """You will be provided with an inquiry about application architecture.
130
+ Follow these steps to answer user inquiry:
131
+ STEP 1 - Using only the context delimited by triple backticks.
132
+ STEP 2 - Extract the information that is only relevant to help answer the user inquiry
133
+ """
134
+
135
+ recommendation_template = """You will be provided with enterprise architecture inquiry that needs a recommendation.
136
+ Follow these steps to answer user inquiry:
137
+ STEP 1 - Use only the context delimited by triple backticks.
138
+ STEP 2 - Look at applications with low business or technical fit
139
+ STEP 3 - Look at applications with a roadmap different to invest
140
+ STEP 4 - Look at applications hosted on-premise
141
+ STEP 5 - Look at Business capability with overlapping applications
142
+ """
143
+
144
+ general_template = """You will be provided with a general inquiry about the enterprise architecture IT landscape.
145
+ Follow these steps to answer user queries:
146
+ STEP 1 - use only the context delimited by triple backticks
147
+ STEP 2 - Extract the information that is only relevant to help answer the user inquiry
148
+ """
149
+
150
+ default_template = """
151
+ FINAL STEP - Do not make up or guess ANY extra information. \
152
+ Ask follow-up question to the user if you need further clarification to understand and answer their inquiry. \
153
+ After a follow-up question if you still don't know the answer or don't find specific information needed to answer the user inquiry \
154
+ return I could not find the information. \
155
+ Ensure that the response contain all relevant context needed to interpret them -
156
+ in other words don't extract small snippets that are missing important context.
157
+ Format the output as a string with the most appropriate style to make it clear, concise and user-friendly for a chatbot response.
158
+ Here is the question: {user_question}
159
+ Here is the context: ```{cdocs}```
160
+ """
161
+
162
+ STRATEGY_TEMPLATE = TEMPLATE_HEADER + strategy_template + default_template
163
+ APPLICATION_TEMPLATE = TEMPLATE_HEADER + application_template + default_template
164
+ RECOMMENDATION_TEMPLATE = TEMPLATE_HEADER + recommendation_template + default_template
165
+ GENERAL_TEMPLATE = TEMPLATE_HEADER + general_template + default_template
166
+
167
+
168
+ ###############################################
169
+ ##COLLECTION of prompts for conversation memory
170
+ ###############################################
171
+
172
+ _template = """Given the following conversation and a follow up question,\
173
+ rephrase the follow up question to be a standalone question, in its original language.\
174
+ Chat History:
175
+ {chat_history}
176
+ Follow Up Input: {user_question}
177
+ Standalone question:"""
178
+
179
+ CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)
180
+ DEFAULT_DOCUMENT_PROMPT = PromptTemplate.from_template(template="{page_content}")
181
+
182
+
183
+ def _combine_documents(
184
+ docs, document_prompt=DEFAULT_DOCUMENT_PROMPT, document_separator="\n\n"
185
+ ):
186
+ doc_strings = [format_document(doc, document_prompt) for doc in docs]
187
+
188
+ return document_separator.join(doc_strings)
189
+
190
+
191
+ ##################################################
192
+ ##COLLECTION of prompts - RAG query transformation
193
+ ##################################################
194
+ ## Multi Query
195
+ # Prompt
196
+ multiquery_template = """You are an AI Enterprise Architect language model assistant. Your task is to generate five
197
+ different versions of the given user question to retrieve relevant documents from a vector
198
+ database. By generating multiple perspectives on the user question, your goal is to help
199
+ the user overcome some of the limitations of the distance-based similarity search.
200
+ Provide these alternative questions separated by newlines. Original question: {standalone_question}"""
201
+
202
+ decomposition_template = """You are a helpful enterprise architect assistant that generates multiple sub-questions related to an input question. \n
203
+ The goal is to break down the input into a set of sub-problems / sub-questions that can be answered in isolation. \n
204
+ Generate multiple search queries related to: {user_question} \n
205
+ Output (3 queries):"""
206
+
207
+ decomposition_answer_recursevely_template = """
208
+ Here is the question you need to answer:
209
+
210
+ \n --- \n {question} \n --- \n
211
+
212
+ Here is any available background question + answer pairs:
213
+
214
+ \n --- \n {q_a_pairs} \n --- \n
215
+
216
+ Here is additional context relevant to the question:
217
+
218
+ \n --- \n {context} \n --- \n
219
+
220
+ Use the above context and any background question + answer pairs to answer the question: \n {user_question}
221
+ """
222
+
223
+ rag_fusion_questions_template = """You are a helpful enterprise architect assistant that generates multiple search queries based on a single input query. \n
224
+ Generate multiple search queries related to: {standalone_question} \n
225
+ Output (4 queries):"""
226
+
227
+ # Few Shot Examples
228
+ few_shot_step_back_examples = [
229
+ {
230
+ "input": "Could the members of The Police perform lawful arrests?",
231
+ "output": "what can the members of The Police do?",
232
+ },
233
+ {
234
+ "input": "Jan Sindel was born in what country?",
235
+ "output": "what is Jan Sindel personal history?",
236
+ },
237
+ ]
238
+ # We now transform these to example messages
239
+ few_shot_step_back_examples_prompt = ChatPromptTemplate.from_messages(
240
+ [
241
+ ("human", "{input}"),
242
+ ("ai", "{output}"),
243
+ ]
244
+ )
245
+ few_shot_prompt = FewShotChatMessagePromptTemplate(
246
+ input_variables=["standalone_question"],
247
+ example_prompt=few_shot_step_back_examples_prompt,
248
+ examples=few_shot_step_back_examples,
249
+ )
250
+ few_shot_step_back_prompt = ChatPromptTemplate.from_messages(
251
+ [
252
+ (
253
+ "system",
254
+ """You are an expert at enterprise architecture world knowledge. Your task is to step back and paraphrase a question to a more generic step-back question, which is easier to answer. Here are a few examples:""",
255
+ ),
256
+ # Few shot examples
257
+ few_shot_prompt,
258
+ # New question
259
+ ("user", "{standalone_question}"),
260
+ ]
261
+ )
262
+ # Response prompt
263
+ step_back_response_prompt_template = """You are an expert of enterprise architecture world knowledge. I am going to ask you a question. Your response should be comprehensive and not contradicted with the following context if they are relevant. Otherwise, ignore them if they are not relevant.
264
+
265
+ # {normal_context}
266
+ # {step_back_context}
267
+
268
+ # Original Question: {standalone_question}
269
+ """
270
+
271
+ # HyDE document generation
272
+ hyde_template = """Please write an architecture scientific passage to answer the question
273
+ Question: {standalone_question}
274
+ Passage:"""
275
+
276
+ # Retrieval APM Graph - TO BE REVIEWED
277
+ ROUTER_SYSTEM_PROMPT = """You are a LangChain Developer advocate. Your job is help people using LangChain answer any issues they are running into.
278
+
279
+ A user will come to you with an inquiry. Your first job is to classify what type of inquiry it is. The types of inquiries you should classify it as are:
280
+
281
+ ## `more-info`
282
+ Classify a user inquiry as this if you need more information before you will be able to help them. Examples include:
283
+ - The user complains about an error but doesn't provide the error
284
+ - The user says something isn't working but doesn't explain why/how it's not working
285
+
286
+ ## `langchain`
287
+ Classify a user inquiry as this if it can be answered by looking up information related to the LangChain open source package. The LangChain open source package \
288
+ is a python library for working with LLMs. It integrates with various LLMs, databases and APIs.
289
+
290
+ ## `general`
291
+ Classify a user inquiry as this if it is just a general question"""
292
+
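As a hedged illustration (not part of this commit), one of the templates above could be wired into a LangChain runnable that returns the alternative queries as a list; the model choice here is illustrative only:

from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_openai import ChatOpenAI

# Sketch only: turn multiquery_template into a chain that yields one query per line.
llm = ChatOpenAI(model="gpt-4o-mini")  # assumption: any LangChain chat model works here
multiquery_prompt = ChatPromptTemplate.from_template(multiquery_template)
generate_queries = (
    multiquery_prompt
    | llm
    | StrOutputParser()
    | (lambda text: [q.strip() for q in text.split("\n") if q.strip()])
)
# generate_queries.invoke({"standalone_question": "Which applications support invoice processing?"})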
ea4all/src/ea4all_apm/state.py ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """State management for the APM graph.
2
+
3
+ This module defines the state structures used in the APM graph. It includes
4
+ definitions for agent state, input state, and router classification schema.
5
+ """
6
+
7
+ from dataclasses import dataclass, field
8
+ from typing import Optional, Literal, List, Tuple
9
+ from typing_extensions import TypedDict
10
+
11
+ class Router(TypedDict):
12
+ """Classify a user query."""
13
+ logic: str
14
+ datasource: Optional[Literal["vectorstore", "websearch"]]
15
+
16
+ # Optional, the InputState is a restricted version of the State that is used to
17
+ # define a narrower interface to the outside world vs. what is maintained
18
+ # internally.
19
+ @dataclass(kw_only=True)
20
+ class InputState:
21
+ """Represents the input state for the agent.
22
+
23
+ This class defines the structure of the input state, which includes
24
+ the messages exchanged between the user and the agent. It serves as
25
+ a restricted version of the full State, providing a narrower interface
26
+ to the outside world compared to what is maintained internally.
27
+ """
28
+
29
+ """Attributes:
30
+ question: user question
31
+ """
32
+ question: str
33
+
34
+ @dataclass(kw_only=True)
35
+ class OutputState:
36
+ """Represents the output schema for the APM agent."""
37
+ question: str
38
+ messages: Optional[List[str]] = None
39
+ generation: Optional[str] = None
40
+ source: Optional[str] = None
41
+ """Answer to user's Architecture IT Landscape question about ."""
42
+
43
+ @dataclass(kw_only=True)
44
+ class OverallState(InputState, OutputState):
45
+ """State of the APM graph / agent."""
46
+
47
+ """
48
+ safety_status: user question's safeguarding status, justification, rephrased question
49
+ router: classification of the user's query
50
+ source: RAG or websearch
51
+ retrieved: list of documents retrieved by the retriever
52
+ rag: last RAG approach used
53
+ chat_memory: user chat memory
54
+ """
55
+ safety_status: Optional[Tuple[str, str, str]] = None
56
+ router: Optional[Router] = None
57
+ rag: Optional[str] = None
58
+ chat_memory: Optional[str] = None
59
+ retrieved: Optional[List[str]] = None
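A minimal sketch (an assumption, not part of the module) of how a routing node might populate these structures: a node returns a partial state update whose keys match the fields above.

def route_question(state: OverallState) -> dict:
    # Hypothetical classification result; in the real graph this comes from an LLM call.
    router: Router = {"logic": "question is about the application portfolio", "datasource": "vectorstore"}
    return {"router": router, "source": router["datasource"]}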
ea4all/src/ea4all_gra/configuration.py ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Define the configurable parameters for the TOGAF agent."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from dataclasses import dataclass, field
6
+ from typing import Annotated
7
+
8
+ from ea4all.src.shared.configuration import BaseConfiguration
9
+
10
+ @dataclass(kw_only=True)
11
+ class AgentConfiguration(BaseConfiguration):
12
+ """The configuration for the agent."""
13
+
14
+ supervisor_model: Annotated[str, {"__template_metadata__": {"kind": "llm"}}] = field(
15
+ default="gpt-4o-mini",
16
+ metadata={
17
+ "description": "The language model used for supervisor agents. Should be in the form: provider/model-name."
18
+ },
19
+ )
20
+ togaf_model: Annotated[str, {"__template_metadata__": {"kind": "llm"}}] = field(
21
+ default="meta-llama/Llama-3.3-70B-Instruct",
22
+ metadata={
23
+ "description": "The language model used for processing and refining queries. Should be in the form: provider/model-name."
24
+ },
25
+ )
26
+
27
+ recursion_limit: Annotated[int, {"__template_metadata__": {"kind": "integer"}}] = field(
28
+ default=10,
29
+ metadata={
30
+ "description": "The maximum number of times the agent can recursively call itself."
31
+ },
32
+ )
33
+
34
+ dbr_mock: Annotated[str, {"__template_metadata__": {"kind": "dataset"}}] = field(
35
+ default="dbr.txt",
36
+ metadata={
37
+ "description": "The EA4ALL Togal Business Requirement mock content."
38
+ },
39
+ )
40
+
41
+ ea4all_ask_human: Annotated[str, {"__template_metadata__": {"kind": "integration"}}] = field(
42
+ default="Frontend",
43
+ metadata={
44
+ "description": "Trigger EA4ALL ask human input via interruption or receive from external frontend."
45
+ },
46
+ )
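The usage pattern assumed throughout this package is that a node reads these values from the RunnableConfig passed in by LangGraph; a minimal sketch with illustrative names:

from langchain_core.runnables import RunnableConfig

def some_node(state: dict, config: RunnableConfig) -> dict:
    # from_runnable_config is provided by BaseConfiguration, as used elsewhere in this repo.
    configuration = AgentConfiguration.from_runnable_config(config)
    # e.g. pick the model configured for TOGAF work (default meta-llama/Llama-3.3-70B-Instruct).
    return {"model_in_use": configuration.togaf_model}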
ea4all/src/ea4all_gra/data.py ADDED
@@ -0,0 +1,131 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import List, Optional
2
+ from pydantic import BaseModel, Field
3
+
4
+ """
5
+ This module defines the data models used in the EA4ALL TOGAF project.
6
+ The data models include:
7
+ - Capability: Represents a business capability.
8
+ - BusinessCapability: Represents a list of required business capabilities.
9
+ - Requirement: Represents a business requirement.
10
+ - ListRequirement: Represents a list of identified business requirements.
11
+ - Objective: Represents a business objective.
12
+ - ListObjective: Represents a list of business objectives.
13
+ - UseCase: Represents a use case describing user interactions with the system.
14
+ - UserJourney: Represents a list of user journeys.
15
+ - StakeholderMap: Represents a business stakeholder.
16
+ - StakeholderList: Represents a list of business stakeholders.
17
+ - IdentifiedApp: Represents an identified application.
18
+ - LandscapeAsIs: Represents a list of applications to address a business query.
19
+ - CapabilityAsIs: Represents the support status of a business capability.
20
+ - CapabilityGap: Represents a list of capability support statuses.
21
+ - GradeAnswer: Represents a binary score for relevance check on retrieved applications.
22
+ - GradeHallucinations: Represents a binary score for hallucination present in generation answer.
23
+ - GradeDocuments: Represents a binary score for relevance check on retrieved applications.
24
+ - Principles: Represents the business, architecture, and technology principles.
25
+ - GradeBusinessQueryAnswer: Represents a binary score for quality check on business query.
26
+ """
27
+
28
+
29
+ class Capability(BaseModel):
30
+ """Business capability"""
31
+ capability: str = Field(description="Business capability name.")
32
+
33
+ class BusinessCapability(BaseModel):
34
+ """List of required business capabilities."""
35
+ capabilities: Optional[List[Capability]]
36
+
37
+ class Requirement(BaseModel):
38
+ """Business requirement."""
39
+ category: str = Field(description="Business requirement should be functional or non-functional")
40
+ requirement: str = Field(description="Business requirement description.")
41
+
42
+ class ListRequirement(BaseModel):
43
+ """List of identified business requirements."""
44
+ requirements: Optional[List[Requirement]]
45
+
46
+ class Objective(BaseModel):
47
+ """Business Objective"""
48
+ objective: str = Field(title=None, description="Business objective.")
49
+
50
+ class ListObjective(BaseModel):
51
+ """List of business objectives."""
52
+ objectives: Optional[List[Objective]]
53
+
54
+ class UseCase(BaseModel):
55
+ """Use case describing who (actor,user,persona) does what (interaction) with the system, for what purpose (goal), without dealing with system internals."""
56
+ persona: str = Field(description="User, actor or personna who interacts with the system.")
57
+ step: str = Field(description="Action executed by user.")
58
+ goal: str = Field(description="Purpose, goal of a step executed by user.")
59
+
60
+ class UserJourney(BaseModel):
61
+ """List of user journey."""
62
+ userjourney: Optional[List[UseCase]]
63
+
64
+ class StakeholderMap(BaseModel):
65
+ """Business stakeholder."""
66
+ stakeholder: str = Field(description="Stakeholder name.")
67
+ role: str = Field(description="Stakeholder role.")
68
+ concern: str = Field(description="Stakeholder concern.")
69
+
70
+ class StakeholderList(BaseModel):
71
+ """List of business stakeholders."""
72
+ stakeholders: Optional[List[StakeholderMap]]
73
+
74
+ #Task-2
75
+ class IdentifiedApp(BaseModel):
76
+ """Identified application"""
77
+ application: str = Field(description="Application name")
78
+ description: str = Field(description="Application description")
79
+ capability: list = Field(description="Business capabilities supported")
80
+ businessFit: str = Field(description="how well application support current business need")
81
+ technicalFit: str = Field(description="application alignment with technology strategy")
82
+ roadmap: str = Field(description="application portfolio strategy")
83
+
84
+ class LandscapeAsIs(BaseModel):
85
+ """List of applications to address a business query."""
86
+ identified_asis: Optional[List[IdentifiedApp]]
87
+
88
+ class CapabilityAsIs(BaseModel):
89
+ """Business capability support"""
90
+ capability: str = Field(description="business capability definition")
91
+ support: bool = Field(description="capability support status")
92
+
93
+ class CapabilityGap(BaseModel):
94
+ """List of capabilities support status"""
95
+ capability_status: Optional[List[CapabilityAsIs]]
96
+
97
+ class GradeAnswer(BaseModel):
98
+ """Binary score for relevance check on retrieved applications."""
99
+
100
+ binary_score: str = Field(...,
101
+ description="Relevance of retrieved applications to the business query, 'yes' or 'no'"
102
+ )
103
+
104
+ class GradeHallucinations(BaseModel):
105
+ """Binary score for hallucination present in generation answer."""
106
+
107
+ binary_score: bool = Field(
108
+ description="Answer is grounded in the facts, 'yes' or 'no'"
109
+ )
110
+
111
+ class GradeDocuments(BaseModel):
112
+ """Binary score for relevance check on retrieved applications."""
113
+
114
+ binary_score: str = Field(
115
+ description="Applications support the business capability, 'yes' or 'no'"
116
+ )
117
+
118
+ #Task-3
119
+ class Principles(BaseModel):
120
+ """Describe the business, archirecture and technology principles"""
121
+ architecture: list = Field(description="Name and description of an architecture principle")
122
+ business: list = Field(description="Name and description of a business principle")
123
+ technology: list = Field(description="Name and description of a technology principle")
124
+
125
+ #Togaf-Agentic-Workflow
126
+ class GradeBusinessQueryAnswer(BaseModel):
127
+ """Binary score for quality check on business query."""
128
+
129
+ binary_score: str = Field(
130
+ description="Business Query is well-described, 'yes' or 'no'"
131
+ )
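These models are consumed elsewhere in the package through structured-output parsing; a minimal sketch, assuming the LangChain PydanticOutputParser used in the task graphs:

from langchain_core.output_parsers import PydanticOutputParser

parser = PydanticOutputParser(pydantic_object=GradeBusinessQueryAnswer)
format_instructions = parser.get_format_instructions()  # injected into the grader prompt
grade = parser.parse('{"binary_score": "yes"}')          # -> GradeBusinessQueryAnswer(binary_score='yes')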
ea4all/src/ea4all_gra/graph.py ADDED
@@ -0,0 +1,410 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ This module contains the implementation of a Togaf reference architecture graph.
3
+ The graph represents a workflow for managing a conversation between team members
4
+ in the context of Togaf, a framework for enterprise architecture development.
5
+
6
+ The graph is defined using the StateGraph class from the langgraph library.
7
+ It consists of several nodes, each representing a specific task or action in the workflow.
8
+ The nodes are connected by edges, which control the flow of logic through the program.
9
+
10
+ The main entry point of the graph is the "enter_graph" node, which initializes the state of
11
+ the graph with the provided input. The graph then moves to the "ask_human" node, which prompts
12
+ the user to provide a business requirement document/file name or paste its content.
13
+
14
+ The graph then proceeds to the "query_grader" node, which evaluates the quality of the business query.
15
+ Based on the evaluation, the graph branches to different nodes, such as "assess_query", "assess_asis",
16
+ and "generate_tobe", each representing a different task in the Togaf workflow.
17
+
18
+ The "togaf_supervisor" node acts as a router, determining the next role to act based on the conversation
19
+ and instructions. It uses an LLM (Large Language Model) to make the decision.
20
+
21
+ The graph continues to execute the tasks until it reaches the "return" node, which generates a response
22
+ to be returned to the user.
23
+
24
+ The graph is compiled and saved as a Togaf_reference_architecture_graph object, which can be executed
25
+ to run the workflow.
26
+
27
+ The module also includes helper functions and utility classes used by the graph, as well as import statements
28
+ for required libraries and modules.
29
+ """
30
+
31
+ """Changelog: 20250609
32
+ - Refactored State classes to OverallState, InputState, OutputState
33
+ - Task-1, Task-2, Task-3 State classes changed to TypedDicts
34
+ - Review what is the best content to provide to Retrieve: requirements or intent
35
+ """
36
+
37
+
38
+ #core libraries
39
+ from langchain_core.runnables import RunnableConfig
40
+ from langchain_core.prompts.chat import ChatPromptTemplate, MessagesPlaceholder
41
+ from langchain_core.prompts import ChatPromptTemplate
42
+ from langchain_core.messages import (
43
+ AIMessage,
44
+ SystemMessage,
45
+ HumanMessage,
46
+ )
47
+ from langchain_core.output_parsers import (
48
+ PydanticOutputParser,
49
+ JsonOutputKeyToolsParser
50
+ )
51
+ from langgraph.graph import (
52
+ END,
53
+ StateGraph,
54
+ )
55
+ from langgraph.types import Command, interrupt
56
+ from langgraph.checkpoint.memory import MemorySaver
57
+
58
+ from langchain import hub
59
+
60
+ import functools
61
+
62
+ from typing import List, Union, Dict
63
+ from typing_extensions import Literal
64
+
65
+ from ea4all.src.ea4all_gra.configuration import AgentConfiguration
66
+ from ea4all.src.ea4all_gra.state import OverallState, InputState, OutputState
67
+ from ea4all.src.ea4all_gra.data import (
68
+ GradeBusinessQueryAnswer
69
+ )
70
+
71
+ from ea4all.src.shared.utils import (
72
+ get_llm_client,
73
+ clean_and_load_json,
74
+ extract_response_from_backticks,
75
+ load_mock_content,
76
+ )
77
+ from ea4all.src.shared.prompts import LLAMA31_PROMPT_FORMAT
78
+
79
+ from ea4all.src.ea4all_gra.togaf_task1.graph import task1_graph
80
+ from ea4all.src.ea4all_gra.togaf_task2.graph import task2_graph
81
+ from ea4all.src.ea4all_gra.togaf_task3.graph import task3_graph
82
+
83
+ from ea4all.src.ea4all_gra.utils import (
84
+ AsyncInterruptHandler
85
+ )
86
+
87
+ #CAPTURE business requirement asking for USER input & call togaf_agentic workflow
88
+ async def _get_user_input():
89
+
90
+ interrupt_handler = AsyncInterruptHandler()
91
+ result = await interrupt_handler.handle_interrupt()
92
+
93
+ return {"user_feedback": result}
94
+
95
+ async def togaf_ask_human(state: OverallState, config: RunnableConfig):
96
+ # Check user_input method
97
+ configuration = AgentConfiguration.from_runnable_config(config)
98
+
99
+ if "interrupt" in (AgentConfiguration.ea4all_ask_human, configuration.ea4all_ask_human):
100
+ print("--- TOGAF Blueprint Team --- User input requested")
101
+ response = interrupt(
102
+ {
103
+ "task": state.get('business_query'),
104
+ "content": "Please provide your business requirement in the form of document/file name or past the content:",
105
+ "optional": False
106
+ },
107
+ )
108
+
109
+ print(f"--- TOGAF AGENTIC team --- got an answer and processing user input: {response}")
110
+
111
+ business_query = load_mock_content(response['user_feedback'])
112
+ else:
113
+ business_query = state.get('business_query')
114
+
115
+ return Command(update={
116
+ "business_query": business_query,
117
+ }
118
+ )
119
+
120
+ #DEFINE Helper functions
121
+ def create_team_supervisor(
122
+ state: OverallState,
123
+ config:RunnableConfig):
124
+ members = ["AssessBusinessQuery", "AssessLandscape", "GenerateToBe"] #NEEDS REFACTORING
125
+
126
+ """An LLM-based router."""
127
+ options = ["FINISH"] + members
128
+ function_def = {
129
+ "name": "route",
130
+ "description": "Select the next role.",
131
+ "parameters": {
132
+ "title": "routeSchema",
133
+ "type": "object",
134
+ "properties": {
135
+ "next": {
136
+ "type": "string",
137
+ "title": "Next",
138
+ "anyOf": [
139
+ {"enum": options},
140
+ ],
141
+ },
142
+ },
143
+ "required": ["next"],
144
+ },
145
+ }
146
+
147
+ configuration = AgentConfiguration.from_runnable_config(config)
148
+ model = get_llm_client(
149
+ configuration.supervisor_model,
150
+ api_base_url="",
151
+ )
152
+
153
+ system_prompt = " ".join([
154
+ 'You are a supervisor tasked with managing a conversation between the',
155
+ 'following team members: {team_members}. Respond with the worker to act next in sequence.',
156
+ 'Each worker will perform a task and respond with their results and status.',
157
+ 'After the last worker is finished, respond with FINISH.']
158
+ )
159
+
160
+ prompt = ChatPromptTemplate.from_messages(
161
+ [
162
+ ("system", system_prompt),
163
+ MessagesPlaceholder(variable_name="messages"),
164
+ (
165
+ "system",
166
+ "Based on the above conversation and instructions who should act next."
167
+ "Or should we FINISH?. Select one of: {options}.",
168
+ ),
169
+ ]
170
+ ).partial(options=str(options),team_members=", ".join(members))
171
+
172
+ return (
173
+ prompt
174
+ | model.bind_tools(tools=[function_def], tool_choice="route")
175
+ | JsonOutputKeyToolsParser(key_name='route', first_tool_only=True)
176
+ )
177
+
178
+ # The following functions interoperate between the top level graph state
179
+ # and the state of the sub-graph
180
+ # this makes it so that the states of each graph don't get intermixed
181
+ def task1_enter_chain(state:OverallState, members: List[str]) -> dict:
182
+ results = {
183
+ "messages": [AIMessage(content=str(state))],
184
+ "team_members": ", ".join(members),
185
+ "business_query": state.get('business_query'),
186
+ "next": state.get('next'),
187
+ }
188
+ return results
189
+
190
+ def task2_enter_chain(state:OverallState, members: List[str]):
191
+ results = {
192
+ "messages": [AIMessage(content=str(state))],
193
+ "team_members": ", ".join(members),
194
+ "business_query": state.get('business_query'),
195
+ "intent": state.get('intent'),
196
+ "stakeholder": state.get('stakeholder'),
197
+ "biz_capability": state.get('biz_capability'),
198
+ "requirement": state.get('requirement'),
199
+ "userjourney": state.get('userjourney'),
200
+ "next": state.get('next')
201
+ }
202
+ return results
203
+
204
+ def task3_enter_chain(state:OverallState, members: List[str]):
205
+ results = {
206
+ "messages": [AIMessage(content=str(state))],
207
+ "team_members": ", ".join(members),
208
+ "business_query": state.get('business_query'),
209
+ "intent": state.get('intent'),
210
+ "stakeholder": state.get('stakeholder'),
211
+ "biz_capability": state.get('biz_capability'),
212
+ "requirement": state.get('requirement'),
213
+ "userjourney": state.get('userjourney'),
214
+ "landscape_asis": state.get('landscape_asis'),
215
+ "identified_asis": state.get('identified_asis'),
216
+ "landscape_gap": state.get('landscape_gap'),
217
+ "next": state.get('next'),
218
+ }
219
+ return results
220
+
221
+ def get_last_message(state: OverallState) -> dict:
222
+ results = {}
223
+ #results['messages'] = [state.get('messages')[-1].content]
224
+ results['next'] = state.get('next')
225
+ if state.get('business_query'):
226
+ results['business_query'] = state.get('business_query')
227
+ if state.get('principles'):
228
+ results['principles'] = state.get('principles')
229
+ if state.get('intent'):
230
+ results['intent'] = state.get('intent')
231
+ if state.get('stakeholder'):
232
+ results['stakeholder'] = state.get('stakeholder')
233
+ if state.get('biz_capability'):
234
+ results['biz_capability'] = state.get('biz_capability')
235
+ if state.get('requirement'):
236
+ results['requirement'] = state.get('requirement')
237
+ if state.get('userjourney'):
238
+ results['userjourney'] = state.get('userjourney')
239
+ if state.get('landscape_asis'):
240
+ results['landscape_asis'] = state.get('landscape_asis')
241
+ if state.get('identified_asis'):
242
+ results['identified_asis'] = state.get('identified_asis')
243
+ if state.get('landscape_gap'):
244
+ results['landscape_gap'] = state.get('landscape_gap')
245
+ if state.get('vision_target'):
246
+ results['vision_target'] = state.get('vision_target')
247
+ if state.get('architecture_runway'):
248
+ results['architecture_runway'] = state.get('architecture_runway')
249
+
250
+ return results
251
+
252
+ def join_graph(state: OverallState) -> dict:
253
+ results = {}
254
+ results['messages'] = [state.get('business_query')[-1]]
255
+ results['next'] = state.get('next')
256
+ if state.get('business_query'):
257
+ results['business_query'] = state.get('business_query')
258
+ if state.get('principles'):
259
+ results['principles'] = state.get('principles')
260
+ if state.get('intent'):
261
+ results['intent'] = state.get('intent')
262
+ if state.get('stakeholder'):
263
+ results['stakeholder'] = state.get('stakeholder')
264
+ if state.get('biz_capability'):
265
+ results['biz_capability'] = state.get('biz_capability')
266
+ if state.get('requirement'):
267
+ results['requirement'] = state.get('requirement')
268
+ if state.get('userjourney'):
269
+ results['userjourney'] = state.get('userjourney')
270
+ if state.get('landscape_asis'):
271
+ results['landscape_asis'] = state.get('landscape_asis')
272
+ if state.get('identified_asis'):
273
+ results['identified_asis'] = state.get('identified_asis')
274
+ if state.get('landscape_gap'):
275
+ results['landscape_gap'] = state.get('landscape_gap')
276
+ if state.get('vision_target'):
277
+ results['vision_target'] = state.get('vision_target')
278
+ if state.get('architecture_runway'):
279
+ results['architecture_runway'] = state.get('architecture_runway')
280
+
281
+ return results
282
+
283
+ ##Refactored to use Command instead of conditional_edge
284
+ async def business_query_grader(state:OverallState, config:RunnableConfig) -> Command[Literal["assess_query", "return"]]:
285
+ print(f"--- TOGAF AGENTIC team --- safety/quality review of the user requirement: {state.get('business_query')}")
286
+ business_query = state.get('business_query')
287
+
288
+ #if len(business_query) < 50:
289
+ # return Command(
290
+ # # state update
291
+ # update={"query_status": False},
292
+ # # control flow
293
+ # goto="return",
294
+ # )
295
+
296
+ # Prompt
297
+ grader_prompt = hub.pull('learn-it-all-do-it-all/ea4all_business_query_grader')
298
+
299
+ # Set up a parser:
300
+ #parser = PydanticOutputParser(pydantic_object=GradeBusinessQueryAnswer)
301
+ #grader_prompt = grader_prompt.partial(
302
+ # format_instructions=parser.get_format_instructions(),
303
+ # ai_output = LLAMA31_PROMPT_FORMAT,
304
+ #)
305
+
306
+ # Get any user-provided configs - LLM model in use
307
+ configuration = AgentConfiguration.from_runnable_config(config)
308
+ model = get_llm_client(configuration.togaf_model, configuration.api_base_url)
309
+
310
+ grader = grader_prompt | model
311
+
312
+ response = await grader.ainvoke(
313
+ {"business_query": state.get('business_query')}
314
+ )
315
+
316
+ binary_score = clean_and_load_json(extract_response_from_backticks(response.content))['binary_score']
317
+
318
+ messages = [
319
+ HumanMessage(content=state.get('business_query')),
320
+ ]
321
+
322
+ if binary_score == "yes":
323
+ return Command(
324
+ # state update
325
+ update={"query_status": True, "messages": messages},
326
+ # control flow
327
+ goto="assess_query",
328
+ )
329
+ else:
330
+ return Command(
331
+ # state update
332
+ update={"query_status": False},
333
+ # control flow
334
+ goto="return",
335
+ )
336
+
337
+ def return_2user(state:OverallState):
338
+ message = '{"binary_score":"no"}'
339
+
340
+ return {
341
+ "messages": [AIMessage(content=str(message), name="return")],
342
+ "next": "end",
343
+ }
344
+
345
+ async def enter_graph(state:dict, config: RunnableConfig) -> dict:
346
+
347
+ print(f"--- Entered TOGAF AGENTIC team to --- {state.get('business_query')}") #state.get('business_query')[-1].content
348
+ #if isinstance(state, dict):
349
+ # user_feedback = state.get('user_feedback') if state.get('user_feedback') else state['messages'][-1].content
350
+ #else:
351
+ # user_feedback = state.get('user_feedback', state['messages'][-1].content)
352
+
353
+ #busines_query = load_mock_content(state.get('user_feedback')),
354
+
355
+ business_query = state['business_query'][-1]['content']
356
+
357
+ return {"business_query": business_query}
358
+
359
+ ## TOGAF Orchestrator Graph
360
+ task1_business_query_chain = (
361
+ functools.partial(task1_enter_chain, members=list(task1_graph.nodes))
362
+ | task1_graph
363
+ )
364
+
365
+ task2_assess_asis_chain = (
366
+ functools.partial(task2_enter_chain, members=list(task2_graph.nodes))
367
+ | task2_graph
368
+ )
369
+
370
+ task3_vision_target_chain = (
371
+ functools.partial(task3_enter_chain, members=list(task3_graph.nodes))
372
+ | task3_graph
373
+ )
374
+
375
+ # Define the graph.
376
+ workflow = StateGraph(OverallState, input=InputState, output=OutputState, config_schema=AgentConfiguration)
377
+ # First add the nodes, which will do the work
378
+ workflow.add_node("enter_graph", enter_graph)
379
+ workflow.add_node("ask_human", togaf_ask_human)
380
+ workflow.add_node("query_grader", business_query_grader)
381
+ workflow.add_node("assess_query", get_last_message | task1_business_query_chain | join_graph)
382
+ workflow.add_node("assess_asis", get_last_message | task2_assess_asis_chain | join_graph)
383
+ workflow.add_node("generate_tobe", get_last_message | task3_vision_target_chain | join_graph)
384
+ workflow.add_node("return", return_2user)
385
+
386
+ # Define the graph connections, which controls how the logic
387
+ # propagates through the program
388
+ #workflow.add_conditional_edges(
389
+ # "togaf_supervisor",
390
+ # lambda x: x["next"],
391
+ # {
392
+ # "AssessBusinessQuery": "assess_query",
393
+ # "AssessLandscape": "assess_asis",
394
+ # "GenerateToBe": "generate_tobe",
395
+ # "FINISH": END,
396
+ # },
397
+ #)
398
+
399
+ workflow.add_edge("enter_graph", "ask_human")
400
+ workflow.add_edge("ask_human", "query_grader")
401
+ workflow.add_edge("assess_query", "assess_asis")
402
+ workflow.add_edge("assess_asis", "generate_tobe")
403
+ workflow.add_edge("generate_tobe", END)
404
+ workflow.add_edge("return", END)
405
+
406
+ workflow.set_entry_point("enter_graph")
407
+
408
+ #memory = MemorySaver()
409
+ togaf_graph = workflow.compile() #checkpointer=memory)
410
+ togaf_graph.name = "Togaf_reference_architecture_graph"
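A hedged usage sketch (not part of this file): enter_graph above reads state['business_query'][-1]['content'], so a message-style list is assumed as input, and the "Frontend" value mirrors the ea4all_ask_human configuration default so no interrupt is raised.

import asyncio

async def run_togaf_example():
    # Illustrative input; the exact shape depends on the calling frontend.
    return await togaf_graph.ainvoke(
        {"business_query": [{"role": "user", "content": "We need a self-service customer onboarding portal."}]},
        config={"configurable": {"ea4all_ask_human": "Frontend"}},
    )

# asyncio.run(run_togaf_example())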
ea4all/src/ea4all_gra/state.py ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pydantic import Field
2
+ from typing_extensions import (
3
+ Annotated,
4
+ TypedDict,
5
+ List
6
+ )
7
+ import operator
8
+ from typing import (
9
+ Optional,
10
+ )
11
+ from dataclasses import dataclass, field
12
+ from typing import Optional
13
+
14
+ from ea4all.src.ea4all_gra.data import (
15
+ ListRequirement,
16
+ ListObjective,
17
+ UserJourney,
18
+ StakeholderList,
19
+ BusinessCapability,
20
+ LandscapeAsIs,
21
+ CapabilityGap,
22
+ Principles
23
+ )
24
+
25
+ # Optional, the InputState is a restricted version of the State that is used to
26
+ # define a narrower interface to the outside world vs. what is maintained
27
+ # internally.
28
+ @dataclass(kw_only=True)
29
+ class InputState(TypedDict):
30
+ """Represents the input state for the agent.
31
+
32
+ This class defines the structure of the input state, which includes
33
+ the messages exchanged between the user and the agent. It serves as
34
+ a restricted version of the full State, providing a narrower interface
35
+ to the outside world compared to what is maintained internally.
36
+ """
37
+
38
+ """Attributes:
39
+ business_query: a business requirement is the starting point of the TOGAF process
40
+ """
41
+ #business_query: Optional[Annotated[List[str], Field(
42
+ # description="A business requirement is the starting point of the TOGAF process."), operator.add]]
43
+ business_query: str
44
+
45
+ @dataclass(kw_only=True)
46
+ class OutputState(TypedDict):
47
+ """Represents te output state for the agent."""
48
+ vision_target: Optional[str]
49
+ architecture_runway: Optional[str]
50
+
51
+ @dataclass(kw_only=True)
52
+ class OverallState(InputState, OutputState):
53
+ """Represents the state of a Togaf system."""
54
+
55
+ """
56
+ Attributes:
57
+ - user_feedback: used to capture additional information needed from the user by the graph
58
+ - business_query: a business requirement is the starting point of the TOGAF process
59
+ - query_status (Optional[bool]): Indicates the status of the query. Default value is False.
60
+ - messages (Optional[Annotated[list[AnyMessage], add_messages]]): A list of messages associated with the state.
61
+ - stakeholder (Optional[StakeholderList]): Represents the list of stakeholders.
62
+ - principles (Optional[Principles]): Represents the principles of the Togaf system.
63
+ - requirement (Optional[ListRequirement]): Represents the list of requirements.
64
+ - intent (Optional[ListObjective]): Represents the list of objectives.
65
+ - userjourney (Optional[UserJourney]): Represents the user journey of the Togaf system.
66
+ - biz_capability (Optional[BusinessCapability]): Represents the business capability of the Togaf system.
67
+ - landscape_asis (Optional[List[str]]): Represents the list of landscape as-is.
68
+ - identified_asis (Optional[LandscapeAsIs]): Represents the identified landscape as-is.
69
+ - landscape_gap (Optional[CapabilityGap]): Represents the capability gap of the landscape.
70
+ - vision_target (Optional[str]): Represents the vision target of the Togaf system.
71
+ - architecture_runway (Optional[str]): Represents the architecture runway of the Togaf system.
72
+ - next (Optional[str]): Represents the next step in the Togaf system.
73
+ """
74
+
75
+ query_status: Optional[bool]
76
+ stakeholder: Optional[StakeholderList]
77
+ principles: Optional[Principles]
78
+ requirement: Optional[ListRequirement]
79
+ intent: Optional[ListObjective]
80
+ userjourney: Optional[UserJourney]
81
+ biz_capability: Optional[BusinessCapability]
82
+ landscape_asis: Optional[List[str]]
83
+ identified_asis: Optional[LandscapeAsIs]
84
+ landscape_gap: Optional[CapabilityGap]
85
+ next: Optional[str]
ea4all/src/ea4all_gra/togaf_task1/graph.py ADDED
@@ -0,0 +1,125 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #core libraries
2
+ from langchain_core.runnables import RunnableConfig
3
+ from langchain_core.messages import (
4
+ AIMessage
5
+ )
6
+
7
+ from langgraph.graph import (
8
+ END,
9
+ StateGraph,
10
+ )
11
+
12
+ from ea4all.src.ea4all_gra.configuration import AgentConfiguration
13
+ from ea4all.src.ea4all_gra.data import (
14
+ ListRequirement,
15
+ ListObjective,
16
+ BusinessCapability,
17
+ StakeholderList,
18
+ UserJourney,
19
+ )
20
+ from ea4all.src.shared.utils import (
21
+ get_llm_client,
22
+ extract_detailed_business_requirements,
23
+ )
24
+ from ea4all.src.shared.prompts import LLAMA31_PROMPT_FORMAT
25
+
26
+ from ea4all.src.ea4all_gra.togaf_task1.state import Task1State
27
+
28
+ #EXECUTE STEP-1: Identify Business Requirements, Objectives, Capabilities, Stakeholders and Journey Agent
29
+ def assess_business_query(state: Task1State, config: RunnableConfig):
30
+ """Identified business requirements, goals, use cases, user journey, stakeholder and business capability from a given business query."""
31
+ #DEFINE agent template & prompt
32
+ #BROKE-DOWN BusinessInput into individual extractions: LLAMA-3 CONTEXT WINDOW limitation
33
+ #REMOVED parser from the chain: LLAMA-3 returning text + ```BusinessInput```
34
+ ##Parser back to chain 2024-10-13
35
+ #Setting streaming=True makes the model produces wrong output
36
+ query = state.get("business_query")
37
+
38
+ # Get any user-provided configs - LLM model in use
39
+ configuration = AgentConfiguration.from_runnable_config(config)
40
+ model = get_llm_client(configuration.togaf_model, configuration.api_base_url)
41
+
42
+ values = {"business_input": query}
43
+ final_response=[]
44
+
45
+ ##EXTRACT BUSINESS REQUIREMENT
46
+ response = extract_detailed_business_requirements(model, ListRequirement, "business requirement", values)
47
+ business_reqs = ""
48
+ try:
49
+ for item in response.requirements:
50
+ business_reqs += ':'.join([item.category, item.requirement.lower()]) + ";"
51
+ final_response += [response.requirements]
52
+ except Exception as e:
53
+ print(f"Houston, we a {e} thorny problem!")
54
+
55
+ ##EXTRACT BUSINESS OBJECTIVE
56
+ response = extract_detailed_business_requirements(model, ListObjective, "business objective", values)
57
+ business_goal=[]
58
+ try:
59
+ for item in response.objectives:
60
+ business_goal.append(item.objective)
61
+ final_response += [response.objectives]
62
+ except Exception as e:
63
+ print(f"Houston, we a {e} thorny problem!")
64
+
65
+
66
+ ##EXTRACT BUSINESS CAPABILITY
67
+ response = extract_detailed_business_requirements(model, BusinessCapability, "business capabilities", values)
68
+ business_capabilities=[]
69
+ try:
70
+ for item in response.capabilities:
71
+ business_capabilities.append(item.capability)
72
+ final_response += [response.capabilities]
73
+ except Exception as e:
74
+ print(f"Houston, we a {e} thorny problem!")
75
+
76
+
77
+ ##EXTRACT STAKEHOLDER
78
+ response = extract_detailed_business_requirements(model, StakeholderList, "business stakeholder", values)
79
+ business_stakeholder = ""
80
+ try:
81
+ for item in response.stakeholders:
82
+ business_stakeholder += ' '.join([item.stakeholder,item.role.lower(), item.concern]) + "."
83
+ final_response += [response.stakeholders]
84
+ except Exception as e:
85
+ print(f"Houston, we a {e} thorny problem!")
86
+
87
+
88
+ ##EXTRACT BUSINESS USER JOURNEY
89
+ response = extract_detailed_business_requirements(model, UserJourney, "user journey", values)
90
+ user_journey = ""
91
+ try:
92
+ for item in response.userjourney:
93
+ user_journey += ':'.join([item.persona,item.step.lower()]) + ","
94
+ final_response += [response.userjourney]
95
+ except Exception as e:
96
+ print(f"Houston, we a {e} thorny problem!")
97
+
98
+ name = state.get("next")
99
+
100
+ return {
101
+ "messages": [AIMessage(content=str(final_response), name=name)],
102
+ "requirement": business_reqs,
103
+ "intent": business_goal,
104
+ "stakeholder": business_stakeholder,
105
+ "userjourney": user_journey,
106
+ "biz_capability": str(business_capabilities)
107
+ }
108
+
109
+ # Build graphs task1
110
+ ## TASK-1 Graph
111
+ task1_builder = StateGraph(Task1State)
112
+
113
+ # Define the nodes
114
+ task1_builder.add_node("AssessBusinessQuery", assess_business_query) # assess business input
115
+
116
+ # Build graph
117
+ task1_builder.add_edge("AssessBusinessQuery", END)
118
+ task1_builder.set_entry_point("AssessBusinessQuery")
119
+
120
+ # Set FINISH node end-point
121
+ task1_builder.set_finish_point('AssessBusinessQuery')
122
+
123
+ # Compile
124
+ task1_graph = task1_builder.compile()
125
+ task1_graph.name = "togaf_assess_business_query_graph"
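For reference, the node above returns a partial state update shaped roughly like this (values are illustrative only, following how the strings are assembled in assess_business_query):

# Sketch of the Task-1 output shape (illustrative values, not produced by this commit)
example_task1_update = {
    "requirement": "functional:track orders in real time;",
    "intent": ["Improve customer visibility of orders"],
    "stakeholder": "Head of Operations sponsor wants real-time order tracking.",
    "userjourney": "customer:opens the app to check order status,",
    "biz_capability": "['Order Tracking', 'Customer Notifications']",
}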
ea4all/src/ea4all_gra/togaf_task1/state.py ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pydantic import Field
2
+ from typing_extensions import (
3
+ Annotated
4
+ )
5
+ import operator
6
+ from typing import (
7
+ Optional,
8
+ Annotated,
9
+ Sequence,
10
+ List
11
+ )
12
+ from dataclasses import dataclass
13
+ from typing import Optional
14
+
15
+ from langchain_core.messages import (
16
+ BaseMessage,
17
+ )
18
+
19
+ from ea4all.src.ea4all_gra.data import (
20
+ ListRequirement,
21
+ ListObjective,
22
+ UserJourney,
23
+ StakeholderList,
24
+ BusinessCapability,
25
+ )
26
+
27
+ from ea4all.src.ea4all_gra.state import InputState
28
+
29
+ # Task-1 Graph State
30
+ @dataclass(kw_only=True)
31
+ class Task1State(InputState):
32
+ """
33
+ Represents the BusinessOutput state of our graph.
34
+
35
+ Attributes:
36
+ message: a message is added after each team member finishes
37
+ team_members: the team members are tracked so they are aware of the others' skill-sets
38
+ next: used to route work. The supervisor calls a function that will update this every time it makes a decision
39
+ requirement: list of business requirements
40
+ intent: business objective, goal
41
+ userjourney: list of user journeys
42
+ stakeholder: list of stakeholder and their concerns
43
+ capability: list of business capabilities to deliver intent and requirements
44
+ """
45
+
46
+ messages: Optional[Annotated[Sequence[BaseMessage], operator.add]]
47
+ team_members: Optional[List[str]]
48
+ requirement: Optional[ListRequirement]
49
+ intent: Optional[ListObjective]
50
+ userjourney: Optional[UserJourney]
51
+ stakeholder: Optional[StakeholderList]
52
+ biz_capability: Optional[BusinessCapability]
53
+ next: Optional[str]
ea4all/src/ea4all_gra/togaf_task2/graph.py ADDED
@@ -0,0 +1,457 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import ast
2
+
3
+ #core libraries
4
+ from langchain_core.runnables import RunnableConfig, RunnableSerializable
5
+
6
+ from langchain_core.messages import (
7
+ AIMessage,
8
+ )
9
+ from langchain_core.output_parsers import PydanticOutputParser
10
+ from langchain_core.prompts.chat import ChatPromptTemplate
11
+
12
+ from langchain import hub
13
+
14
+ from langgraph.graph import (
15
+ END,
16
+ StateGraph,
17
+ )
18
+
19
+ from ea4all.src.ea4all_gra.configuration import AgentConfiguration
20
+ from ea4all.src.ea4all_gra.data import (
21
+ CapabilityGap,
22
+ GradeAnswer,
23
+ GradeDocuments,
24
+ LandscapeAsIs,
25
+ )
26
+
27
+ from ea4all.src.shared.utils import (
28
+ get_llm_client,
29
+ extract_structured_output,
30
+ extract_topic_from_business_input,
31
+ set_max_new_tokens,
32
+ get_predicted_num_tokens_from_prompt,
33
+ )
34
+
35
+ from ea4all.src.shared.prompts import (
36
+ LLAMA31_CHAT_PROMPT_FORMAT,
37
+ LLAMA31_PROMPT_FORMAT,
38
+ )
39
+
40
+ from ea4all.src.shared import vectorstore
41
+
42
+ from ea4all.src.ea4all_gra.togaf_task2.state import Task2State
43
+
44
+ from ea4all.src.ea4all_apm.graph import get_retrieval_chain
45
+ from ea4all.src.ea4all_apm import configuration as apm_config
46
+
47
+ from ea4all.src.ea4all_gra.state import OverallState
48
+
49
+ # Retrieval Grader score whether retrieved IT Landscape address business query
50
+ def retrieval_grader(model):
51
+ # LLM with function call
52
+ structured_llm_grader = model.with_structured_output(GradeDocuments)
53
+
54
+ #Prompt
55
+ system = """You are an enterprise architect grader assessing relevance of applications to address a business query.
56
+ It does not need to be a stringent test. The objective is to filter out erroneous retrievals.
57
+ If the application contains any keyword or semantic meaning related to the business query, grade it as relevant.
58
+ Give a binary score 'yes' or 'no' to indicate whether the application is relevant to the business query."""
59
+
60
+ grade_prompt = ChatPromptTemplate.from_messages(
61
+ [
62
+ ("system", system),
63
+ ("ai", "Retrieved applications: \n\n {landscape_asis} \n\n Business Query: {business_query}"),
64
+ ]
65
+ )
66
+
67
+ grader = grade_prompt | structured_llm_grader
68
+
69
+ return grader
70
+
71
+ # Business capability needs vs landscape as-is gap analysis
72
+ def gap_grader(model):
73
+
74
+ gap_prompt = hub.pull("learn-it-all-do-it-all/ea4all_togaf_capability_gap")
75
+
76
+ # Set up a parser:
77
+ parser = PydanticOutputParser(pydantic_object=CapabilityGap)
78
+ gap_prompt = gap_prompt.partial(
79
+ format_instructions=parser.get_format_instructions(),
80
+ )
81
+
82
+ capability_gap_grader = gap_prompt | model | parser
83
+
84
+ return capability_gap_grader
85
+
86
+ ## Question Re-writer
87
+ def question_rewriter(model):
88
+ # Rewriter Prompt
89
+ rewrite_prompt = hub.pull("learn-it-all-do-it-all/ea4all_question_rewriter")
90
+ rewrite_prompt = rewrite_prompt.partial(ai_output=LLAMA31_CHAT_PROMPT_FORMAT)
91
+
92
+ rewriter = rewrite_prompt | model
93
+
94
+ return rewriter
95
+
96
+ ##Answer Grade: score whether RAG + LLM answer address business query
97
+ def answer_grader():
98
+ # Prompt
99
+ answer_prompt = hub.pull('learn-it-all-do-it-all/ea4all_togaf_answer_grade')
100
+
101
+ # Set up a parser:
102
+ parser = PydanticOutputParser(pydantic_object=GradeAnswer)
103
+ answer_prompt = answer_prompt.partial(
104
+ format_instructions=parser.get_format_instructions(),
105
+ ai_output = LLAMA31_PROMPT_FORMAT
106
+ )
107
+
108
+ return answer_prompt
109
+
110
+ ## Hallucination Grader score whether there is any hallucination with between RAG and LLM answers
111
+ def hallucination_grader(asis, identified):
112
+ # Prompt": REVISED TO WORK WIHT LLAMA-3 - issue w/ multi-word app
113
+ #changes on prompting e.g. role, rules and restrictions, explicit instructions, change from word to application(s)
114
+ #changed to one-by-one assessment using single text search
115
+ grader_false = []
116
+ for d in identified:
117
+ if d.lower() not in asis.lower():
118
+ grader_false.append(d)
119
+
120
+ return grader_false
121
+
122
+ ##Action-1 RAG retrieval - Assess-AsIs-Landscape
123
+ async def retrieve(state:Task2State, config: RunnableConfig):
124
+ """
125
+ Retrieve applications
126
+
127
+ Args:
128
+ state (dict): The current graph state
129
+
130
+ Returns:
131
+ state (dict): New key added to state, applications, that contains retrieved identified applications
132
+ """
133
+
134
+ configuration = AgentConfiguration.from_runnable_config(config)
135
+
136
+ print("---RETRIEVE---")
137
+ business_query = state['business_query']
138
+
139
+ if not state.get('landscape_asis'):
140
+ intent=""
141
+ if state['messages']:
142
+ intent = ','.join(ast.literal_eval(str(state['messages'][-1].content))['intent']).lower().replace("'", "")
143
+
144
+ business_query=f"""What existent applications can be re-used {intent}?"""
145
+
146
+ # Retrieval
147
+ rag_input = 5
148
+ #faiss_index = set_faiss_index(config)
149
+ with vectorstore.make_retriever(config) as _retriever:
150
+ retriever = _retriever
151
+
152
+ retrieval = await get_retrieval_chain(rag_input,"ea4all_agent",business_query,retriever, config)
153
+
154
+ landscape_asis = await retrieval.ainvoke(
155
+ {"standalone_question": business_query},
156
+ config={"recursion_limit":configuration.ea4all_recursion_limit})
157
+
158
+ name = state['next']
159
+
160
+ ## return Document page_content
161
+ content = ';'.join(asis.page_content.strip() for asis in landscape_asis)
162
+ return {
163
+ "messages": [AIMessage(content=content, name=name)],
164
+ "landscape_asis": landscape_asis,
165
+ "business_query": business_query
166
+ }
167
+
168
+ ##Action-2 Grade retrieval against business query, filter out not relevant applications
169
+ def grade_landscape_asis(state:Task2State, config: RunnableConfig):
170
+ """
171
+ Determines whether an application is relevant to address a business query.
172
+
173
+ Args:
174
+ state (dict): The current graph state
175
+
176
+ Returns:
177
+ state (dict): Updates landscape_asis key with only filtered relevant applications
178
+ """
179
+
180
+ print("---CHECK DOCUMENT RELEVANCE TO BUSINESS QUERY---")
181
+ business_query = state.get('business_query')
182
+ landscape_asis = state.get('landscape_asis')
183
+
184
+ # Score each doc
185
+ filtered_docs = []
186
+ if landscape_asis is not None:
187
+ for d in landscape_asis:
188
+ ##Pick relevant Metadata
189
+ application = d.metadata['source']
190
+ capability = d.metadata['capability']
191
+ description = d.metadata['description']
192
+ business_fit = d.metadata['business fit']
193
+ roadmap = d.metadata['roadmap']
194
+ asis = f"Application:{application}; Capability:{capability}; Description:{description};Business fit: {business_fit}; Roadmap: {roadmap};"
195
+
196
+ filtered_docs.append(asis)
197
+
198
+ return {
199
+ #"messages": [AIMessage(content=str(filtered_docs), name=name)],
200
+ "business_query": business_query,
201
+ "landscape_asis": landscape_asis,
202
+ "identified_asis": filtered_docs
203
+ }
204
+
205
+ ##Action-3 Is there relevant applications? Yes, generate, otherwise transform_query
206
+ def decide_to_generate(state:Task2State, config: RunnableConfig):
207
+ """
208
+ Determines whether to generate an answer, or re-generate a question.
209
+
210
+ Args:
211
+ state (dict): The current graph state
212
+
213
+ Returns:
214
+ str: Binary decision for next node to call
215
+ """
216
+
217
+ print("---ASSESS GRADED APPLICATIONS---")
218
+ filtered_applications = state['identified_asis']
219
+
220
+ if not filtered_applications:
221
+ # All documents have been filtered check_relevance
222
+ # We will re-generate a new query
223
+ print(
224
+ "---DECISION: ALL APPLICATIONS ARE NOT RELEVANT TO BUSINESS QUERY, TRANSFORM QUERY---"
225
+ )
226
+ return "transform_query"
227
+ else:
228
+ # We have relevant documents, so generate answer
229
+ print("---DECISION: GENERATE---")
230
+ return "generate"
231
+
232
+ ##Action-4a Generate if relevant applications found
233
+ def generate(state:Task2State, config: RunnableConfig):
234
+ """
235
+ Generate answer
236
+
237
+ Args:
238
+ state (dict): The current graph state
239
+
240
+ Returns:
241
+ state (dict): New key
242
+ added to state, identified_asis, that contains LLM generation
243
+ """
244
+ configuration = AgentConfiguration.from_runnable_config(config)
245
+ model = get_llm_client(configuration.togaf_model, configuration.api_base_url)
246
+
247
+ print("---GENERATE---")
248
+ landscape_asis = state['landscape_asis']
249
+
250
+ values = {
251
+ "business_query": state['business_query'],
252
+ "applications": state['identified_asis']
253
+ }
254
+
255
+ parser = PydanticOutputParser(pydantic_object=LandscapeAsIs)
256
+
257
+ hub_prompt = hub.pull('learn-it-all-do-it-all/ea4all_togaf_landscape_business_query')
258
+ hub_prompt = hub_prompt.partial(
259
+ format_instructions=parser.get_format_instructions(),
260
+ )
261
+
262
+ model.max_tokens = set_max_new_tokens(get_predicted_num_tokens_from_prompt(model,hub_prompt,values))
263
+
264
+ task_2_generate = hub_prompt | model | parser
265
+ generated_asis = task_2_generate.invoke(input=values, config={"recursion_limit":configuration.recursion_limit})
266
+
267
+ name = state['next']
268
+
269
+ return {
270
+ "messages": [AIMessage(content=str(generated_asis.identified_asis), name=name)],
271
+ "landscape_asis": landscape_asis,
272
+ "business_query": state['business_query'],
273
+ "identified_asis": generated_asis.identified_asis
274
+ }
275
+
276
+ ##Action-4b Re-write query otherwise
277
+ def transform_query(state:Task2State, config: RunnableConfig):
278
+ """
279
+ Transform the query to produce a better question.
280
+
281
+ Args:
282
+ state (dict): The current graph state
283
+
284
+ Returns:
285
+ state (dict): Updates question key with a re-phrased question
286
+ """
287
+ # Get any user-provided configs - LLM model in use
288
+ configuration = AgentConfiguration.from_runnable_config(config)
289
+ model = get_llm_client(configuration.togaf_model, configuration.api_base_url)
290
+
291
+ print("---TRANSFORM QUERY---")
292
+ business_query = state['business_query']
293
+
294
+ # Re-write question
295
+ response = question_rewriter(model).invoke(
296
+ {"user_question": business_query, "target": "vectorstore"},
297
+ )
298
+
299
+ generated_question = extract_topic_from_business_input(response.content)
300
+ better_question = generated_question['rephrased']
301
+
302
+ if better_question is None: better_question = business_query
303
+
304
+ name = state['next']
305
+
306
+ return {
307
+ "messages": [AIMessage(content=better_question, name=name)],
308
+ "business_query": better_question
309
+ }
310
+
311
+ ##Action-5 Grade final response
312
+ def grade_generation_v_documents_and_question(state:Task2State, config: RunnableConfig):
313
+ """
314
+ Determines whether the generation is grounded in the landscape_asis and answers business query.
315
+
316
+ Args:
317
+ state (dict): The current graph state
318
+
319
+ Returns:
320
+ str: Decision for next node to call
321
+ """
322
+
323
+ # Get any user-provided configs - LLM model in use
324
+ configuration = AgentConfiguration.from_runnable_config(config)
325
+ model = get_llm_client(configuration.togaf_model, configuration.api_base_url)
326
+
327
+
328
+ print("---CHECK HALLUCINATIONS---")
329
+ business_query = state['business_query']
330
+ landscape_asis = state['landscape_asis']
331
+ identified_asis = state['identified_asis']
332
+ generated_asis = [item.application for item in identified_asis] if identified_asis else []
333
+
334
+ score = hallucination_grader(str(landscape_asis),generated_asis)
335
+
336
+ if len(score)==0:
337
+ print("---DECISION: IDENTIFIED APPLICATION(s) IS GROUNDED IN LANDSCAPE ASIS---")
338
+ # Check question-answering
339
+ print("---GRADE GENERATION vs QUESTION---")
340
+
341
+ values = {"business_query": business_query, "application": identified_asis}
342
+ prompt = answer_grader()
343
+ model.max_tokens = set_max_new_tokens(get_predicted_num_tokens_from_prompt(model,prompt,values))
344
+ grader_chain = prompt | model
345
+ score = grader_chain.invoke(values)
346
+ extracted_answer = extract_structured_output(score.content)
347
+ if extracted_answer is not None: ##REVIEW PROMPT W/ LLAMA3.1-70B
348
+ grade = extracted_answer['binary_score']
349
+ else:
350
+ grade = "no"
351
+
352
+ if grade == "yes":
353
+ print("---DECISION: APPLICATION ADDRESSES BUSINESS QUERY---")
354
+ return "useful"
355
+ else:
356
+ print("---DECISION: APPLICATION DOES NOT ADDRESS BUSINESS QUERY---")
357
+ return "not useful"
358
+ else:
359
+ print("---DECISION: IDENTIFIED ASIS IS NOT GROUNDED IN LANDSCAPE ASIS, RE-TRY---")
360
+ print(f"---HALLUCINATIONS: {score}---")
361
+ return "not supported"
362
+
363
+ ##Action-6 Analyse gap between current state and the desired future state - identified capabilities
364
+ def grade_landscape_asis_v_capability_gap(state:Task2State, config: RunnableConfig):
365
+ """
366
+ Analyse any gap between existent applications and identified business capability to address the business query.
367
+
368
+ Args:
369
+ state (dict): The current graph state
370
+
371
+ Returns:
372
+ state (dict): Updates landscape_gap key with capability gap status
373
+ """
374
+
375
+ # Get any user-provided configs - LLM model in use
376
+ configuration = AgentConfiguration.from_runnable_config(config)
377
+ model = get_llm_client(configuration.togaf_model, configuration.api_base_url)
378
+
379
+ print("---CHECK SUPPORT IDENTIFIED APP TO BUSINESS CAPABILITY---")
380
+
381
+ parser = PydanticOutputParser(pydantic_object=CapabilityGap)
382
+
383
+ hub_prompt = hub.pull('learn-it-all-do-it-all/ea4all_togaf_capability_gap')
384
+ hub_prompt = hub_prompt.partial(
385
+ format_instructions=parser.get_format_instructions(),
386
+ )
387
+ task_2_landscape_gap = hub_prompt | model | parser
388
+
389
+ #capability_gap_grader
390
+ if state['identified_asis']:
391
+ content = ';'.join(str(app) for app in state['identified_asis'])
392
+ else:
393
+ content = "No applications identified"
394
+
395
+ if state['biz_capability']:
396
+ capability = ', '.join(ast.literal_eval(state['biz_capability'])).replace("'", ", ")
397
+ #bcm = ast.literal_eval(str(state['biz_capability']))
398
+ #capability = bcm[1:-1].replace("'","")
399
+ #capability = state['biz_capability']
400
+ else:
401
+ capability = "No business capabilities identified"
402
+
403
+ values = {
404
+ "application": content,
405
+ "capability": capability
406
+ }
407
+
408
+ model.max_tokens = set_max_new_tokens(get_predicted_num_tokens_from_prompt(model,hub_prompt,values))
409
+
410
+ extracted_gap = task_2_landscape_gap.invoke(input=values, config={"recursion_limit":configuration.recursion_limit})
411
+
412
+ for item in extracted_gap.capability_status:
413
+ print(f"---CAPABILITY: {item.capability} SUPPORT: {item.support}---")
414
+
415
+ return {
416
+ "messages": [AIMessage(content=str(state['messages']), name=state['next'])],
417
+ "landscape_gap": extracted_gap #landscape_gap.content
418
+ }
419
+
420
+ ##TASK-2 Graph
421
+ task2_builder = StateGraph(Task2State)
422
+
423
+ # Define the nodes
424
+ task2_builder.add_node("assess_landscape", retrieve) # retrieve
425
+ task2_builder.add_node("grade_landscape_asis", grade_landscape_asis) # grade documents
426
+ task2_builder.add_node("generate", generate) # generate
427
+ task2_builder.add_node("transform_query", transform_query) # transform_query
428
+ task2_builder.add_node("grade_landscape_gap", grade_landscape_asis_v_capability_gap) #analyse asis gap
429
+
430
+ # Build graph
431
+ task2_builder.set_entry_point("assess_landscape")
432
+
433
+ task2_builder.add_edge("assess_landscape", "grade_landscape_asis")
434
+ task2_builder.add_conditional_edges(
435
+ "grade_landscape_asis",
436
+ decide_to_generate,
437
+ {
438
+ "transform_query": "transform_query",
439
+ "generate": "generate",
440
+ },
441
+ )
442
+ task2_builder.add_edge("transform_query", "assess_landscape")
443
+ task2_builder.add_conditional_edges(
444
+ "generate",
445
+ grade_generation_v_documents_and_question,
446
+ {
447
+ "not supported": "generate",
448
+ "useful": "grade_landscape_gap",
449
+ "not useful": "transform_query",
450
+ },
451
+ )
452
+
453
+ task2_builder.add_edge("grade_landscape_gap", END)
454
+
455
+ # Compile
456
+ task2_graph = task2_builder.compile()
457
+ task2_graph.name = "Togaf_assess_asis_graph"
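As a quick illustration of how the retry loop's grounding check behaves, the hallucination_grader helper above flags any generated application name that does not appear verbatim in the retrieved as-is text (names below are made up):

# Illustrative only: 'TrackFast' is not in the as-is text, so it is flagged.
asis_text = "Application:OrderHub; Capability:Order Management; Description:order orchestration;"
flagged = hallucination_grader(asis_text, ["OrderHub", "TrackFast"])
# flagged == ["TrackFast"] -> grade_generation_v_documents_and_question returns "not supported"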
ea4all/src/ea4all_gra/togaf_task2/state.py ADDED
@@ -0,0 +1,50 @@
1
+ from typing_extensions import (
2
+ Annotated
3
+ )
4
+ import operator
5
+ from typing import (
6
+ Optional,
7
+ Annotated,
8
+ Sequence,
9
+ List
10
+ )
11
+ from dataclasses import dataclass
12
+
13
+ from langchain_core.messages import (
14
+ BaseMessage
15
+ )
16
+
17
+ from langchain_core.documents import Document
18
+
19
+ from ea4all.src.ea4all_gra.data import (
20
+ BusinessCapability,
21
+ CapabilityGap,
22
+ LandscapeAsIs,
23
+ )
24
+
25
+ from ea4all.src.ea4all_gra.state import InputState
26
+
27
+ # Task-2 Graph State
28
+ @dataclass(kw_only=True)
29
+ class Task2State(InputState):
30
+ """
31
+ Represents the landscape assessment state of our graph.
32
+
33
+ Attributes:
34
+ messages: a message is added after each team member finishes
35
+ team_members: the team members are tracked so they are aware of the others' skill-sets
36
+ next: used to route work. The supervisor calls a function that will update this every time it makes a decision
37
+ business_query: identified business capabilities
38
+ landscape_asis: list of applications
39
+ identified_asis: LLM generation
40
+ capability: list of business capabilities required to support intent and requirements
41
+ landscape_gap: business capability support gap
42
+ """
43
+
44
+ messages: Optional[Annotated[Sequence[BaseMessage], operator.add]]
45
+ team_members: Optional[List[str]]
46
+ landscape_asis: Optional[List[Document]]
47
+ identified_asis: Optional[LandscapeAsIs]
48
+ biz_capability: Optional[BusinessCapability]
49
+ landscape_gap: Optional[CapabilityGap]
50
+ next: Optional[str]
ea4all/src/ea4all_gra/togaf_task3/graph.py ADDED
@@ -0,0 +1,246 @@
1
+ from markdownify import markdownify as md
2
+ from graphviz import Source
3
+
4
+ #core libraries
5
+ from langchain_core.runnables import RunnableConfig, RunnableGenerator
6
+ from langchain_core.messages import (
7
+ AIMessage,
8
+ )
9
+
10
+ from langchain import hub
11
+
12
+ from langgraph.graph import (
13
+ END,
14
+ StateGraph,
15
+ )
16
+
17
+ from ea4all.src.ea4all_gra.configuration import AgentConfiguration
18
+
19
+ from ea4all.src.shared.utils import (
20
+ get_llm_client,
21
+ set_max_new_tokens,
22
+ get_predicted_num_tokens_from_prompt,
23
+ extract_detailed_business_requirements,
24
+ load_mock_content,
25
+ extract_principles,
26
+ )
27
+
28
+ from ea4all.src.ea4all_gra.data import (
29
+ Principles,
30
+ )
31
+
32
+ from ea4all.src.ea4all_gra.togaf_task3.state import Task3State
33
+
34
+ # Task-3: Generate reference architecture Vision and Target first iteration
35
+ def generate_principles(state: Task3State, config: RunnableConfig):
36
+
37
+ #Extract Business, Technology and Architecture Principles
38
+ strategic_principles = md(str(load_mock_content('strategic_principles.txt'))) ##REFACTORING NEEDED
39
+
40
+ configuration = AgentConfiguration.from_runnable_config(config)
41
+ model = get_llm_client(
42
+ configuration.togaf_model,
43
+ api_base_url=configuration.api_base_url,
44
+ )
45
+
46
+ model.max_tokens = set_max_new_tokens(get_predicted_num_tokens_from_prompt(model,extract_principles(Principles),{"strategic_principles": strategic_principles}))
47
+
48
+ ##RE-use business reqs extractor
49
+ identified_principles = extract_detailed_business_requirements(model, Principles, "architecture principles", {"business_input": strategic_principles})
50
+
51
+ name = state.get('next')
52
+ return {
53
+ "messages": [AIMessage(content=str(identified_principles), name=name)],
54
+ "principles": identified_principles,
55
+ "business_query": state.get('business_query'),
56
+ "business_goal": state.get('intent'),
57
+ "business_stakeholder": state.get('stakeholder'),
58
+ "biz_capability": state.get('biz_capability'),
59
+ "requirement": state.get('requirement'),
60
+ "user_journey": state.get('userjourney'),
61
+ "landscape_asis": state.get('landscape_asis'),
62
+ "identified_asis": state.get('identified_asis'),
63
+ "landscape_gap": state.get('landscape_gap')
64
+ }
65
+
66
+ async def stream_vision_target(state: Task3State, config: RunnableConfig):
67
+ ##MAX_TOKENS OPTIMISATION 2024-07-08
68
+
69
+ inputs = {}
70
+ async for s in state:
71
+ inputs['intent'] = s.get('intent')
72
+ inputs['stakeholder'] = s.get('stakeholder')
73
+ inputs['biz_capability'] = s.get('biz_capability')
74
+ inputs['requirement'] = s.get('requirement')
75
+ inputs['userjourney'] = s.get('userjourney')
76
+ inputs['identified_principles'] = s.get('principles')
77
+ inputs['landscape_asis'] = s.get('landscape_asis')
78
+ inputs['identified_asis'] = s.get('identified_asis')
79
+ inputs['landscape_gap'] = s.get('landscape_gap')
80
+
81
+ # Prompt
82
+ vision_prompt = hub.pull('learn-it-all-do-it-all/ea4all_togaf_vision_target')
83
+
84
+ values = {
85
+ "business_goal": inputs['intent'],
86
+ "business_stakeholder": inputs['stakeholder'],
87
+ "business_capability": inputs['biz_capability'],
88
+ "principles": inputs['identified_principles'],
89
+ "requirement": inputs['requirement'],
90
+ "user_journey": inputs['userjourney'],
91
+ "landscape_asis": inputs['landscape_asis'],
92
+ "identified_asis": inputs['identified_asis'],
93
+ "landscape_gap": inputs['landscape_gap']
94
+ }
95
+
96
+ configuration = AgentConfiguration.from_runnable_config(config)
97
+ model = get_llm_client(
98
+ configuration.togaf_model,
99
+ api_base_url=configuration.api_base_url,
100
+ streaming=configuration.streaming,
101
+ )
102
+
103
+ model.max_tokens = set_max_new_tokens(get_predicted_num_tokens_from_prompt(model,vision_prompt,values))
104
+ vision_chain = vision_prompt | model
105
+
106
+ async for output in vision_chain.astream(values):
107
+ yield(output)
108
+
109
+ async def generate_vision(state: Task3State):
110
+ """
111
+ Generate answer
112
+
113
+ Args:
114
+ state (dict): The current graph state
115
+
116
+ Returns:
117
+ state (dict): New key added to state, generation, that contains LLM generation
118
+ """
119
+
120
+ gen = RunnableGenerator(stream_vision_target).with_config(tags=["gra_stream"])
121
+
122
+ generation=""
123
+ async for message in gen.astream(state):
124
+ generation = ''.join([generation,message.content])
125
+
126
+ name = state.get('next')
127
+
128
+ return {
129
+ "messages": [AIMessage(content=generation, name=name)],
130
+ "principles": state.get('principles'),
131
+ "business_query": state.get('business_query'),
132
+ "intent": state.get('intent'),
133
+ "stakeholder": state.get('stakeholder'),
134
+ "biz_capability": state.get('biz_capability'),
135
+ "requirement": state.get('requirement'),
136
+ "userjourney": state.get('userjourney'),
137
+ "landscape_asis": state.get('landscape_asis'),
138
+ "identified_asis": state.get('identified_asis'),
139
+ "landscape_gap": state.get('landscape_gap'),
140
+ "vision_target": generation
141
+ }
142
+
143
+ def generate_architecture_runway(state: Task3State, config: RunnableConfig):
144
+ stakeholder = state.get('stakeholder')
145
+ biz_capability = state.get('biz_capability')
146
+ userjourney = state.get('userjourney')
147
+ identified_asis = state.get('identified_asis')
148
+ intent = state.get('intent')
149
+
150
+ # Prompt
151
+ runway_prompt = hub.pull('learn-it-all-do-it-all/ea4all_togaf_architecture_runway')
152
+
153
+ configuration = AgentConfiguration.from_runnable_config(config)
154
+ model = get_llm_client(
155
+ configuration.togaf_model,
156
+ api_base_url=configuration.api_base_url,
157
+ )
158
+
159
+ values = {
160
+ "stakeholder": stakeholder,
161
+ "business_capability": biz_capability,
162
+ "userjourney": userjourney,
163
+ "identified_asis": identified_asis,
164
+ "intent": intent,
165
+ }
166
+
167
+ ##Issue w/ llama-3.2-vision and ChatOpenAI token limits
168
+ model.max_tokens = int((configuration.max_tokens - get_predicted_num_tokens_from_prompt(model,runway_prompt,values))*0.95)
169
+
170
+ vision_chain = runway_prompt | model
171
+ architecture_runway = vision_chain.invoke(input=values)
172
+
173
+ name = state.get('next')
174
+
175
+ return {
176
+ "messages": [AIMessage(content=architecture_runway.content, name=name)],
177
+ "architecture_runway": architecture_runway.content
178
+ }
179
+
180
+ def save_diagram(state: Task3State, config: RunnableConfig, fmt=["svg","png"]):
181
+ configuration = AgentConfiguration.from_runnable_config(config)
182
+
183
+ #_config = config.get('configurable')
184
+ #if _config is not None:
185
+ # thread_id = _config['thread_id']
186
+ # output_img = "togaf_runway_" + str(thread_id)
187
+ #else:
188
+ # output_img = "Error generating file name!"
189
+
190
+ input_img = state['architecture_runway']
191
+ output_img = "togaf_runway_" + str(config['configurable']['thread_id'])
192
+
193
+ try:
194
+ x=Source(
195
+ source=input_img,
196
+ filename=output_img,
197
+ format=fmt[0]
198
+ )
199
+ response = x.render(
200
+ cleanup=True,
201
+ directory=configuration.ea4all_images,
202
+ format=fmt[1],
203
+ view=False,
204
+ ).replace('\\', '/')
205
+ except Exception as e:
206
+ response=f"Error: Agent couldn't parse the diagram at this time! {e} \n {output_img} \n {input_img}"
207
+
208
+ return{"architecture_runway": response}
209
+
210
+ def generate_reference_architecture(state: Task3State):
211
+ ##BY PASS to generate principles
212
+ return {
213
+ "business_query": state.get('business_query'),
214
+ "intent": state.get('intent'),
215
+ "stakeholder": state.get('stakeholder'),
216
+ "biz_capability": state.get('biz_capability'),
217
+ "requirement": state.get('requirement'),
218
+ "userjourney": state.get('userjourney'),
219
+ "landscape_asis": state.get('landscape_asis'),
220
+ "identified_asis": state.get('identified_asis'),
221
+ "landscape_gap": state.get('landscape_gap'),
222
+ }
223
+
224
+ ##TASK-3 Graph
225
+ workflow = StateGraph(Task3State)
226
+
227
+ # Define the nodes
228
+ workflow.add_node("generate_reference_architecture", generate_reference_architecture) # business, technology, architecture principles
229
+ workflow.add_node("generate_principles", generate_principles) # business, technology, architecture principles
230
+ workflow.add_node("generate_vision_target", generate_vision) # architecture vision and target
231
+ workflow.add_node("generate_architecture_runway", generate_architecture_runway) # draw high-level diagram target state
232
+ workflow.add_node("save_diagram", save_diagram)
233
+
234
+ # Build graph
235
+ workflow.add_edge("generate_reference_architecture", "generate_principles")
236
+ workflow.add_edge("generate_principles", "generate_vision_target")
237
+ workflow.add_edge("generate_vision_target", "generate_architecture_runway")
238
+ workflow.add_edge("generate_architecture_runway","save_diagram")
239
+ workflow.add_edge("save_diagram", END)
240
+
241
+ #Entry point
242
+ workflow.set_entry_point("generate_reference_architecture")
243
+
244
+ # Compile
245
+ task3_graph = workflow.compile()
246
+ task3_graph.name = "Togaf_generate_tobe_graph"
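A minimal usage sketch for the compiled Task-3 graph, assuming the carried-over state keys follow Task3State in the next file; all field values are hypothetical. Note that save_diagram reads config['configurable']['thread_id'] to name the rendered runway file, so a thread id must be supplied.

import asyncio
from ea4all.src.ea4all_gra.togaf_task3.graph import task3_graph

async def demo_task3():
    # Hypothetical state carried over from Task-1/Task-2.
    state = {
        "business_query": "Launch a self-service order-tracking portal",
        "intent": "Reduce order-status support calls",
        "stakeholder": "Head of Customer Service",
    }
    result = await task3_graph.ainvoke(state, config={"configurable": {"thread_id": "task3-demo"}})
    print(result.get("vision_target"))
    print(result.get("architecture_runway"))

# asyncio.run(demo_task3())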
ea4all/src/ea4all_gra/togaf_task3/state.py ADDED
@@ -0,0 +1,65 @@
1
+ from typing_extensions import (
2
+ Annotated
3
+ )
4
+ import operator
5
+ from typing import (
6
+ Optional,
7
+ Annotated,
8
+ Sequence,
9
+ List
10
+ )
11
+ from dataclasses import dataclass
12
+
13
+ from langchain_core.messages import (
14
+ BaseMessage
15
+ )
16
+
17
+ from ea4all.src.ea4all_gra.data import (
18
+ BusinessCapability,
19
+ CapabilityGap,
20
+ LandscapeAsIs,
21
+ StakeholderList,
22
+ Principles,
23
+ ListObjective,
24
+ ListRequirement,
25
+ UserJourney,
26
+ )
27
+
28
+ from ea4all.src.ea4all_gra.state import InputState
29
+
30
+ # Task-3 Graph State
31
+ @dataclass(kw_only=True)
32
+ class Task3State(InputState):
33
+ """
34
+ Represents the Reference Architecture state of our graph.
35
+
36
+ Attributes:
37
+ message: a message is added after each team member finishes
38
+ team_members: the team members are tracked so they are aware of the others' skill-sets
39
+ next: used to route work. The supervisor calls a function that will update this every time it makes a decision
40
+ business_query: business demand to be delivered
41
+ principles: list of principles to the architecture work
42
+ requirement: list of business requirements
43
+ intent: business objective, goal
44
+ user_journey: list of user journeys
45
+ stakeholder: list of stakeholder and their concerns
46
+ capability: list of business capabilities to deliver intent and requirements
47
+ landscape_asis: list of potential applications to support business query
48
+ identified_asis: identified target applications
49
+ landscape_gap: list of capabilities not supported by as-is landscape
50
+ """
51
+
52
+ messages: Optional[Annotated[Sequence[BaseMessage], operator.add]]
53
+ team_members: Optional[List[str]]
54
+ landscape_asis: Optional[List[str]]
55
+ identified_asis: Optional[LandscapeAsIs]
56
+ biz_capability: Optional[BusinessCapability]
57
+ landscape_gap: Optional[CapabilityGap]
58
+ stakeholder: Optional[StakeholderList]
59
+ principles: Optional[Principles]
60
+ requirement: Optional[ListRequirement]
61
+ intent: Optional[ListObjective]
62
+ userjourney: Optional[UserJourney]
63
+ vision_target: Optional[str]
64
+ architecture_runway: Optional[str]
65
+ next: Optional[str]
ea4all/src/ea4all_gra/utils.py ADDED
@@ -0,0 +1,125 @@
1
+ import gradio as gr
2
+
3
+ import asyncio
4
+ import threading
5
+
6
+ def assign_event_loop_to_thread():
7
+ """
8
+ Explicitly assign a new event loop to the current thread
9
+ This method can be called at the start of thread-based operations
10
+ """
11
+ try:
12
+ # Try to get the current event loop
13
+ loop = asyncio.get_event_loop()
14
+ except RuntimeError:
15
+ # If no event loop exists, create a new one
16
+ loop = asyncio.new_event_loop()
17
+
18
+ # Set the new event loop for the current thread
19
+ asyncio.set_event_loop(loop)
20
+
21
+ return loop
22
+
23
+ class AsyncInterruptHandler:
24
+ def __init__(self):
25
+ # Assign event loop at initialization
26
+ self.loop = assign_event_loop_to_thread()
27
+ self.input_queue = asyncio.Queue()
28
+ self.event = asyncio.Event()
29
+
30
+ self.user_feedback = None
31
+ self.interface = None
32
+ self.interface_thread = None
33
+
34
+ # Get or create the event loop
35
+ try:
36
+ self.loop = asyncio.get_event_loop()
37
+ except RuntimeError:
38
+ self.loop = asyncio.new_event_loop()
39
+ asyncio.set_event_loop(self.loop)
40
+
41
+ async def close_interface_with_timeout(self):
42
+ # Get the current thread's event loop
43
+ try:
44
+ print(f"NOT Closing interface: {self.interface.is_callable()}")
45
+
46
+ except asyncio.TimeoutError:
47
+ print("Interface closure timed out")
48
+ except Exception as e:
49
+ print(f"Error closing interface: {e}")
50
+
51
+ def launch_gradio_interface(self):
52
+ def run_interface():
53
+ # Explicitly assign event loop for this thread
54
+ assign_event_loop_to_thread()
55
+
56
+ # Get the current thread's event loop
57
+ current_loop = asyncio.get_event_loop()
58
+
59
+ try:
60
+ # Run the interface creation coroutine
61
+ current_loop.run_until_complete(self._create_and_launch_interface())
62
+ except Exception as e:
63
+ print(f"Error in run_interface: {e}")
64
+
65
+ # Create and start the thread
66
+ self.interface_thread = threading.Thread(target=run_interface, daemon=True)
67
+ self.interface_thread.start()
68
+
69
+ async def _create_and_launch_interface(self):
70
+ title = 'User Input'
71
+ description = 'Please provide input'
72
+
73
+ async def submit_input(user_feedback):
74
+ asyncio.run_coroutine_threadsafe(self.input_queue.put(user_feedback), self.loop)
75
+ self.event.set()
76
+
77
+ print(f"User Provided input: {user_feedback}")
78
+
79
+ return user_feedback
80
+
81
+ with gr.Blocks() as demo:
82
+ gr.Markdown(f"###{title}")
83
+ gr.Markdown(f"**{description}")
84
+
85
+ input_component = gr.Textbox(label="Your input")
86
+ submit_btn = gr.Button("Submit")
87
+ output = gr.Textbox(label="Status")
88
+
89
+ submit_btn.click(
90
+ submit_input,
91
+ inputs=input_component,
92
+ outputs=output
93
+ )
94
+
95
+ self.event = asyncio.Event()
96
+ self.event.clear()
97
+ self.user_feedback = None
98
+
99
+ self.interface = demo
100
+ self.interface.launch(inbrowser=True)
101
+
102
+ async def handle_interrupt(self):
103
+ self.launch_gradio_interface()
104
+
105
+ try:
106
+ # Use the current loop's queue and event
107
+ self.user_feedback = await asyncio.wait_for(
108
+ self.input_queue.get(),
109
+ timeout=300.0, # 5-minute timeout
110
+ )
111
+
112
+ print(f"Finished waiting for user input {self.user_feedback}")
113
+
114
+ return self.user_feedback
115
+
116
+ except asyncio.TimeoutError:
117
+ print("Gradio interface timed out")
118
+ return None
119
+
120
+ except Exception as e:
121
+ print(f"Error in handle_interrupt: {e}")
122
+ return None
123
+
124
+ finally:
125
+ await self.close_interface_with_timeout()
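A short sketch of how handle_interrupt might be awaited from an async caller, assuming the handler is imported from this utils module: the Gradio form is launched in a background thread and the coroutine resolves once the user submits or the 5-minute timeout fires.

import asyncio
from ea4all.src.ea4all_gra.utils import AsyncInterruptHandler

async def ask_user():
    handler = AsyncInterruptHandler()
    # Waits (asynchronously) for the form submission or the 300 s timeout.
    feedback = await handler.handle_interrupt()
    return feedback or "no input received"

# asyncio.run(ask_user())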
ea4all/src/ea4all_indexer/__init__.py ADDED
@@ -0,0 +1,5 @@
1
+ """Index Graph Module."""
2
+
3
+ #from ea4all_indexer.graph import ea4all_indexer
4
+
5
+ #__all__ = ["ea4all_indexer"]
ea4all/src/ea4all_indexer/configuration.py ADDED
@@ -0,0 +1,22 @@
1
+ """Define the configurable parameters for the index graph."""
2
+
3
+ from dataclasses import dataclass, field
4
+
5
+ from ea4all.src.shared.configuration import BaseConfiguration
6
+
7
+ # This file contains sample APPLICATIONS to index
8
+ DEFAULT_APM_CATALOGUE = "APM-ea4all (test-split).xlsx"
9
+
10
+ @dataclass(kw_only=True)
11
+ class IndexConfiguration(BaseConfiguration):
12
+ """Configuration class for indexing and retrieval operations.
13
+
14
+ This class defines the parameters needed for configuring the indexing and
15
+ retrieval processes, including embedding model selection, retriever provider choice, and search parameters.
16
+ """
17
+ apm_catalogue: str = field(
18
+ default=DEFAULT_APM_CATALOGUE,
19
+ metadata={
20
+ "description": "The EA4ALL APM default Vectorstore index name."
21
+ },
22
+ )
ea4all/src/ea4all_indexer/graph.py ADDED
@@ -0,0 +1,57 @@
1
+ """This "graph" simply exposes an endpoint for a user to upload docs to be indexed."""
2
+ """Changelog: 2025-06-03
3
+ - Refactored code to fix problems with linter and type checking (Standard mode)
4
+ - Refactored to use langgraph state management for MCP compatibility.
5
+ - Enabled input BYOD (Bring Your Own Data) for indexing.
6
+ """
7
+
8
+ from typing import Optional
9
+
10
+ from langchain_core.runnables import RunnableConfig
11
+ from langgraph.graph import END, START, StateGraph
12
+
13
+ from ea4all.src.ea4all_indexer.configuration import IndexConfiguration
14
+ from ea4all.src.ea4all_indexer.state import InputState, OutputState, OverallState
15
+ from ea4all.src.shared import vectorstore
16
+ from ea4all.src.shared.configuration import BaseConfiguration
17
+
18
+ async def index_docs(
19
+ state: InputState, *, config: RunnableConfig
20
+ ) -> dict[str, str]:
21
+ """Asynchronously index documents in the given state using the configured retriever.
22
+
23
+ This function takes the documents from the state, ensures they have a user ID,
24
+ adds them to the retriever's index, and then signals for the documents to be
25
+ deleted from the state.
26
+
27
+ If docs are not provided in the state, they will be loaded
28
+ from the configuration.docs_file JSON file.
29
+
30
+ Args:
31
+ state (IndexState): The current state containing documents and retriever.
32
+ config (Optional[RunnableConfig]): Configuration for the indexing process.
33
+ """
34
+ if not config:
35
+ raise ValueError("Configuration required to run index_docs.")
36
+
37
+ #configuration = IndexConfiguration.from_runnable_config(config)
38
+
39
+ with vectorstore.make_retriever(config) as retriever:
40
+ if len(retriever.vectorstore.docstore._dict) == 0:
41
+ apm_docs = vectorstore.get_apm_excel_content(config, file=state.path)
42
+ await retriever.aadd_documents(apm_docs)
43
+ retriever.vectorstore.save_local(
44
+ folder_path=getattr(config, "ea4all_store", BaseConfiguration.ea4all_store),
45
+ index_name=getattr(config, "apm_faiss", BaseConfiguration.apm_faiss)
46
+ )
47
+
48
+ return {"docs": "delete"}
49
+
50
+ # Define the graph
51
+ builder = StateGraph(OverallState, input=InputState, output=OutputState, config_schema=IndexConfiguration)
52
+ builder.add_node("apm_indexer",index_docs)
53
+ builder.add_edge(START, "apm_indexer")
54
+
55
+ # Compile into a graph object that you can invoke and deploy.
56
+ indexer_graph = builder.compile()
57
+ indexer_graph.name = "EA4ALL APM Indexer"
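A minimal usage sketch for the indexer graph, assuming it is imported from this module; the catalogue file name shown in the config is the sample shipped with the repo, and passing path=None lets the retriever fall back to the configured APM catalogue (BYOD callers would supply their own path).

import asyncio
from ea4all.src.ea4all_indexer.graph import indexer_graph

async def demo_index():
    # path is optional; None means "index the default APM catalogue".
    result = await indexer_graph.ainvoke(
        {"path": None},
        config={"configurable": {"apm_catalogue": "APM-ea4all (test-split).xlsx"}},
    )
    print(result)

# asyncio.run(demo_index())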
ea4all/src/ea4all_indexer/state.py ADDED
@@ -0,0 +1,44 @@
1
+ """State management for the index graph."""
2
+
3
+ from dataclasses import dataclass
4
+ from typing import Annotated, Optional
5
+
6
+ from langchain_core.documents import Document
7
+
8
+ from ea4all.src.shared.state import reduce_docs
9
+
10
+ @dataclass(kw_only=True)
11
+ class InputState:
12
+ """Represents the input state for the index graph.
13
+
14
+ This class is used to pass the input documents to the index graph.
15
+ It contains a single field, `path`, which is the source of documents.
16
+ """
17
+
18
+ path: Optional[str] = None
19
+ """Document source path to be indexed by the graph."""
20
+
21
+
22
+ # The index state defines the simple IO for the single-node index graph
23
+ @dataclass(kw_only=True)
24
+ class OutputState:
25
+ """Represents the state for document indexing and retrieval.
26
+
27
+ This class defines the structure of the index state, which includes
28
+ the documents to be indexed and the retriever used for searching
29
+ these documents.
30
+ """
31
+
32
+ docs: Annotated[list[Document], reduce_docs]
33
+ """A list of documents that the agent can index."""
34
+
35
+ @dataclass(kw_only=True)
36
+ class OverallState(InputState):
37
+ """Represents the overall state of the index graph.
38
+
39
+ This class combines the input and output states, allowing for
40
+ both input documents and indexed documents to be managed within
41
+ the same state.
42
+ """
43
+
44
+ pass
ea4all/src/ea4all_vqa/configuration.py ADDED
@@ -0,0 +1,42 @@
1
+ """Define the configurable parameters for the VQA agent."""
2
+
3
+ #'from __future__ import annotations
4
+
5
+ from dataclasses import dataclass, field
6
+
7
+ #'from shared.configuration import BaseConfiguration
8
+ from typing import Annotated
9
+
10
+ from ea4all.src.shared.configuration import BaseConfiguration
11
+
12
+ @dataclass(kw_only=True)
13
+ class AgentConfiguration(BaseConfiguration):
14
+ """The configuration for the agent."""
15
+
16
+ supervisor_model: Annotated[str, {"__template_metadata__": {"kind": "llm"}}] = field(
17
+ default="gpt-4o-mini",
18
+ metadata={
19
+ "description": "The language model used for supervisor agents. Should be in the form: provider/model-name."
20
+ },
21
+ )
22
+
23
+ vqa_model: Annotated[str, {"__template_metadata__": {"kind": "llm"}}] = field(
24
+ default="gpt-4o-mini", #meta-llama/llama-3.2-11B-Vision-Instruct",
25
+ metadata={
26
+ "description": "The language model used for visual questions and answering. Should be in the form: provider/model-name."
27
+ },
28
+ )
29
+
30
+ vqa_max_tokens: Annotated[int, {"__template_metadata__": {"kind": "llm"}}] = field(
31
+ default=4096,
32
+ metadata={
33
+ "description": "The maximum number of tokens allowed for the visual question and answer model."
34
+ },
35
+ )
36
+
37
+ ea4all_ask_human: Annotated[str, {"__template_metadata__": {"kind": "integration"}}] = field(
38
+ default="Frontend",
39
+ metadata={
40
+ "description": "Trigger EA4ALL ask human input via interruption or receive from external frontend."
41
+ },
42
+ )
ea4all/src/ea4all_vqa/graph.py ADDED
@@ -0,0 +1,401 @@
1
+ """This graph implements a Vision Question Answering (VQA) agent for architecture diagrams and flowcharts."""
2
+ """Changelog:
3
+ - Build the VQA Graph
4
+ - Setup state shared between nodes
5
+ - DiagramSupervisor function disabled, direct call to vqa_diagram
6
+ - Retrofitied supervisor function and added build_vqa_graph
7
+ #2025-06-03
8
+ - Refactored code to fix problems with linter and type checking (Standard mode)
9
+ - Refactored to use langgraph state management for MCP compatibility.
10
+ - Enabled input BYOD (Bring Your Own Data) for indexing.
11
+ """
12
+ #core libraries
13
+ from langchain_core.runnables import RunnableConfig
14
+ from langchain_core.prompts.chat import ChatPromptTemplate
15
+ from langchain_core.prompts import ChatPromptTemplate
16
+ from langchain_core.runnables.base import RunnableLambda, RunnableSerializable
17
+ from langchain_core.runnables import RunnableConfig
18
+ from langchain_core.language_models.chat_models import BaseChatModel
19
+
20
+ from langchain_core.messages import (
21
+ AIMessage,
22
+ HumanMessage,
23
+ ToolMessage,
24
+ )
25
+
26
+ #pydantic
27
+ from pydantic import BaseModel, Field
28
+
29
+ from json import JSONDecodeError
30
+
31
+ from typing import (
32
+ Annotated,
33
+ )
34
+ from typing_extensions import Literal, TypedDict
35
+
36
+ #Graphs, Agents
37
+ from langchain.agents import tool
38
+ from langchain_core.agents import AgentFinish
39
+ from langgraph.graph import (
40
+ START,
41
+ END,
42
+ StateGraph,
43
+ )
44
+ from langgraph.prebuilt import ToolNode, tools_condition, InjectedState
45
+ from langgraph.types import Command
46
+
47
+ #import APMGraph packages
48
+ from ea4all.src.ea4all_vqa.configuration import AgentConfiguration
49
+ from ea4all.src.ea4all_vqa.state import InputState, OutputState, OverallState
50
+
51
+ #import shared packages
52
+ from ea4all.src.shared.configuration import BaseConfiguration
53
+ from ea4all.src.shared.utils import (
54
+ get_llm_client,
55
+ _get_formatted_date,
56
+ get_raw_image,
57
+ extract_topic_from_business_input,
58
+ set_max_new_tokens,
59
+ get_predicted_num_tokens_from_prompt,
60
+ _join_paths,
61
+ )
62
+
63
+ import spaces
64
+
65
+ ##Diagram Graph Tools
66
+ #Data model Safeguarding
67
+ @tool("diagram_safeguard")
68
+ class DiagramV2S(BaseModel):
69
+ """Check whether the image provided is an architecture diagram or flowchart and safe to be processed."""
70
+ isArchitectureImage: bool = Field(...,description="Should be True if an image is an architecture diagram or flowchart, otherwise False.")
71
+ isSafe: bool = Field(...,description="Should be True if image or question are safe to be processed, False otherwise")
72
+ description: str = Field(description="One sentence describing the reason for being categorised as unsafe or not an architecture image.")
73
+
74
+ @tool("vqa_diagram", response_format="content")
75
+ @spaces.GPU
76
+ async def vqa_diagram(next:str, state: Annotated[OverallState, InjectedState], config: RunnableConfig):
77
+ """Diagram Vision Question Answering"""
78
+
79
+ print(f"---AGENT VQA PROCESSING QUESTION & ANSWERING---")
80
+
81
+ # Get any user-provided configs - LLM model in use
82
+ configuration = AgentConfiguration.from_runnable_config(config)
83
+ llm = get_llm_client(
84
+ model=configuration.vqa_model,
85
+ api_base_url=configuration.api_base_url,
86
+ streaming=configuration.streaming,
87
+ )
88
+
89
+ question = getattr(state, "question")
90
+ raw_image = get_raw_image(getattr(state,'image'))
91
+
92
+ user_message = HumanMessage(
93
+ content=[
94
+ {"type": "text", "text": f"{question}"},
95
+ {
96
+ "type": "image_url",
97
+ "image_url": {"url": f"data:image/png;base64,{raw_image}"},
98
+ },
99
+ ],
100
+ )
101
+ prompt = ChatPromptTemplate.from_messages([user_message])
102
+ values = {"question": question}
103
+
104
+ #llm.max_tokens = set_max_new_tokens(get_predicted_num_tokens_from_prompt(llm, prompt, values))
105
+ chain = prompt | llm
106
+
107
+ # Not streaming the response to MCP Inspector
108
+ #async for message in chain.astream(input=values, config={"tags": ["vqa_stream"]}, kwargs={"max_tokens": configuration.vqa_max_tokens}):
109
+ # yield message
110
+
111
+ response = await chain.ainvoke(input=values, config={"tags": ["vqa_stream"]}, kwargs={"max_tokens": configuration.vqa_max_tokens})
112
+
113
+ ## When exposed as an MCP tool, the output schema should be as simple as possible because the output is serialized to a single string
114
+ return response.content
115
+
116
+ ##Supervisor Agent Function custom parse with tool calling response support
117
+ def parse(output: ToolMessage) -> dict | AgentFinish:
118
+
119
+ # Parse out the function call
120
+ print("---PARSING SUPERVISOR AGENT OUTPUT---")
121
+ print(output.content)
122
+ try:
123
+ response = extract_topic_from_business_input(output.content)
124
+ _next = response['parameters']['next']
125
+ except JSONDecodeError:
126
+ return AgentFinish(return_values={"output": output.content}, log=str(output.content))
127
+
128
+ # If no function was selected, return to user
129
+ if _next == "FINISH":
130
+ return AgentFinish(return_values={"output": output.content}, log=str(output.content))
131
+
132
+ # If the DiagramTagging function was selected, return to the user with the function inputs
133
+ tool_call = {"name": "vqa_diagram", "args": {"next": _next}, "id": "1", "type": "tool_call"}
134
+
135
+ print(f"---ROUTING QUESTIONS TO {_next}---")
136
+ print(output.content)
137
+
138
+ return {
139
+ "messages": [AIMessage("", tool_calls=[tool_call])],
140
+ "next": _next,
141
+ }
142
+
143
+ #Create Safeguarding agent
144
+ def create_safeguarding_agent(llm, system_message: str, question: str, raw_image: str):
145
+ """Create an LLM-based safeguarding checker."""
146
+ # LLM with function call
147
+ structured_llm_safeguard = llm.with_structured_output(DiagramV2S)
148
+
149
+ human_message = HumanMessage(content=[
150
+ {"type": "text", "text": f"{question}"},
151
+ {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{raw_image}"}},
152
+ ])
153
+
154
+ safeguard_prompt = ChatPromptTemplate.from_messages(
155
+ [
156
+ human_message,
157
+ ("system", system_message),
158
+ ]
159
+ )
160
+
161
+ diagram_safeguard = safeguard_prompt | structured_llm_safeguard
162
+
163
+ return diagram_safeguard
164
+
165
+ #Safeguard custom parse
166
+ def safeguard_check(state:OverallState, config:RunnableConfig) -> dict:
167
+ configuration = AgentConfiguration.from_runnable_config(config)
168
+ llm = get_llm_client(configuration.supervisor_model)
169
+
170
+ #'raw_image = state.messages[0].content[0]['image_url']['url'].split(',')[1]
171
+ question = getattr(state, "question", "Describe the image")
172
+ raw_image = get_raw_image(getattr(state,'image', _join_paths(configuration.ea4all_images,'multi-app-architecture.png')))
173
+
174
+ system_message = (
175
+ "Act as a safeguarding agent to check whether the image provided is an architecture diagram or flowchart and safe to be processed. "
176
+ "You will be provided with a question and an image. "
177
+ "You should return a JSON object with the following fields: "
178
+ "'isArchitectureImage':bool, 'isSafe': bool, 'description': str. "
179
+ "The 'isArchitectureImage' field should be True if the image is an architecture diagram or flowchart, otherwise False. "
180
+ "The 'isSafe' field should be True if the image or question are safe to be processed, False otherwise. "
181
+ "The 'description' field should contain a one sentence description of the reason for being categorised as unsafe or not an architecture image. "
182
+ "If the image is not an architecture diagram or flowchart, you should say it is not an architecture image as 'description' field. "
183
+ "If the image is not safe to be processed, you should say it is unsafe as 'description' field. "
184
+ )
185
+
186
+ safeguard_checker = create_safeguarding_agent(
187
+ llm,
188
+ system_message,
189
+ question,
190
+ raw_image
191
+ )
192
+
193
+ input = {"question": question, "raw_image": raw_image}
194
+ result = safeguard_checker.invoke(input=input, config=config)
195
+
196
+ return {"safety_status": result}
197
+
198
+ def call_finish(state:OverallState, config:RunnableConfig) -> dict:
199
+
200
+ return {
201
+ "messages": [],
202
+ "safety_status": getattr(state, 'safety_status', {}),
203
+ }
204
+
205
+ def make_supervisor_node(model: BaseChatModel, members: list[str]) -> RunnableLambda:
206
+ options = ["FINISH"] + members
207
+
208
+ system_prompt = (
209
+ "You are an enterprise architecture team supervisor tasked to manage a conversation between the following members: "
210
+ "[diagram_description, diagram_object, diagram_improvement, diagram_risk]. "
211
+ "Given the user request, use the function below to respond with team member to act next. "
212
+ " If none of team member can be used, select 'FINISH'."
213
+ )
214
+
215
+ class Router(TypedDict):
216
+ """Worker to route to next. If no workers needed, route to FINISH."""
217
+ next: Literal['FINISH', 'diagram_description', 'diagram_object', 'diagram_improvement', 'diagram_risk']
218
+
219
+ async def supervisor_node(state: OverallState, config: RunnableConfig) -> dict | AgentFinish:
220
+
221
+ """An LLM-based router."""
222
+ messages = [
223
+ {"role": "system", "content": system_prompt},
224
+ ] + getattr(state, 'messages')
225
+
226
+ response = await model.with_structured_output(Router, include_raw=True).ainvoke(messages, config=config)
227
+
228
+ if isinstance(response, dict):
229
+ if response['parsed']['next'] == "FINISH":
230
+ return AgentFinish(return_values={"output": response['raw']}, log=response['raw']['content'])
231
+
232
+ # If the DiagramTagging function was selected, return to the user with the function inputs
233
+ tool_call = {"name": "vqa_diagram", "args": {"next": response['parsed']['next']}, "id": "1", "type": "tool_call"}
234
+
235
+ return {
236
+ "messages": [AIMessage("", tool_calls=[tool_call])],
237
+ "next": response['parsed']['next'],
238
+ }
239
+ else:
240
+ return AgentFinish(return_values={"output": response}, log=str(response))
241
+
242
+ return RunnableLambda(supervisor_node)
243
+
244
+ #Create team supervisor
245
+ def create_team_supervisor(state:OverallState, config:RunnableConfig) -> RunnableSerializable: #Adding the parameter config:RunnableConfig causing Channel error
246
+ """An LLM-based router."""
247
+
248
+ configuration = AgentConfiguration.from_runnable_config(config)
249
+ llm = get_llm_client(
250
+ configuration.vqa_model,
251
+ api_base_url=configuration.api_base_url,
252
+ )
253
+
254
+ # Supervisor Tool Prompts
255
+ system_prompt = f"""
256
+ Environment: ipython
257
+ Cutting Knowledge Date: December 2023
258
+ Today Date: {_get_formatted_date()}
259
+ """
260
+
261
+ user_prompt = """
262
+ You are an enterprise architecture team supervisor tasked to manage a conversation between the following members:
263
+ ["diagram_description", "diagram_object", "diagram_improvement", "diagram_risk"].
264
+ Given the user request, use the function below to respond with the team member to act next.
265
+ If none of the team members can be used, select "FINISH".
266
+ Function (in JSON format):
267
+ {
268
+ "type": "function", "function": {
269
+ "name": "route",
270
+ "description": "Select one of the available tools that should be used next.",
271
+ "parameters": {
272
+ "title": "routeSchema",
273
+ "type": "object",
274
+ "properties": {
275
+ "next": {
276
+ "title": "Next",
277
+ "anyOf": [
278
+ {"enum": ["FINISH", "diagram_description", "diagram_object", "diagram_improvement", "diagram_risk"]},
279
+ ],
280
+ },
281
+ },
282
+ "required": ["next"],
283
+ },
284
+ }
285
+ }
286
+ """
287
+
288
+ messages = [
289
+ ("system", system_prompt),
290
+ ("human", "{user_prompt}"),
291
+ ("human", "{question}"),
292
+ ]
293
+
294
+ prompt = ChatPromptTemplate.from_messages(messages).partial(
295
+ user_prompt=user_prompt)
296
+
297
+ llm.max_tokens = set_max_new_tokens(get_predicted_num_tokens_from_prompt(llm, prompt, {"question":state.question}))
298
+
299
+ supervisor_agent = (
300
+ prompt |
301
+ llm |
302
+ parse
303
+ )
304
+
305
+ return supervisor_agent
306
+
307
+ # The following functions interoperate between the top level graph state
308
+ # and the state of the research sub-graph
309
+ # this makes it so that the states of each graph don't get intermixed
310
+ def enter_graph(state:OverallState, config:RunnableConfig) -> Command[Literal['safeguard_check']]:
311
+
312
+ configuration = AgentConfiguration.from_runnable_config(config)
313
+
314
+ messages = [
315
+ HumanMessage(content=state.question) #messages[-1]['content']),
316
+ ]
317
+
318
+ #if not configuration.ea4all_ask_human == "interrupt":
319
+ # raw_image = state.messages[0].content[0]['image_url']['url'].split(',')[1]
320
+ #else:
321
+ # image = getattr(state,'image', "")
322
+ # raw_image = image if image else _join_paths(configuration.ea4all_images,'multi-app-architecture.png')
323
+
324
+ image = getattr(state,'image', None)
325
+ if image:
326
+ raw_image = state.image #['image_url']['url'].split(',')[1]
327
+ else:
328
+ raw_image = _join_paths(configuration.ea4all_images,'multi-app-architecture.png')
329
+
330
+ return Command(
331
+ update={
332
+ "messages": messages,
333
+ "question": state.question, #messages[-1].content,
334
+ "image": raw_image
335
+ },
336
+ goto='safeguard_check',
337
+ )
338
+
339
+ return {
340
+ "messages": state.messages,
341
+ "question": messages[-1].content,
342
+ "image": raw_image,
343
+ }
344
+
345
+ async def choose_next(state: OverallState):
346
+ """Choose the next node based on the safety status."""
347
+ isArchitectureImage = getattr(state, 'safety_status', {}).get('isArchitectureImage', False)
348
+ isSafe = getattr(state, 'safety_status', {}).get('isSafe', False)
349
+
350
+ return "diagram_supervisor" if isArcihitectureImage and isSafe else "final"
351
+
352
+ def build_vqa_graph():
353
+ model = get_llm_client(BaseConfiguration.supervisor_model, api_base_url="", streaming=BaseConfiguration.streaming)
354
+ teams_supervisor_node = make_supervisor_node(model, ['diagram_description', 'diagram_object', 'diagram_improvement', 'diagram_risk'])
355
+
356
+ workflow = StateGraph(OverallState, input=InputState, output=OutputState,config_schema=AgentConfiguration) #input=InputState
357
+
358
+ #Setup Graph nodes
359
+ #Node name CANNOT have blank space - pattern: \'^[a-zA-Z0-9_-]+$\'.", \'type\'
360
+ workflow.add_node("start", enter_graph)
361
+ workflow.add_node("safeguard_check", safeguard_check)
362
+ workflow.add_node("diagram_supervisor", teams_supervisor_node)
363
+ workflow.add_node("final", call_finish)
364
+
365
+ tool_node = ToolNode([vqa_diagram])
366
+ workflow.add_node("tools", tool_node)
367
+
368
+ #Setup graph edges
369
+ #Graph entry point
370
+ workflow.add_edge(START, "start")
371
+ workflow.add_edge("start", "safeguard_check")
372
+
373
+ workflow.add_conditional_edges(
374
+ "safeguard_check",
375
+ choose_next,
376
+ {
377
+ "diagram_supervisor": "diagram_supervisor",
378
+ "final": "final",
379
+ }
380
+ )
381
+
382
+ workflow.add_conditional_edges(
383
+ "diagram_supervisor",
384
+ tools_condition,
385
+ #calls one of our tools. END causes the graph to terminate (and respond to the user)
386
+ {
387
+ "tools": "tools",
388
+ END: END,
389
+ }
390
+ )
391
+
392
+ workflow.add_edge("final", END)
393
+ workflow.add_edge("tools", END)
394
+
395
+ #memory = MemorySaver()
396
+ diagram_graph = workflow.compile() #checkpointer=memory)
397
+ diagram_graph.name = "DiagramGraph"
398
+
399
+ return diagram_graph
400
+
401
+ diagram_graph = build_vqa_graph()
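A minimal usage sketch for the compiled VQA graph, assuming it is imported from this module; the question is hypothetical, and an empty image string makes enter_graph fall back to the bundled multi-app-architecture.png sample.

import asyncio
from ea4all.src.ea4all_vqa.graph import diagram_graph

async def demo_vqa():
    # Hypothetical question; image="" triggers the sample-diagram fallback in enter_graph.
    result = await diagram_graph.ainvoke(
        {"question": "Which components talk to the payment gateway?", "image": ""},
        config={"configurable": {"vqa_model": "gpt-4o-mini"}},
    )
    print(result.get("safety_status"))
    messages = result.get("messages") or []
    print(messages[-1].content if messages else "no answer")

# asyncio.run(demo_vqa())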
ea4all/src/ea4all_vqa/state.py ADDED
@@ -0,0 +1,64 @@
1
+ """State management for the VQA graph.
2
+
3
+ This module defines the state structures used in the VQA graph. It includes
4
+ definitions for agent state, input state.
5
+ """
6
+
7
+ import operator
8
+ from dataclasses import dataclass
9
+ from typing import (
10
+ Optional,
11
+ Annotated,
12
+ Sequence,
13
+ )
14
+
15
+ from langchain_core.messages import (
16
+ BaseMessage,
17
+ )
18
+ from langgraph.graph import MessagesState
19
+
20
+ # Optional, the InputState is a restricted version of the State that is used to
21
+ # define a narrower interface to the outside world vs. what is maintained
22
+ # internally.
23
+ @dataclass(kw_only=True)
24
+ class InputState:
25
+ """Represents the input state for the agent.
26
+
27
+ This class defines the structure of the input state, which includes
28
+ the messages exchanged between the user and the agent. It serves as
29
+ a restricted version of the full State, providing a narrower interface
30
+ to the outside world compared to what is maintained internally.
31
+ """
32
+
33
+ """Attributes:
34
+ question: user question
35
+ image: architecture diagram
36
+ """
37
+ question: str
38
+ image: str
39
+
40
+ # The index state defines the simple IO for the single-node index graph
41
+ @dataclass(kw_only=True)
42
+ class OutputState:
43
+ """Represents the output schema for the Diagram agent.
44
+ """
45
+
46
+ messages: Optional[Annotated[Sequence[MessagesState], operator.add]] = None
47
+ safety_status: Optional[dict[str,str]] = None
48
+
49
+ """Attributes:
50
+ safety_status: safety status of the diagram provided by the user
51
+ Answer to user's question about the Architectural Diagram.
52
+ """
53
+
54
+ @dataclass(kw_only=True)
55
+ class OverallState(InputState, OutputState):
56
+ """Represents the overall state of the Diagram graph."""
57
+
58
+ """Attributes:
59
+ error: tool error
60
+ next: next tool to be called
61
+ """
62
+
63
+ error: Optional[str] = None
64
+ next: Optional[str] = None
ea4all/src/graph.py ADDED
@@ -0,0 +1,254 @@
1
+ """Main file for constructing the EA4ALL hierarchical graph"""
2
+
3
+ """
4
+ EA4ALL Hierarchical Graph
5
+ This module defines the main file for constructing the EA4ALL hierarchical graph. It contains functions and classes for creating and managing the graph structure.
6
+ Functions:
7
+ - make_supervisor_node: Creates a supervisor node for managing a conversation between architect workers.
8
+ - call_landscape_agentic: Calls the landscape agentic graph.
9
+ - call_diagram_agentic: Calls the diagram agentic graph.
10
+ - call_togaf_agentic: Calls the togaf agentic graph.
11
+ - websearch: Search for real-time data to answer user's question
12
+ Classes:
13
+ - Router: TypedDict representing the worker to route to next.
14
+ Attributes:
15
+ - model: The LLM client for the supervisor model.
16
+ - super_builder: The StateGraph builder for constructing the graph.
17
+ - super_graph: The compiled EA4ALL Agentic Workflow Graph.
18
+ Note: This module depends on other modules and packages such as langchain_core, langgraph, shared, ea4all_apm, ea4all_vqa, and ea4all_gra.
19
+ """
20
+
21
+ """Changelog:
22
+ - lanchain_openapi: 0.2.9 (0.3.6 issue with max_tokens for HF models)
23
+ #2025-06-03
24
+ - Refactored code to fix problems with linter and type checking (Standard mode)
25
+ """
26
+
27
+ from langgraph.types import Command
28
+ from langchain_core.messages import (
29
+ HumanMessage,
30
+ AIMessage
31
+ )
32
+ from langchain_core.language_models.chat_models import BaseChatModel
33
+ from langchain_core.runnables import RunnableConfig
34
+
35
+ from langchain import hub
36
+
37
+ from langgraph.graph import (
38
+ START,
39
+ END,
40
+ StateGraph,
41
+ )
42
+ from langgraph.checkpoint.memory import MemorySaver
43
+
44
+ from typing_extensions import Literal, TypedDict
45
+ import uuid
46
+
47
+ from ea4all.src.shared.configuration import BaseConfiguration
48
+ from ea4all.src.shared.utils import get_llm_client
49
+ from ea4all.src.shared.state import State
50
+ from ea4all.src.tools.tools import websearch
51
+
52
+ from ea4all.src.ea4all_indexer.graph import indexer_graph
53
+ from ea4all.src.ea4all_apm.graph import apm_graph
54
+ from ea4all.src.ea4all_vqa.graph import diagram_graph
55
+ from ea4all.src.ea4all_gra.graph import togaf_graph
56
+
57
+ async def call_indexer_apm(state: State, config: RunnableConfig):
58
+ response = await indexer_graph.ainvoke(input={"docs":[]}, config=config)
59
+
60
+ def make_supervisor_node(model: BaseChatModel, members: list[str]):
61
+ options = ["FINISH"] + members
62
+
63
+ system_prompt = hub.pull("ea4all_super_graph").template
64
+
65
+ class Router(TypedDict):
66
+ """Worker to route to next. If no workers needed, route to FINISH."""
67
+ next: Literal["FINISH", "portfolio_team", "diagram_team", "blueprint_team", "websearch_team"]
68
+
69
+ async def supervisor_node(state: State, config: RunnableConfig) -> Command[Literal["portfolio_team", "diagram_team", "blueprint_team", "websearch_team", '__end__']]:
70
+
71
+ """An LLM-based router."""
72
+ messages = [
73
+ {"role": "system", "content": system_prompt},
74
+ ] + [state["messages"][-1]]
75
+
76
+ response = await model.with_structured_output(Router).ainvoke(messages, config=config)
77
+
78
+ _goto = "__end__"
79
+
80
+ if isinstance(response, dict):
81
+ _goto = response["next"]
82
+ # Ensure _goto is one of the allowed Literal values
83
+ if _goto not in ["portfolio_team", "diagram_team", "blueprint_team", "websearch_team"]:
84
+ _goto = "__end__"
85
+
86
+ print(f"---Supervisor got a request--- Question: {state['messages'][-1].content} ==> Routing to {_goto}\n")
87
+
88
+ return Command(
89
+ #update={"next": _goto},
90
+ goto=_goto
91
+ )
92
+
93
+ return supervisor_node
94
+
95
+ async def call_landscape_agentic(state: State, config: RunnableConfig) -> Command[Literal['__end__']]: ##2025-02-21: NOT passing CHAT MEMORY to the APM_graph
96
+ response = await apm_graph.ainvoke({"question": state["messages"][-1].content}, config=config)
97
+ return Command(
98
+ update={
99
+ "messages": [
100
+ AIMessage(
101
+ content=str(response), name="landscape_agentic"
102
+ )
103
+ ]
104
+ },
105
+ goto="__end__",
106
+ )
107
+
108
+ async def call_diagram_agentic(state: State, config: RunnableConfig) -> Command[Literal['__end__']]: #NOT passing CHAT MEMORY to the Diagram_graph
109
+
110
+ inputs = {
111
+ "messages": [{"role": "user", "content": state.get('messages')[-1].content}],
112
+ "question": state['messages'][-1].content, "image":""
113
+ } #user response
114
+
115
+ response = await diagram_graph.ainvoke(
116
+ input=inputs,
117
+ config=config
118
+ )
119
+
120
+ return Command(
121
+ update={
122
+ "messages": [
123
+ AIMessage(
124
+ content=response['messages'][-1].content, name="landscape_agentic"
125
+ )
126
+ ]
127
+ },
128
+ goto="__end__",
129
+ )
130
+
131
+ async def call_togaf_agentic(state: State, config: RunnableConfig) -> Command[Literal["__end__"]]: #NOT passing CHAT MEMORY to the Togaf_graph
132
+ print(f"---TOGAF ROUTE team node ready to --- CALL_TOGAF_AGENTIC Routing to {state['next']} with User Question: {state['messages'][-1].content}")
133
+
134
+ inputs = {"messages": [{"role": "user", "content": state.get('messages')[-1].content}]} #user response
135
+
136
+ response = await togaf_graph.ainvoke(
137
+ input=inputs,
138
+ config=config
139
+ ) #astream not loading the graph
140
+
141
+ return Command(
142
+ update={
143
+ "messages": [
144
+ AIMessage(
145
+ content=response["messages"][-1].content, name="togaf_route"
146
+ )
147
+ ]
148
+ },
149
+ goto="__end__",
150
+ )
151
+
152
+ # Wrap-up websearch answer to user's question
153
+ async def call_generate_websearch(state:State, config: RunnableConfig) -> Command[Literal["__end__"]]:
154
+ from ea4all.src.ea4all_apm.state import OverallState
155
+
156
+ if config is not None:
157
+ source = config.get('metadata', {}).get('langgraph_node', 'unknown')
158
+
159
+ # Invoke GENERATOR node in the APMGraph
160
+ state_dict = {
161
+ "documents": state['messages'][-1].content,
162
+ "web_search": "Yes",
163
+ "question": state['messages'][-2].content,
164
+ "source": source
165
+ }
166
+
167
+ apm_state = OverallState(**state_dict)
168
+ generation = await apm_graph.nodes["generate"].ainvoke(apm_state, config)
169
+
170
+ return Command(
171
+ update={
172
+ "messages": [
173
+ AIMessage(
174
+ content=generation['generation'], name="generate_websearch"
175
+ )
176
+ ]
177
+ },
178
+ goto="__end__",
179
+ )
180
+
181
+ async def blueprint_team(state: State) -> Command[Literal["togaf_route"]]:
182
+ print("---Blueprint team got a request--- Routing to TOGAF_ROUTE node")
183
+
184
+ return Command(update={**state}, goto="togaf_route")
185
+
186
+ async def diagram_team(state: State) -> Command[Literal["diagram_route"]]:
187
+ print("---Diagram team got a request--- Routing to DIAGRAM_ROUTE node")
188
+
189
+ return Command(update={**state}, goto="diagram_route")
190
+
191
+ async def super_graph_entry_point(state: State):
192
+ # Generate a unique thread ID
193
+ thread_config = RunnableConfig({"configurable": {"thread_id": str(uuid.uuid4())}})
194
+
195
+ # Initialize state if not provided
196
+ if state is None:
197
+ state = {
198
+ "messages": [
199
+ ("system", "You are a helpful assistant"),
200
+ ("human", "Start the workflow")
201
+ ]
202
+ }
203
+
204
+ # Build and compile the graph
205
+ graph = build_super_graph()
206
+
207
+ # Async invocation
208
+ try:
209
+ # Use ainvoke for async execution
210
+ result = await graph.ainvoke(state, config=RunnableConfig(thread_config))
211
+ return result
212
+ except Exception as e:
213
+ print(f"Graph execution error: {e}")
214
+ raise
215
+
216
+ # Define & build the graph.
217
+ def build_super_graph():
218
+
219
+ model = get_llm_client(BaseConfiguration.supervisor_model, api_base_url="", streaming=BaseConfiguration.streaming)
220
+ teams_supervisor_node = make_supervisor_node(model, ["portfolio_team", "diagram_team", "blueprint_team","websearch_team"])
221
+
222
+ super_builder = StateGraph(State, config_schema=BaseConfiguration)
223
+
224
+ super_builder.add_node("apm_indexer", call_indexer_apm)
225
+ super_builder.add_node("supervisor", teams_supervisor_node)
226
+ super_builder.add_node("portfolio_team", call_landscape_agentic)
227
+ super_builder.add_node("websearch_team", websearch)
228
+ super_builder.add_node("diagram_team", diagram_team)
229
+ super_builder.add_node("blueprint_team", blueprint_team)
230
+ super_builder.add_node("generate_websearch", call_generate_websearch)
231
+ super_builder.add_node("diagram_route", call_diagram_agentic)
232
+ super_builder.add_node("togaf_route", call_togaf_agentic)
233
+
234
+
235
+ super_builder.add_edge(START, "apm_indexer")
236
+ super_builder.add_edge("apm_indexer", "supervisor")
237
+
238
+ super_builder.add_edge("websearch_team", "generate_websearch")
239
+ super_builder.add_edge("blueprint_team", "togaf_route")
240
+ super_builder.add_edge("diagram_team", "diagram_route")
241
+
242
+ super_builder.add_edge("portfolio_team", END)
243
+ super_builder.add_edge("generate_websearch", END)
244
+ super_builder.add_edge("togaf_route", END)
245
+ super_builder.add_edge("diagram_route", END)
246
+
247
+ #memory = MemorySaver() #With LangGraph API, inMemmory is handled directly by the platform
248
+ super_graph = super_builder.compile() #checkpointer=memory)
249
+ super_graph.name = "EA4ALL Agentic Workflow Graph"
250
+
251
+ return super_graph
252
+
253
+ # Export the graph for LangGraph Dev/Studio
254
+ super_graph = build_super_graph()
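A minimal end-to-end usage sketch for the routed super graph, assuming it is imported from this module; the question and thread id are illustrative, and the supervisor node decides which architect team handles the request.

import asyncio
from ea4all.src.graph import super_graph

async def demo_super_graph():
    # Hypothetical question; the supervisor routes it to portfolio, diagram, blueprint or websearch.
    result = await super_graph.ainvoke(
        {"messages": [{"role": "user", "content": "Which applications support order management?"}]},
        config={"configurable": {"thread_id": "super-demo"}},
    )
    print(result["messages"][-1].content)

# asyncio.run(demo_super_graph())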
ea4all/src/shared/__init__.py ADDED
@@ -0,0 +1 @@
1
+ """Shared utilities module."""
ea4all/src/shared/configuration.py ADDED
@@ -0,0 +1,165 @@
1
+ """Define the configurable parameters for the agent."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import ast
6
+ from dataclasses import dataclass, field, fields
7
+ from typing import Annotated, Any, Optional, Type, TypeVar, Literal
8
+
9
+ from langchain_core.runnables import RunnableConfig, ensure_config
10
+
11
+ # This file contains sample APPLICATIONS to index
12
+ DEFAULT_APM_CATALOGUE = "APM-ea4all (test-split).xlsx"
13
+
14
+ # These files contains sample QUESTIONS
15
+ APM_MOCK_QNA = "apm_qna_mock.txt"
16
+ PMO_MOCK_QNA = "pmo_qna_mock.txt"
17
+
18
+ @dataclass(kw_only=True)
19
+ class BaseConfiguration:
20
+ """Configuration class for all Agents.
21
+
22
+ This class defines the parameters needed for configuring the indexing and
23
+ retrieval processes, including embedding model selection, retriever provider choice, and search parameters.
24
+ """
25
+
26
+ supervisor_model: Annotated[str, {"__template_metadata__": {"kind": "llm"}}] = field(
27
+ default="gpt-4o-mini",
28
+ metadata={
29
+ "description": "The language model used for supervisor agents. Should be in the form: provider/model-name."
30
+ },
31
+ )
32
+
33
+ api_base_url: Annotated[str, {"__template_metadata__": {"kind": "hosting"}}] = field(
34
+ default="https://api-inference.huggingface.co/models/",
35
+ metadata={
36
+ "description": "The base url for models hosted on Hugging Face's model hub."
37
+ },
38
+ )
39
+
40
+ max_tokens: Annotated[int, {"__template_metadata__": {"kind": "llm"}}] = field(
41
+ default=4096,
42
+ metadata={
43
+ "description": "The maximum number of tokens allowed for in general question and answer model."
44
+ },
45
+ )
46
+
47
+ temperature: Annotated[int, {"__template_metadata__": {"kind": "llm"}}] = field(
48
+ default=0,
49
+ metadata={
50
+ "description": "The default tempature to infere the LLM."
51
+ },
52
+ )
53
+
54
+ streaming: Annotated[bool, {"__template_metadata__": {"kind": "llm"}}] = field(
55
+ default=True,
56
+ metadata={
57
+ "description": "Default streaming mode."
58
+ },
59
+ )
60
+
61
+ ea4all_images: str = field(
62
+ default="ea4all/images",
63
+ metadata={
64
+ "description": "Configuration for the EA4ALL images folder."
65
+ },
66
+ )
67
+
68
+ ea4all_store: Annotated[str, {"__template_metadata__": {"kind": "infra"}}] = field(
69
+ default="ea4all/ea4all_store",
70
+ metadata={
71
+ "description": "The EA4ALL folder for mock & demo content."
72
+ },
73
+ )
74
+
75
+ ea4all_ask_human: Annotated[str, {"__template_metadata__": {"kind": "integration"}}] = field(
76
+ default="interrupt", #"Frontend"
77
+ metadata={
78
+ "description": "Trigger EA4ALL ask human input via interruption or receive from external frontend."
79
+ },
80
+ )
81
+
82
+ ea4all_recursion_limit: Annotated[int, {"__template_metadata__": {"kind": "graph"}}] = field(
83
+ default=25,
84
+ metadata={
85
+ "description": "Maximum recursion allowed for EA4ALL graphs."
86
+ },
87
+ )
88
+
89
+ # models
90
+ embedding_model: Annotated[str, {"__template_metadata__": {"kind": "embeddings"}}] = field(
91
+ default="openai/text-embedding-3-small",
92
+ metadata={
93
+ "description": "Name of the embedding model to use. Must be a valid embedding model name."
94
+ },
95
+ )
96
+
97
+ retriever_provider: Annotated[
98
+ Literal["faiss"],
99
+ {"__template_metadata__": {"kind": "retriever"}},
100
+ ] = field(
101
+ default="faiss",
102
+ metadata={
103
+ "description": "The vector store provider to use for retrieval. Options are 'FAISS' at moment only."
104
+ },
105
+ )
106
+
107
+ apm_faiss: Annotated[str, {"__template_metadata__": {"kind": "infra"}}] = field(
108
+ default="apm_faiss_index",
109
+ metadata={
110
+ "description": "The EA4ALL APM default Vectorstore index name."
111
+ },
112
+ )
113
+
114
+ apm_catalogue: str = field(
115
+ default=DEFAULT_APM_CATALOGUE,
116
+ metadata={
117
+ "description": "The EA4ALL APM default Vectorstore index name."
118
+ },
119
+ )
120
+
121
+ search_kwargs: Annotated[str, {"__template_metadata__": {"kind": "retriever"}}] = field(
122
+ #default="{'k': 50, 'score_threshold': 0.8, 'filter': {'namespace':'ea4all_agent'}}",
123
+ default="{'k':10, 'fetch_k':50}",
124
+ metadata={
125
+ "description": "Additional keyword arguments to pass to the search function of the retriever."
126
+ }
127
+ )
128
+
129
+ def __post_init__(self):
130
+ # Convert search_kwargs from string to dictionary
131
+ try:
132
+ if isinstance(self.search_kwargs, str):
133
+ self.search_kwargs = ast.literal_eval(self.search_kwargs)
134
+ except (SyntaxError, ValueError):
135
+ # Fallback to an empty dict or log an error
136
+ self.search_kwargs = {}
137
+ print("Error parsing search_kwargs")
138
+
139
+ @classmethod
140
+ def from_runnable_config(
141
+ cls: Type[T], config: Optional[RunnableConfig] = None
142
+ ) -> T:
143
+ """Create an IndexConfiguration instance from a RunnableConfig object.
144
+
145
+ Args:
146
+ cls (Type[T]): The class itself.
147
+ config (Optional[RunnableConfig]): The configuration object to use.
148
+
149
+ Returns:
150
+ T: An instance of the configuration class with the specified configuration.
151
+ """
152
+ config = ensure_config(config)
153
+ configurable = config.get("configurable") or {}
154
+ _fields = {f.name for f in fields(cls) if f.init}
155
+
156
+ # Special handling for search_kwargs
157
+ if 'search_kwargs' in configurable and isinstance(configurable['search_kwargs'], str):
158
+ try:
159
+ configurable['search_kwargs'] = ast.literal_eval(configurable['search_kwargs'])
160
+ except (SyntaxError, ValueError):
161
+ configurable['search_kwargs'] = {}
162
+
163
+ return cls(**{k: v for k, v in configurable.items() if k in _fields})
164
+
165
+ T = TypeVar("T", bound=BaseConfiguration)
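For reference, a minimal usage sketch (not part of the committed code): the override values below are illustrative, and search_kwargs may be supplied as a string that is parsed with ast.literal_eval.

from langchain_core.runnables import RunnableConfig
from ea4all.src.shared.configuration import BaseConfiguration

config: RunnableConfig = {
    "configurable": {
        "supervisor_model": "gpt-4o-mini",
        "search_kwargs": "{'k': 5, 'fetch_k': 20}",  # string form is accepted and converted to a dict
    }
}
cfg = BaseConfiguration.from_runnable_config(config)
assert isinstance(cfg.search_kwargs, dict)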
ea4all/src/shared/prompts.py ADDED
@@ -0,0 +1,393 @@
1
+ import os
2
+
3
+ from langchain_core.prompts.chat import (
4
+ ChatPromptTemplate,
5
+ HumanMessagePromptTemplate,
6
+ SystemMessagePromptTemplate
7
+ )
8
+
9
+ from langchain_core.prompts import PromptTemplate, FewShotChatMessagePromptTemplate
10
+ from langchain_core.prompts import MessagesPlaceholder, format_document
11
+ from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
12
+ from langchain.chains.prompt_selector import ConditionalPromptSelector
13
+
14
+ from langchain_core.messages import (
15
+ HumanMessage,
16
+ )
17
+
18
+ from langchain_core.output_parsers import (
19
+ JsonOutputParser
20
+ )
21
+
22
+ from langsmith import (
23
+ traceable,
24
+ )
25
+ ################################
26
+ ##COLLECTION of prompt functions
27
+ ################################
28
+ ##Llama-3.1 Prompt Format
29
+ # Define the prompt format with special tokens
30
+ LLAMA31_CHAT_PROMPT_FORMAT = (
31
+ "<|begin_of_text|>"
32
+ "<|start_header_id|>system<|end_header_id|>{system_message}<|eot_id|>\n"
33
+ "<|start_header_id|>user<|end_header_id|>{human_message}<|eot_id|>\n"
34
+ "<|start_header_id|>ai<|end_header_id|>{ai_message}\n"
35
+ )
36
+
37
+ LLAMA31_PROMPT_FORMAT = (
38
+ "<|begin_of_text|>"
39
+ "<|start_header_id|>system<|end_header_id|>{system_message}<|eot_id|>\n"
40
+ "<|start_header_id|>user<|end_header_id|>{human_message}<|eot_id|>\n"
41
+ "<|start_header_id|>ai<|end_header_id|>{ai_message}\n"
42
+ )
43
+
44
+ ##return a prompt-template class with informed user inquiry
45
+ def ea4all_prompt(query):
46
+ prompt_template = PromptTemplate(
47
+ input_variables=["query", "answer"],
48
+ template=TEMPLATE_QUERY_ANSWER)
49
+
50
+ prompt = prompt_template.format(
51
+ query=query,
52
+ answer="")
53
+
54
+ return prompt
55
+
56
+ ##return a chat-prompt-template class from the informed template
57
+ def ea4all_chat_prompt(template):
58
+ system_message_prompt = SystemMessagePromptTemplate.from_template(template)
59
+ human_template = "{user_question}"
60
+ human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
61
+
62
+ ea4all_prompt = ChatPromptTemplate.from_messages(
63
+ messages=[
64
+ system_message_prompt,
65
+ ## MessagesPlaceholder(variable_name="history"),
66
+ human_message_prompt],
67
+ )
68
+ ea4all_prompt.output_parser=JsonOutputParser()
69
+
70
+ return ea4all_prompt
71
+
72
+ ##select best prompt based on user inquiry's category
73
+ @traceable(
74
+ tags={os.environ["EA4ALL_ENV"]}
75
+ )
76
+ def ea4ll_prompt_selector(category):
77
+ QUESTION_PROMPT_SELECTOR = ConditionalPromptSelector(
78
+ default_prompt = ea4all_chat_prompt(GENERAL_TEMPLATE),
79
+ conditionals=[
80
+ (lambda category: True if category == "Strategy" else False, ea4all_chat_prompt(STRATEGY_TEMPLATE)),
81
+ (lambda category: True if category == "Application" else False, ea4all_chat_prompt(APPLICATION_TEMPLATE)),
82
+ (lambda category: True if category == "Recommendation" else False, ea4all_chat_prompt(RECOMMENDATION_TEMPLATE)),
83
+ (lambda category: True if category not in ("Strategy","Application", "Recommendation") else False, ea4all_chat_prompt(GENERAL_TEMPLATE))
84
+ ]
85
+ )
86
+
87
+ prompt = QUESTION_PROMPT_SELECTOR.get_prompt(category)
88
+
89
+ return(prompt)
90
+
91
+
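As an illustration only (using the names defined in this file), the selector returns a chat prompt whose templates expect user_question and cdocs; the question and context strings here are made up.

prompt = ea4ll_prompt_selector("Application")
messages = prompt.format_messages(
    user_question="Which applications support the Payments capability?",
    cdocs="```<retrieved APM context goes here>```",
)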
92
+ #######################
93
+ ##COLLECTION of prompts
94
+ #######################
95
+
96
+ ##Template-basic instruction + context
97
+ TEMPLATE_CONTEXT = """You are a helpful Enterprise Architect with knowledge on enterprises IT landscapes.
98
+ Use only the context delimited by triple backticks to answer questions. Return the answer formatted as a text paragraph.
99
+ If you don't know the answer, return "I could not find the information".
100
+ Don't make up the response.
101
+ Context: ```{cdocs}```
102
+ Help answer: ""
103
+ """
104
+
105
+ ##Template-basic instruction + question + answer
106
+ TEMPLATE_QUERY_ANSWER = """You are Enterprise Architect highly knowledgable on IT landscape. \
107
+ Answer the question that is delimited by triple backticks in the style of a bullet list. \
108
+ If the question cannot be answered using the information provided answer with "I don't know". \
109
+
110
+ Always say "thanks for asking!" at the end of the answer.
111
+
112
+ Question: ```{user_question}```
113
+ Answer: {answer}
114
+ """
115
+
116
+ TEMPLATE_APM_QNA_ROUTING = """application portfolio assessment, application/IT landscape rationalisation, simplification or optimisation, business capability assessment, line of business landscape, who can I talk to, assistance from architecture team."""
117
+
118
+ ##Template-break-into-simpler-tasks
119
+ #https://platform.openai.com/docs/guides/prompt-engineering/strategy-split-complex-tasks-into-simpler-subtasks
120
+ TEMPLATE_HEADER = """You are a helpful enterprise architect assistant. """
121
+ TEMPLATE_HEADER += """Your goal is to provide accurate and detailed responses to user inquiry. """
122
+ TEMPLATE_HEADER += """You have access to a vast amount of enterprise architecture knowledge, """
123
+ TEMPLATE_HEADER += """and you can understand and generate language fluently. """
124
+ TEMPLATE_HEADER += """You can assist with a wide range of architectural topics, including but not limited to """
125
+ TEMPLATE_HEADER += """business, application, data and technology architectures. """
126
+ TEMPLATE_HEADER += """You should always strive to promote a positive and respectful conversation.
127
+ """
128
+
129
+ TEMPLATE_TASKS = ""
130
+ TEMPLATE_TASKS += """You will be provided with a user inquiry. """
131
+ TEMPLATE_TASKS += """Classify the inquiry into primary category and secondary category. """
132
+ TEMPLATE_TASKS += """Primary categories: Strategy, Application, Recommendation or General Inquiry. """
133
+ TEMPLATE_TASKS += """Strategy secondary categories:
134
+ - Architecture and Technology Strategy
135
+ - Vision
136
+ - Architecture Principles
137
+ """
138
+ TEMPLATE_TASKS += """Application secondary categories:
139
+ - Meet business and technical need
140
+ - Business criticality
141
+ - Roadmap
142
+ - Business Capability
143
+ - Hosting
144
+ """
145
+ TEMPLATE_TASKS += """Recommendation secondary categories:
146
+ - Application rationalisation
147
+ - Landscape simplification
148
+ - Reuse existent invested application
149
+ - Business capability with overlapping applications
150
+ - Opportunities and innovation
151
+ """
152
+ TEMPLATE_TASKS += """General inquiry:
153
+ - Speak to an architect
154
+ """
155
+ TEMPLATE_TASKS += """You may also revise the original inquiry if you think that revising \
156
+ it will ultimately lead to a better response from the language model """
157
+ TEMPLATE_TASKS += """Provide your output in JSON format with the keys: primary, secondary, question.
158
+ """
159
+
160
+ #Template-break-into-specific-prompt-by-category
161
+ strategy_template = """You will be provided with inquiry about architecture strategy.
162
+ Follow these steps to answer user inquiry:
163
+ STEP 1 - Using only the context delimited by triple backticks.
164
+ STEP 2 - Look at applications with a roadmap to invest.
165
+ STEP 3 - Extract the information that is only relevant to help answer the user inquiry
166
+ """
167
+
168
+ application_template = """You will be provided with an inquiry about application architecture.
169
+ Follow these steps to answer user inquiry:
170
+ STEP 1 - Using only the context delimited by triple backticks.
171
+ STEP 2 - Extract the information that is only relevant to help answer the user inquiry
172
+ """
173
+
174
+ recommendation_template = """You will be provided with enterprise architecture inquiry that needs a recommendation.
175
+ Follow these steps to answer user inquiry:
176
+ STEP 1 - Use only the context delimited by triple backticks.
177
+ STEP 2 - Look at applications with low business or technical fit
178
+ STEP 3 - Look at applications with a roadmap different from invest
179
+ STEP 4 - Look at applications hosted on-premise
180
+ STEP 5 - Look at Business capability with overlapping applications
181
+ """
182
+
183
+ general_template = """You will provided with a general inquiry about enterprise architecture IT landscape.
184
+ Follow these steps to answer user queries:
185
+ STEP 1 - use only the context delimited by triple backticks
186
+ STEP 2 - Extract the information that is only relevant to help answer the user inquiry
187
+ """
188
+
189
+ default_template = """
190
+ FINAL STEP - Do not make up or guess ANY extra information. \
191
+ Ask a follow-up question to the user if you need further clarification to understand and answer their inquiry. \
192
+ After a follow-up question if you still don't know the answer or don't find specific information needed to answer the user inquiry \
193
+ return I could not find the information. \
194
+ Ensure that the response contains all relevant context needed to interpret it -
195
+ in other words don't extract small snippets that are missing important context.
196
+ Format the output as a string in the most appropriate style to make it clear, concise and user-friendly for a chatbot response.
197
+ Here is the question: {user_question}
198
+ Here is the context: ```{cdocs}```
199
+ """
200
+ STRATEGY_TEMPLATE = TEMPLATE_HEADER + strategy_template + default_template
201
+ APPLICATION_TEMPLATE = TEMPLATE_HEADER + application_template + default_template
202
+ RECOMMENDATION_TEMPLATE = TEMPLATE_HEADER + recommendation_template + default_template
203
+ GENERAL_TEMPLATE = TEMPLATE_HEADER + general_template + default_template
204
+
205
+
206
+ ###############################################
207
+ ##COLLECTION of prompts for conversation memory
208
+ ###############################################
209
+
210
+ _template = """Given the following conversation and a follow up question,\
211
+ rephrase the follow up question to be a standalone question, in its original language.\
212
+ Chat History:
213
+ {chat_history}
214
+ Follow Up Input: {user_question}
215
+ Standalone question:"""
216
+
217
+ CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)
218
+ DEFAULT_DOCUMENT_PROMPT = PromptTemplate.from_template(template="{page_content}")
219
+
220
+
221
+ def _combine_documents(
222
+ docs, document_prompt=DEFAULT_DOCUMENT_PROMPT, document_separator="\n\n"
223
+ ):
224
+ doc_strings = [format_document(doc, document_prompt) for doc in docs]
225
+
226
+ return document_separator.join(doc_strings)
227
+
228
+
229
+ ##################################################
230
+ ##COLLECTION of prompts - RAG query transformation
231
+ ##################################################
232
+ ## Multi Query
233
+ # Prompt
234
+ multiquery_template = """You are an AI Enterprise Architect language model assistant. Your task is to generate five
235
+ different versions of the given user question to retrieve relevant documents from a vector
236
+ database. By generating multiple perspectives on the user question, your goal is to help
237
+ the user overcome some of the limitations of the distance-based similarity search.
238
+ Provide these alternative questions separated by newlines. Original question: {standalone_question}"""
239
+
240
+ decomposition_template = """You are a helpful enterprise architect assistant that generates multiple sub-questions related to an input question. \n
241
+ The goal is to break down the input into a set of sub-problems / sub-questions that can be answered in isolation. \n
242
+ Generate multiple search queries related to: {user_question} \n
243
+ Output (3 queries):"""
244
+
245
+ decomposition_answer_recursevely_template = """
246
+ Here is the question you need to answer:
247
+
248
+ \n --- \n {question} \n --- \n
249
+
250
+ Here is any available background question + answer pairs:
251
+
252
+ \n --- \n {q_a_pairs} \n --- \n
253
+
254
+ Here is additional context relevant to the question:
255
+
256
+ \n --- \n {context} \n --- \n
257
+
258
+ Use the above context and any background question + answer pairs to answer the question: \n {user_question}
259
+ """
260
+
261
+ rag_fusion_questions_template = """You are a helpful enterprise architect assistant that generates multiple search queries based on a single input query. \n
262
+ Generate multiple search queries related to: {standalone_question} \n
263
+ Output (4 queries):"""
264
+
265
+ # Few Shot Examples
266
+ few_shot_step_back_examples = [
267
+ {
268
+ "input": "Could the members of The Police perform lawful arrests?",
269
+ "output": "what can the members of The Police do?",
270
+ },
271
+ {
272
+ "input": "Jan Sindel was born in what country?",
273
+ "output": "what is Jan Sindel personal history?",
274
+ },
275
+ ]
276
+ # We now transform these to example messages
277
+ few_shot_step_back_examples_prompt = ChatPromptTemplate.from_messages(
278
+ [
279
+ ("human", "{input}"),
280
+ ("ai", "{output}"),
281
+ ]
282
+ )
283
+ few_shot_prompt = FewShotChatMessagePromptTemplate(
284
+ input_variables=["standalone_question"],
285
+ example_prompt=few_shot_step_back_examples_prompt,
286
+ examples=few_shot_step_back_examples,
287
+ )
288
+ few_shot_step_back_prompt = ChatPromptTemplate.from_messages(
289
+ [
290
+ (
291
+ "system",
292
+ """You are an expert at enterprise architecture world knowledge. Your task is to step back and paraphrase a question to a more generic step-back question, which is easier to answer. Here are a few examples:""",
293
+ ),
294
+ # Few shot examples
295
+ few_shot_prompt,
296
+ # New question
297
+ ("user", "{standalone_question}"),
298
+ ]
299
+ )
300
+ # Response prompt
301
+ step_back_response_prompt_template = """You are an expert of enterprise architecture world knowledge. I am going to ask you a question. Your response should be comprehensive and not contradicted with the following context if they are relevant. Otherwise, ignore them if they are not relevant.
302
+
303
+ # {normal_context}
304
+ # {step_back_context}
305
+
306
+ # Original Question: {standalone_question}
307
+ """
308
+
309
+ # HyDE document generation
310
+ hyde_template = """Please write an architecture scientific passage to answer the question
311
+ Question: {standalone_question}
312
+ Passage:"""
313
+
314
+ ##################################################
315
+ ##COLLECTION of prompts - Agentic Workflows
316
+ ##################################################
317
+ #Agent system prompt
318
+ #System prompt embedded into human prompt
319
+ awqa_human_message = HumanMessage(content=[
320
+ {"type": "text", "text": "{user_question}"},
321
+ {"type": "text", "text": "You are a helpful AI assistant, collaborating with other assistants."},
322
+ {"type": "text", "text": "{system_message}"},
323
+ {"type": "text", "text": " Use the provided tools to progress towards answering the question."},
324
+ {"type": "text", "text": " You have access to the following tools: {tool_names}."},
325
+ ])
326
+
327
+ awqa_template = ChatPromptTemplate.from_messages(
328
+ [
329
+ (
330
+ "human",
331
+ "You are a helpful AI assistant, collaborating with other assistants."
332
+ "{system_message}"
333
+ " Use the provided tools to progress towards answering the question: {user_question}"
334
+ " You have access to the following tools: {tool_names}."
335
+ ),
336
+ MessagesPlaceholder(variable_name="messages"),
337
+ ]
338
+ )
339
+
340
+ #DiagramV2T
341
+ diagramV2T_question = "How this architecture solution meets quality standards and alignment with architectural best practices?"
342
+ diagramV2T_template = """An image will be passed to you. Please explain how it meets quality standards and alignment with architecture best practices."""
343
+ agent_diagram_v2t_system_message = diagramV2T_template
344
+
345
+ #DiagramType
346
+ diagram_type_question = "What is this diagram type? Is a flowchart, C4, sequence-diagram, data flow or any other?"
347
+ diagramType_template = """An image will be passed to you. Identify the type of architecture diagram this image is.
348
+ For example, flowchart, C4, sequence flow, data flow, or other.
349
+
350
+ If a type of diagram is not identified that's fine! Just return that it was not possible to identify the architectural diagram style in this image.
351
+
352
+ Do not make up or guess ANY extra information. Only extract exactly what diagram type the image is.
353
+ """
354
+
355
+ agent_diagram_type_system_message = diagramType_template
356
+
357
+ #DiagramComponents
358
+ diagram_component_question = "Please list all components that are part of this current solution architecture"
359
+ diagramComponent_template = """An image will be passed to you. Extract from it all components identified in this image.
360
+ For example, application, software, connector, relationship, user, name, microservice, middleware, container or other.
361
+
362
+ If no components are identified that's fine - you don't need to extract any! Just return an empty list.
363
+
364
+ Do not make up or guess ANY extra information. Only extract what exactly is in the images.
365
+ """
366
+
367
+ agent_diagram_components_system_message = diagramComponent_template
368
+
369
+ #DiagramRiskVulnerabilityMitigation
370
+ diagram_risk_question = "What are the potential risks and vulnerabilities in this current solution architecture, and how can we mitigate them?"
371
+ diagramRVM_template = """An image will be passed to you. Extract from it potential risks and vulnerabilities along with mitigation strategy in current solution architecture.
372
+
373
+ For example, risk: SQL injection, description: application A connected to MySQL database, mitigation: Use prepared
374
+ statements and parameterised queries to handle user input. Also, implement input validation and sanitisation to prevent malicious input from being processed.
375
+
376
+ If no risks, vulnerabilities or mitigation strategy are identified that's fine - you don't need to extract any! Just return an empty list.
377
+
378
+ Do not make up or guess ANY extra information. Only extract what exactly is in the image.
379
+ """
380
+
381
+ agent_diagram_rvm_system_message = diagramRVM_template
382
+
383
+ #DiagramPatternsStandardsBestPractices
384
+ diagram_pattern_question = "Please describe well-architected patterns, standards and best practices that can be applied to the current solution architecture."
385
+ diagramPSBP_template = """An image will be passed to you.
386
+ List well-architected standards, patterns or best-practices that can be applied to the current solution architecture.
387
+ """
388
+ agent_diagram_psbp_system_message = diagramPSBP_template
389
+
390
+ #DiagramVisualQuestionAnswerer Prompts
391
+ diagramVQA_question = """Please describe this diagram"""
392
+ diagramVQA_template = """An image will be passed to you. It should be a flowchart or diagram. Please answer the user question."""
393
+ agent_diagram_vqa_system_message = diagramVQA_template
ea4all/src/shared/state.py ADDED
@@ -0,0 +1,84 @@
1
+ """Shared functions for state management."""
2
+
3
+ import hashlib
4
+ import uuid
5
+ from typing import Any, Literal, Optional, Union
6
+
7
+ from langgraph.graph import MessagesState
8
+ from langchain_core.documents import Document
9
+
10
+ class State(MessagesState):
11
+ next: Optional[str]
12
+ user_feedback: Optional[str]
13
+
14
+ def _generate_uuid(page_content: str) -> str:
15
+ """Generate a UUID for a document based on page content."""
16
+ md5_hash = hashlib.md5(page_content.encode()).hexdigest()
17
+ return str(uuid.UUID(md5_hash))
18
+
19
+ def reduce_docs(
20
+ existing: Optional[list[Document]],
21
+ new: Union[
22
+ list[Document],
23
+ list[dict[str, Any]],
24
+ list[str],
25
+ str,
26
+ Literal["delete"],
27
+ ],
28
+ ) -> list[Document]:
29
+ """Reduce and process documents based on the input type.
30
+
31
+ This function handles various input types and converts them into a sequence of Document objects.
32
+ It can delete existing documents, create new ones from strings or dictionaries, or return the existing documents.
33
+ It also combines existing documents with the new one based on the document ID.
34
+
35
+ Args:
36
+ existing (Optional[Sequence[Document]]): The existing docs in the state, if any.
37
+ new (Union[Sequence[Document], Sequence[dict[str, Any]], Sequence[str], str, Literal["delete"]]):
38
+ The new input to process. Can be a sequence of Documents, dictionaries, strings, a single string,
39
+ or the literal "delete".
40
+ """
41
+ if new == "delete":
42
+ return []
43
+
44
+ existing_list = list(existing) if existing else []
45
+ if isinstance(new, str):
46
+ return existing_list + [
47
+ Document(page_content=new, metadata={"uuid": _generate_uuid(new)})
48
+ ]
49
+
50
+ new_list = []
51
+ if isinstance(new, list):
52
+ existing_ids = set(doc.metadata.get("uuid") for doc in existing_list)
53
+ for item in new:
54
+ if isinstance(item, str):
55
+ item_id = _generate_uuid(item)
56
+ new_list.append(Document(page_content=item, metadata={"uuid": item_id}))
57
+ existing_ids.add(item_id)
58
+
59
+ elif isinstance(item, dict):
60
+ metadata = item.get("metadata", {})
61
+ item_id = metadata.get("uuid") or _generate_uuid(
62
+ item.get("page_content", "")
63
+ )
64
+
65
+ if item_id not in existing_ids:
66
+ new_list.append(
67
+ Document(**{**item, "metadata": {**metadata, "uuid": item_id}})
68
+ )
69
+ existing_ids.add(item_id)
70
+
71
+ elif isinstance(item, Document):
72
+ item_id = item.metadata.get("uuid", "")
73
+ if not item_id:
74
+ item_id = _generate_uuid(item.page_content)
75
+ new_item = item.copy(deep=True)
76
+ new_item.metadata["uuid"] = item_id
77
+ else:
78
+ new_item = item
79
+
80
+ if item_id not in existing_ids:
81
+ new_list.append(new_item)
82
+ existing_ids.add(item_id)
83
+
84
+ return existing_list + new_list
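A quick behavioral sketch with illustrative values: strings and dicts are wrapped into Document objects keyed by a content-derived uuid, and the literal "delete" clears the state.

docs = reduce_docs(None, ["Application A runs on-premise."])
docs = reduce_docs(docs, [{"page_content": "Application B is hosted on AWS."}])
assert len(docs) == 2 and "uuid" in docs[0].metadata
docs = reduce_docs(docs, "delete")  # returns an empty list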
ea4all/src/shared/utils.py ADDED
@@ -0,0 +1,487 @@
1
+ """Shared utility functions used in the project.
2
+
3
+ Functions:
4
+
5
+ """
6
+
7
+ import os
8
+ import datetime
9
+ import getpass
10
+ import base64
11
+ import json
12
+ import re
13
+ from dotenv import load_dotenv, find_dotenv
14
+ import markdown
15
+ from markdownify import markdownify as md2text
16
+ from io import BytesIO
17
+ import pandas as pd
18
+
19
+ from pydantic import BaseModel, SecretStr
20
+
21
+ from langchain_community.vectorstores import Chroma
22
+ from langchain import hub
23
+ from langchain_core.prompts import PromptTemplate
24
+
25
+ #Model & Index & Embeddings
26
+ from langchain_openai import (
27
+ ChatOpenAI,
28
+ )
29
+
30
+ from langchain_core.output_parsers import (
31
+ PydanticOutputParser,
32
+ )
33
+
34
+ from langchain_core.messages import (
35
+ AIMessage,
36
+ HumanMessage,
37
+ get_buffer_string,
38
+ )
39
+
40
+ from PIL import Image
41
+
42
+ from ea4all.src.shared.prompts import (
43
+ LLAMA31_CHAT_PROMPT_FORMAT,
44
+ )
45
+
46
+ from ea4all.src.shared.configuration import BaseConfiguration as ea4all_config
47
+
48
+ ############
49
+ ##INIT model
50
+ ############
51
+ #initialise model / constants
52
+ EA4ALL_ARCHITECTURE = "ea4all_architecture.png"
53
+ EA4ALL_PODCAST = "ea4all_podcast.wav"
54
+
55
+ class CFG:
56
+ # Constants
57
+ EA4ALL_OVERVIEW = "ea4all_overview.png"
58
+ EA4ALL_ABOUT = "ea4all_overview.txt"
59
+ APM_MOCK_QNA = "apm_qna_mock.txt"
60
+ STREAM_SLEEP = 0.05
61
+ REGEX_BACKTICKS = r"```(.*?)```"
62
+
63
+ # LLMs
64
+ #model = {"gpt-4":'gpt-4o-mini', "gpt-4o":'gpt-4o'}
65
+ #llama = {"11": "meta-llama/llama-3.2-11B-Vision-Instruct", "90":"meta-llama/llama-3.2-90B-Vision-Instruct", "70":"meta-llama/Llama-3.1-70B-Instruct", "73":"meta-llama/Llama-3.3-70B-Instruct"}
66
+
67
+ #hf_model="meta-llama/Llama-3.1-70B-Instruct"
68
+ #hf_api_base="https://api-inference.huggingface.co/models/"
69
+
70
+ #hf_max_tokens=16192
71
+ #max_new_tokens = 4096
72
+ #llama32_max_tokens = 4096 ##TOKEN ISSUE LLAMA-3.2 w/ ChatOpenAI not working tokens > 4096 2024-10-13
73
+
74
+ #temperature = 0
75
+ top_p = 0.95
76
+ repetition_penalty = 1.15
77
+
78
+ # splitting
79
+ split_chunk_size = 500
80
+ split_overlap = 0
81
+
82
+ # embeddings
83
+ #embeddings_model = OpenAIEmbeddings()
84
+
85
+ # similar passages
86
+ k = 3
87
+
88
+ #debug
89
+ verbose=True
90
+
91
+ #streamming
92
+ #streamming=True
93
+
94
+ #VQA resized images - maximum resolution for Llama-3.2
95
+ RESIZE_TO = 512
96
+ MAX_WIDTH = 1024
97
+ MAX_HEIGHT = 768
98
+
99
+ ##Diagrams format
100
+ diagram_format = "png"
101
+
102
+ # paths ea4all/src/tools
103
+ #apm_store = "/Users/avfranco/Documents/GitHub/ea4all-agentic-staging/ea4all/apm_store/"
104
+ #apm_path = apm_store + 'APM-ea4all (test-split).xlsx'
105
+ #dbr_demo = apm_store + "reference_architecture_dbr_demo.txt"
106
+
107
+ #'ea4all_images = "/Users/avfranco/Documents/GitHub/ea4all-agentic-staging/ea4all/images/"
108
+
109
+ #apm_faiss = "apm_store"
110
+ #faiss_index = 'apm_faiss_index'
111
+
112
+ ###################################
113
+ ##COLLECTION of re-usable functions
114
+ ###################################
115
+
116
+ #return current date-time
117
+ def _get_datetime():
118
+ now = datetime.datetime.now()
119
+ return now.strftime("%m/%d/%Y, %H:%M:%S")
120
+
121
+ def _get_formatted_date():
122
+ current_date = datetime.datetime.now()
123
+ formatted_date = current_date.strftime("%d %B %Y")
124
+
125
+ return formatted_date
126
+
127
+ #calculate dif end-start execution
128
+ def time_elapsed(start,end):
129
+ time_elapsed = int(round(end - start, 0))
130
+ time_elapsed_str = f'{time_elapsed}'
131
+
132
+ return time_elapsed_str
133
+
134
+ def _join_paths(*paths):
135
+ """
136
+ Join two or more paths using os.path.join.
137
+
138
+ Parameters:
139
+ *paths: str
140
+ Two or more path components to be joined.
141
+
142
+ Returns:
143
+ str
144
+ The joined path.
145
+ """
146
+ return os.path.join(*paths)
147
+
148
+ #get user request info
149
+ def get_user_identification(request):
150
+ if request:
151
+ try:
152
+ user_pip = request.headers.get('X-Forwarded-For')
153
+ return user_pip.split(',')[0]
154
+ except Exception:
155
+ print(f"user info: {request}")
156
+ return request.client.host
157
+
158
+ return "ea4all_agent"
159
+
160
+ #Initialise model
161
+ ## SETUP LLM CLIENT
162
+ def get_llm_client(model, api_base_url=None,temperature=0, streaming=False, tokens=ea4all_config.max_tokens) -> ChatOpenAI:
163
+ """Initializes and returns a ChatOpenAI client based on the specified model and parameters."""
164
+ client = ChatOpenAI()
165
+
166
+ if model.startswith("gpt-"):
167
+ client = ChatOpenAI(
168
+ model=model,
169
+ temperature=temperature,
170
+ streaming=streaming,
171
+ max_completion_tokens=tokens,
172
+ stream_usage=True
173
+ )
174
+ elif "llama" in model.lower(): # Meta-llama models
175
+ client = ChatOpenAI(
176
+ model=model,
177
+ api_key=SecretStr(os.environ['HUGGINGFACEHUB_API_TOKEN']),
178
+ base_url=_join_paths(api_base_url, model, "v1/"),
179
+ temperature=temperature,
180
+ streaming=streaming,
181
+ max_completion_tokens=tokens,
182
+ stream_usage=True,
183
+ )
184
+
185
+ return client
186
+
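A hedged usage sketch for the factory above; the model names are examples only, and OPENAI_API_KEY or HUGGINGFACEHUB_API_TOKEN must be set for the respective branch.

llm = get_llm_client("gpt-4o-mini", temperature=0, streaming=False)
hf_llm = get_llm_client(
    "meta-llama/Llama-3.3-70B-Instruct",
    api_base_url="https://api-inference.huggingface.co/models/",
)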
187
+ #load local env variables
188
+ def load_local_env(local):
189
+ ###read local .env file
190
+ _ = load_dotenv(find_dotenv())
191
+ if local not in os.environ:
192
+ os.environ[local] = getpass.getpass(f"Provide your {local} Key")
193
+ return os.environ[local]
194
+
195
+ #load landscape data into chroma
196
+ def load_to_chroma(documents, embeddings, path, collection_name="apm_collection"):
197
+ #Read chromadb chroma-apm-db
198
+ chroma_collection = Chroma (
199
+ collection_name=collection_name,
200
+ persist_directory=path,
201
+ embedding_function=embeddings
202
+ )
203
+
204
+ if chroma_collection._collection.count():
205
+ chroma_collection.delete_collection()
206
+ else:
207
+ #Add apm records
208
+ chroma_collection = Chroma.from_documents(
209
+ collection_name=collection_name,
210
+ persist_directory=path,
211
+ documents=documents,
212
+ embedding=embeddings
213
+ )
214
+ chroma_collection.persist()
215
+
216
+ return chroma_collection
217
+
218
+ ##Convert gradio chat_history to langchain chat_history_format
219
+ def get_history_gradio(history, chat_history=[]):
220
+ history_langchain_format = []
221
+ #triggered by loaded memory runnable to replace ConversationMemoryBuffer.load_memory_variables
222
+ #if chat_history or not history:
223
+ # memory = chat_history
224
+ #triggered by loaded_memory runnable
225
+ #else:
226
+ history = history["chat_memory"]
227
+
228
+ for human, ai in history:
229
+ history_langchain_format.append(HumanMessage(content=human))
230
+ history_langchain_format.append(AIMessage(content=ai))
231
+
232
+ history = {"history":get_buffer_string(history_langchain_format)}
233
+
234
+ return history
235
+
236
+ #retrieve relevant questions based on user interaction
237
+ def get_vqa_examples() -> list:
238
+ examples=[
239
+ {"text": "Describe this image.", "files": ["ea4all/images/multi-app-architecture.png"]},
240
+ {"text": "Assess any risk and vulnerabilities in the current solution.", "files": ["ea4all/images/ea4all_architecture.png"]},
241
+ ]
242
+ return examples
243
+
244
+ # Function to encode the image
245
+ def encode_image(image_path):
246
+ with open(image_path, "rb") as image_file:
247
+ return base64.b64encode(image_file.read()).decode('utf-8')
248
+
249
+ def resize_image_1(raw_image, input_size):
250
+ w, h = raw_image.size
251
+ scale = input_size / max(w, h)
252
+ new_w = int(w * scale)
253
+ new_h = int(h * scale)
254
+ resized_image = raw_image.resize((new_w, new_h))
255
+
256
+ return resized_image
257
+
258
+ def resize_image_2(image, width):
259
+ wpercent = width / float(image.size[0])
260
+ hsize = int( float(image.size[1]) * wpercent )
261
+ raw_image = image.resize([width, hsize])
262
+
263
+ return raw_image
264
+
265
+ def resize_image_3(image):
266
+ from PIL import Image
267
+ # Get the current size
268
+ width, height = image.size
269
+
270
+ # Calculate the new size maintaining the aspect ratio
271
+ if width > CFG.MAX_WIDTH or height > CFG.MAX_HEIGHT:
272
+ ratio = min(CFG.MAX_WIDTH / width, CFG.MAX_HEIGHT / height)
273
+ new_width = int(width * ratio)
274
+ new_height = int(height * ratio)
275
+ else:
276
+ new_width, new_height = width, height
277
+
278
+ # Resize the image
279
+ image = image.resize((new_width, new_height), Image.Resampling.LANCZOS)
280
+
281
+ # Return new resized image
282
+ return image
283
+
284
+ #Encode PIL.Image to base64
285
+ def encode_raw_image(raw_image):
286
+ # Create a BytesIO buffer
287
+ buffer = BytesIO()
288
+
289
+ # Save the image to the buffer in PNG format
290
+ raw_image.save(buffer, format='PNG')
291
+
292
+ # Get the content of the buffer
293
+ img_bytes = buffer.getvalue()
294
+
295
+ # Encode the bytes to base64
296
+ img_base64 = base64.b64encode(img_bytes)
297
+
298
+ # Convert the bytes to string
299
+ img_str = img_base64.decode('utf-8')
300
+
301
+ return img_str
302
+
303
+ #Return a raw image ready to OpenAI GPT4-Vision
304
+ def get_raw_image(image_path):
305
+ # Open & Resize & Encode image
306
+ diagram = Image.open(image_path)
307
+ w, h = diagram.size
308
+ if w > CFG.RESIZE_TO or h > CFG.RESIZE_TO:
309
+ resized_image = resize_image_3(diagram)
310
+ else:
311
+ resized_image = diagram
312
+
313
+ #Encode diagram
314
+ raw_image = encode_raw_image(resized_image)
315
+
316
+ return raw_image
317
+
318
+ def load_mock_content(file_path) -> str:
319
+ try:
320
+ with open(_join_paths(ea4all_config.ea4all_store,file_path), "r") as file:
321
+ content = file.read()
322
+ return content
323
+ except ValueError as e:
324
+ return str(e)
325
+
326
+ def print_json_to_md(data, indent=0, column=None):
327
+ try:
328
+ result = ""
329
+ header = ""
330
+ body = ""
331
+ if isinstance(data, dict):
332
+ for key, value in data.items():
333
+ result += print_json_to_md(value, indent + 2, key)
334
+ return result
335
+ elif isinstance(data, list):
336
+ if column: # Print list items as a Markdown table
337
+ header = ' ' * indent + f"| {' | '.join(data[0].keys())} | \n"
338
+ header += ' ' * indent + f"| {' | '.join(['---'] * len(data[0]))} | \n"
339
+ for item in data:
340
+ body += ' ' * indent + f"\n\n | {' | '.join(str(item[k]) for k in item.keys())} |"
341
+ result += header + body
342
+ return result
343
+ else:
344
+ for item in data:
345
+ header = ' ' * indent + f"| {' | '.join(data[0].keys())} |"
346
+ body += ' ' * indent + f"\n\n | {' | '.join(str(item[k]) for k in item.keys())} |"
347
+ result += header + "\n" + body
348
+ return result
349
+ else:
350
+ header += ' ' * indent + f"| {column} "
351
+ body += f"{str(data)}\n\n"
352
+ result += header + body
353
+ return result
354
+
355
+ except Exception as e:
356
+ return f"{e} - {data}"
357
+
358
+ def markdown_to_plain_text(md):
359
+ # Convert Markdown to HTML
360
+ html = markdown.markdown(md)
361
+ # Convert HTML to plain text using markdownify
362
+ plain_text = md2text(html)
363
+ return plain_text
364
+
365
+ def extract_structured_output(response):
366
+ ##EXTRACT Topic from the content
367
+ try:
368
+ return json.loads(response)
369
+ except ValueError:
370
+ match = re.search(CFG.REGEX_BACKTICKS, response, re.DOTALL)
371
+
372
+ if match:
373
+ return json.loads(match.group(1))
374
+ else:
375
+ return None
376
+
377
+ def get_predicted_num_tokens(llm, content):
378
+ return llm.get_num_tokens(content)
379
+
380
+ def get_predicted_num_tokens_from_prompt(llm, prompt, values):
381
+ final_prompt = prompt.format(**values)
382
+ return llm.get_num_tokens(final_prompt)
383
+
384
+ def set_max_new_tokens(predicted_tokens):
385
+ #Return max new tokens to be generated
386
+ return int((ea4all_config.max_tokens - predicted_tokens) * 0.95)
387
+
388
+ def escape_special_characters(input_string):
389
+ # Use json.dumps to escape special characters
390
+ escaped_string = json.dumps(input_string)
391
+ # Remove the surrounding double quotes added by json.dumps
392
+ return escaped_string[1:-1]
393
+
394
+ def clean_and_load_json(content) -> dict:
395
+ try:
396
+ json_data = json.loads(content)
397
+ return json_data
398
+ except ValueError:
399
+ clean_string = content.replace("\n","").replace("json","")
400
+ json_data = json.loads(clean_string)
401
+ return json_data
402
+
403
+ def extract_response_from_backticks(response):
404
+ pattern = r"```(.*?)```"
405
+ match = re.search(pattern, str(response), re.DOTALL)
406
+
407
+ return match.group(1) if match else response
408
+
409
+ def extract_topic_from_business_input(response) -> dict:
410
+ ##IS JSON already
411
+ if isinstance(response, dict):
412
+ return response
413
+
414
+ ##EXTRACT Topic from the content
415
+ topic = extract_response_from_backticks(response)
416
+
417
+ return clean_and_load_json(topic)
418
+
419
+ ## LLM STRUCTURED OUTPUT Helper functions
420
+ def extract_landscape(topic):
421
+ # Prompt
422
+ extract_landscape_prompt = hub.pull('learn-it-all-do-it-all/ea4all_togaf_landscape_business_query')
423
+
424
+ # Set up a parser: LandscapeAsIs
425
+ parser = PydanticOutputParser(pydantic_object=topic)
426
+
427
+ final_prompt = extract_landscape_prompt.partial(
428
+ format_instructions=parser.get_format_instructions(),
429
+ ai_output=LLAMA31_CHAT_PROMPT_FORMAT,
430
+ )
431
+
432
+ return final_prompt
433
+
434
+ def extract_principles(topic):
435
+
436
+ # Set up a parser: LandscapeAsIs
437
+ parser = PydanticOutputParser(pydantic_object=topic)
438
+
439
+ #PROMPT REVISED TO WORK w/ Llama-3
440
+ principle_template = """Identify the list of principles and its meaning from the given context.
441
+ Do not add any superfluous information.
442
+ Context: \n {strategic_principles} \n
443
+ Output your answer as JSON that matches the given schema and nothing else: \n{format_instructions}\n
444
+ """
445
+
446
+ prompt = PromptTemplate(
447
+ template=principle_template,
448
+ input_variables=["strategic_principles"],
449
+ partial_variables={
450
+ "format_instructions": parser.get_format_instructions(),
451
+ },
452
+ )
453
+
454
+ return prompt
455
+
456
+ # Task-1: Identify the business requirements, objectives, user journey, and all other relevant information
457
+ def extract_detailed_business_requirements(llm, topic: type[BaseModel], name:str, values:dict):
458
+ parser = PydanticOutputParser(pydantic_object=topic)
459
+
460
+ hub_prompt = hub.pull('learn-it-all-do-it-all/ea4all_extract_business_topic')
461
+ hub_prompt = hub_prompt.partial(
462
+ topic=name,
463
+ format_instructions=parser.get_format_instructions(),
464
+ )
465
+
466
+ task_1_requirement = hub_prompt | llm | parser
467
+ response = task_1_requirement.invoke(
468
+ input=values,
469
+ config={
470
+ 'tags': ['assess_business_query'],
471
+ 'run_name': name # Custom run name
472
+ }
473
+ )
474
+
475
+ return response
476
+
477
+ # Post-processing
478
+ def format_docs(docs):
479
+ return "\n".join(doc.page_content for doc in docs)
480
+
481
+ #load mock data
482
+ def get_relevant_questions(source: str) -> list:
483
+ relevant_questions = []
484
+ mock = load_mock_content(source)
485
+ for line in mock.splitlines(): relevant_questions += [line]
486
+
487
+ return relevant_questions
ea4all/src/shared/vectorstore.py ADDED
@@ -0,0 +1,196 @@
1
+ from langchain_core.runnables import RunnableConfig
2
+ from langchain.docstore.document import Document
3
+ from langchain_core.embeddings import Embeddings
4
+ from langchain_core.vectorstores import VectorStoreRetriever
5
+
6
+ import ast
7
+ import numpy as np
8
+ import pandas as pd
9
+ from contextlib import contextmanager
10
+ from typing import Generator
11
+
12
+ from ea4all.src.shared.utils import _join_paths
13
+ from ea4all.src.shared.configuration import BaseConfiguration
14
+
15
+ global _vectorstore
16
+ _vectorstore = None
17
+
18
+ def make_text_encoder(model: str) -> Embeddings:
19
+ """Connect to the configured text encoder."""
20
+ provider, model = model.split("/", maxsplit=1)
21
+ match provider:
22
+ case "openai":
23
+ from langchain_openai import OpenAIEmbeddings
24
+
25
+ return OpenAIEmbeddings(model=model)
26
+ case _:
27
+ raise ValueError(f"Unsupported embedding provider: {provider}")
28
+
29
+ @contextmanager
30
+ def make_faiss_retriever(
31
+ configuration: BaseConfiguration, embeddings: Embeddings
32
+ ) -> Generator[VectorStoreRetriever, None, None]:
33
+ """Configure this agent to connect to a FAISS index & namespaces."""
34
+ from langchain_community.docstore.in_memory import InMemoryDocstore
35
+ from langchain_community.vectorstores import FAISS
36
+ import faiss
37
+
38
+ global _vectorstore
39
+
40
+ if _vectorstore is None:
41
+ try:
42
+ _vectorstore = FAISS.load_local(
43
+ folder_path=configuration.ea4all_store,
44
+ embeddings=embeddings,
45
+ index_name=configuration.apm_faiss,
46
+ allow_dangerous_deserialization=True)
47
+
48
+ except Exception as e:
49
+ # Create an empty index
50
+ index = faiss.IndexFlatL2(len(embeddings.embed_query("")))
51
+
52
+ #Initialize an empty FAISS vectorstore
53
+ _vectorstore = FAISS(
54
+ embedding_function=embeddings,
55
+ index=index,
56
+ docstore=InMemoryDocstore(),
57
+ index_to_docstore_id={},
58
+ )
59
+ #apm_docs = get_apm_excel_content(configuration)
60
+ #_vectorstore = FAISS.from_documents(apm_docs, embeddings)
61
+ #_vectorstore.save_local(folder_path=configuration.ea4all_store, index_name=configuration.apm_faiss,)
62
+
63
+ search_kwargs = configuration.search_kwargs
64
+
65
+ yield _vectorstore.as_retriever(search_type="similarity", search_kwargs=search_kwargs)
66
+
67
+ @contextmanager
68
+ def make_retriever(
69
+ config: RunnableConfig,
70
+ ) -> Generator[VectorStoreRetriever, None, None]:
71
+ """Create a retriever for the agent, based on the current configuration."""
72
+ configuration = BaseConfiguration.from_runnable_config(config)
73
+ embeddings = make_text_encoder(configuration.embedding_model)
74
+ match configuration.retriever_provider:
75
+ case "faiss":
76
+ with make_faiss_retriever(configuration, embeddings) as retriever:
77
+ yield retriever
78
+
79
+ case _:
80
+ raise ValueError(
81
+ "Unrecognized retriever_provider in configuration. "
82
+ f"Expected one of: {', '.join(BaseConfiguration.__annotations__['retriever_provider'].__args__)}\n"
83
+ f"Got: {configuration.retriever_provider}"
84
+ )
85
+
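Usage sketch, assuming the default FAISS setup and an OpenAI embedding key; the query text is illustrative.

from langchain_core.runnables import RunnableConfig

config: RunnableConfig = {"configurable": {"retriever_provider": "faiss"}}
with make_retriever(config) as retriever:
    docs = retriever.invoke("Which applications have an invest roadmap?")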
86
+ #convert dataframe to langchain document structure, added user_ip
87
+ def panda_to_langchain_document(dataframe,user_ip):
88
+ # create an empty list to store the documents
89
+ apm_documents = []
90
+ # iterate over the rows of the dataframe
91
+ for index, row in dataframe.iterrows():
92
+ # create a document object from the row values for all df columns
93
+ page_content = ""
94
+ application = ""
95
+ capability = ""
96
+ description = ""
97
+ fit = ""
98
+ roadmap = ""
99
+ for column in dataframe.columns:
100
+ column = ' '.join(column.split())
101
+ page_content += f" {column}:{row[column]}"
102
+ if 'application' in column.lower(): application = row[column]
103
+ elif 'capabilit' in column.lower(): capability = row[column]
104
+ elif 'desc' in column.lower(): description = row[column]
105
+ elif 'business fit' in column.lower(): fit = row[column]
106
+ elif 'roadmap' in column.lower(): roadmap = row[column]
107
+ doc = Document(
108
+ page_content=page_content,
109
+ metadata={
110
+ "source": application,
111
+ "capability": capability,
112
+ "description": description,
113
+ "business fit": fit,
114
+ "roadmap": roadmap,
115
+ "row_number": index, "namespace": user_ip}
116
+ )
117
+ # append the document object to the list
118
+ apm_documents.append(doc)
119
+ return(apm_documents)
120
+
121
+ #local landscape data (excel file)
122
+ def apm_dataframe_loader(file):
123
+ pd.set_option('display.max_colwidth', None)
124
+ df = pd.read_excel(file)
125
+ df = df.dropna(axis=0, how='all')
126
+ df = df.dropna(axis=1, how='all')
127
+ df = df.fillna('NaN')
128
+
129
+ return df
130
+
131
+ ##New APM Excel loader
132
+ #Removed df from return
133
+ def get_apm_excel_content(config:RunnableConfig, file=None, user_ip="ea4all_agent"):
134
+
135
+ if file is None:
136
+ file = _join_paths(
137
+ getattr(config, "ea4all_store", BaseConfiguration.ea4all_store),
138
+ getattr(config, "apm_catalogue", BaseConfiguration.apm_catalogue)
139
+ )
140
+
141
+ #load file into dataframe
142
+ df = apm_dataframe_loader(file)
143
+ #add user_id into df
144
+ df['namespace'] = user_ip
145
+
146
+ apm_docs = panda_to_langchain_document(df, user_ip)
147
+ return apm_docs
148
+
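A bootstrap sketch that mirrors the commented-out lines in make_faiss_retriever above; paths, index name and embedding model come from the BaseConfiguration defaults.

from langchain_community.vectorstores import FAISS

apm_docs = get_apm_excel_content(BaseConfiguration())
embeddings = make_text_encoder(BaseConfiguration.embedding_model)
vs = FAISS.from_documents(apm_docs, embeddings)
vs.save_local(folder_path=BaseConfiguration.ea4all_store, index_name=BaseConfiguration.apm_faiss)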
149
+ def remove_user_apm_faiss(config, db, ea4all_user):
150
+ #apm_vectorstore.docstore.__dict__["_dict"][apm_vectorstore.index_to_docstore_id[0]].metadata
151
+
152
+ #check if user's uploaded any apm before
153
+ byod = ea4all_user in str(db.docstore._dict.values())
154
+
155
+ #if yes
156
+ if byod:
157
+ removed_ids = []
158
+ for id, doc in db.docstore._dict.items():
159
+ if doc.metadata['namespace'] == ea4all_user:
160
+ removed_ids.append(id)
161
+
162
+ ##save updated index
163
+ if removed_ids:
164
+ index_ids = [
165
+ i_id
166
+ for i_id, d_id in db.index_to_docstore_id.items()
167
+ if d_id in removed_ids
168
+ ]
169
+ #Remove ids from docstore
170
+ db.delete(ids=removed_ids)
171
+ #Remove the corresponding embeddings from the FAISS index
172
+ db.index.remove_ids(np.array(index_ids,dtype=np.int64))
173
+ #Reorg embeddings
174
+ db.index_to_docstore_id = {
175
+ i: d_id
176
+ for i, d_id in enumerate(db.index_to_docstore_id.values())
177
+ }
178
+ #save updated index
179
+ db.save_local(folder_path=config.ea4all_store, index_name=config.apm_faiss)
180
+
181
+ #Get faiss index as a retriever
182
+ def retriever_faiss(db, user_ip="ea4all_agent"):
183
+ ##size: len(retriever.vectorstore.index_to_docstore_id), retriever.vectorstore.index.ntotal
184
+
185
+ #check if user's BYOData
186
+ byod = user_ip in str(db.docstore._dict.values())
187
+
188
+ if not byod:
189
+ namespace="ea4all_agent"
190
+ else:
191
+ namespace = user_ip
192
+
193
+ retriever = db.as_retriever(search_type="similarity",
194
+ search_kwargs={'k': 50, 'score_threshold': 0.8, 'filter': {'namespace':namespace}})
195
+
196
+ return retriever
ea4all/src/tools/tools.py ADDED
@@ -0,0 +1,111 @@
1
+ from typing import Literal, Annotated
2
+ from typing_extensions import TypedDict
3
+ import json
4
+ import tempfile
5
+ import os
6
+
7
+ from langchain_core.runnables import RunnableLambda, RunnableConfig
8
+
9
+ from langgraph.graph import END
10
+ from langgraph.types import Command
11
+ from langgraph.prebuilt import InjectedState
12
+
13
+ from langchain_community.utilities import BingSearchAPIWrapper
14
+ from langchain_community.tools.bing_search.tool import BingSearchResults
15
+ from langchain_community.document_loaders import JSONLoader
16
+
17
+ from langchain.agents import tool
18
+
19
+ from ea4all.src.shared.configuration import (
20
+ BaseConfiguration
21
+ )
22
+
23
+ from ea4all.src.shared.state import (
24
+ State
25
+ )
26
+
27
+ from ea4all.src.shared.utils import (
28
+ get_llm_client,
29
+ format_docs,
30
+ )
31
+
32
+ def make_supervisor_node(config: RunnableConfig, members: list[str]) -> RunnableLambda:
33
+ options = ["FINISH"] + members
34
+ system_prompt = (
35
+ "You are a supervisor tasked with managing a conversation between the"
36
+ f" following workers: {members}. Given the following user request,"
37
+ " respond with the worker to act next. Each worker will perform a"
38
+ " task and respond with their results and status. When finished,"
39
+ " respond with FINISH."
40
+ )
41
+
42
+ configuration = BaseConfiguration.from_runnable_config(config)
43
+ model = get_llm_client(
44
+ configuration.supervisor_model,
45
+ api_base_url="",
46
+ )
47
+
48
+ class Router(TypedDict):
49
+ """Worker to route to next. If no workers needed, route to FINISH."""
50
+
51
+ next: Literal[*options]
52
+
53
+ def supervisor_node(state: State) -> Command[Literal[*members, "__end__"]]:
54
+ """An LLM-based router."""
55
+ messages = [
56
+ {"role": "system", "content": system_prompt},
57
+ ] + state["messages"]
58
+ response = model.with_structured_output(Router).invoke(messages)
59
+ goto = response["next"]
60
+ if goto == "FINISH":
61
+ goto = END
62
+
63
+ return Command(goto=goto, update={"next": goto})
64
+
65
+ return RunnableLambda(supervisor_node)
66
+
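Illustrative wiring only; the member names are placeholders rather than the actual EA4ALL graph nodes, and an OpenAI key is assumed for the supervisor model.

from langchain_core.runnables import RunnableConfig

supervisor = make_supervisor_node(RunnableConfig(configurable={}), ["apm_expert", "diagram_expert"])
command = supervisor.invoke({"messages": [("user", "Assess our application portfolio")]})
# command.goto is one of the members, or END once the router answers FINISH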
67
+ async def websearch(state: dict[str, dict | str]) -> dict[str,dict[str,str]]:
68
+ """
69
+ Web search based on the re-phrased question.
70
+
71
+ Args:
72
+ state (dict): The current graph state
73
+ config (RunnableConfig): Configuration with the model used for query analysis.
74
+
75
+ Returns:
76
+ state (dict): Updates documents key with appended web results
77
+ """
78
+
79
+ ##API Wrapper
80
+ bing_subscription_key = os.environ.get("BING_SUBSCRIPTION_KEY", "")
81
+ bing_search_url = os.environ.get("BING_SEARCH_URL", "https://api.bing.microsoft.com/v7.0/search")
82
+ search = BingSearchAPIWrapper(
83
+ bing_subscription_key=bing_subscription_key,
84
+ bing_search_url=bing_search_url
85
+ )
86
+
87
+ question = getattr(state,'messages')[-1].content if getattr(state,'messages', False) else getattr(state,'question')
88
+
89
+ ##Bing Search Results
90
+ web_results = BingSearchResults(
91
+ api_wrapper=search,
92
+ handle_tool_error=True,
93
+ args_schema={"k":"5"},
94
+ )
95
+
96
+ result = await web_results.ainvoke({"query": question})
97
+
98
+ fixed_string = result.replace("'", "\"")
99
+ result_json = json.loads(fixed_string)
100
+
101
+ # Create a temporary file
102
+ with tempfile.NamedTemporaryFile(mode='w', delete=False) as temp_file:
103
+ # Write the JSON data to the temporary file
104
+ json.dump(result_json, temp_file)
105
+ temp_file.flush()
106
+
107
+ # Load the JSON data from the temporary file
108
+ loader = JSONLoader(file_path=temp_file.name, jq_schema=".[]", text_content=False)
109
+ docs = loader.load()
110
+
111
+ return {"messages": {"role":"assistant", "content":format_docs(docs)}}
ea4all/utils/utils.py ADDED
@@ -0,0 +1,182 @@
1
+ import ea4all.src.ea4all_apm.graph as e4a
2
+ import ea4all.src.ea4all_vqa.graph as e4v
3
+ import ea4all.src.ea4all_gra.graph as e4t
4
+ import ea4all.src.shared.utils as e4u
5
+ from ea4all.src.ea4all_gra.configuration import AgentConfiguration as gra
6
+ from ea4all.src.shared import vectorstore
7
+ from ea4all.src.shared.configuration import BaseConfiguration
8
+ from ea4all.src.ea4all_indexer.configuration import IndexConfiguration
9
+ from ea4all.src.ea4all_indexer.graph import indexer_graph
10
+
11
+ from langchain_community.document_loaders import ConfluenceLoader
12
+ from langchain_core.messages import ChatMessage
13
+ from langsmith import Client
14
+
15
+ import uuid
16
+ import os
17
+ import time
18
+ import pandas as pd
19
+ import gradio as gr
20
+
21
+ class UIUtils:
22
+ #ea4all-about
23
+ @staticmethod
24
+ def ea4all_about(show_api=False):
25
+ readme = e4u.load_mock_content(e4u.CFG.EA4ALL_ABOUT)
26
+ return readme
27
+
28
+ #vqa_chatbot (ChatInterface -> Chatbot)
29
+ @staticmethod
30
+ def add_message(message, history):
31
+ if message["text"] is not None:
32
+ history.append({"role": "user", "content": message["text"]})
33
+
34
+ if len(message['files']) > 0:
35
+ history.append({"role": "user", "content": {"path": message['files'][-1]}})
36
+
37
+ return (
38
+ gr.MultimodalTextbox(value=message, interactive=True, placeholder="Upload a diagram and type your message..."),
39
+ history
40
+ )
41
+
42
+ #Upload & clear business requirement
43
+ @staticmethod
44
+ def load_dbr(file):
45
+ return file.decode()
46
+
47
+ #Load demo business requirements
48
+ def init_dbr():
49
+ # Open the file in read mode ('r')
50
+ with open(e4u._join_paths(BaseConfiguration.ea4all_store, gra.dbr_mock), 'r') as file:
51
+ # Read the contents of the file
52
+ contents = file.read()
53
+ return contents
54
+
55
+ def init_df(show_api=False):
56
+ return vectorstore.apm_dataframe_loader(e4u._join_paths(BaseConfiguration.ea4all_store, IndexConfiguration.apm_catalogue))
57
+
58
+ #load core-architecture image
59
+ #fix the issue with gr.Image(path) inside a docker container
60
+ def get_image(_image):
61
+ #from PIL import Image
62
+ # Load an image
63
+ image = e4u._join_paths(BaseConfiguration.ea4all_images,_image)
64
+ print(f"Full path: {image}")
65
+
66
+ return image
67
+
+ def ea4all_confluence(show_api=False):
+
+     #Confluence API Key
+     confluence_api_key = os.environ['CONFLUENCE_API_KEY']
+
+     loader = ConfluenceLoader(
+         url="https://learnitall.atlassian.net/wiki", username="[email protected]", api_key=confluence_api_key,
+         space_key="~71202000cd55f36336455f8c07afa1860ba810",
+         include_attachments=False, limit=10,
+         keep_markdown_format=True
+     )
+
+     documents = loader.load()
+
+     data = {
+         "title": [doc.metadata["title"] for doc in documents],
+         "source": [doc.metadata["source"] for doc in documents],
+         "page_content": [doc.page_content for doc in documents],
+     }
+
+     df = pd.DataFrame(data)
+
+     return df
+
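+ # Return the page_content of the first Confluence page whose title matches.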
+ def filter_page(page_list, title):
+     x = page_list[page_list["title"] == title]
+     return x.iloc[0]['page_content']
+
+ #capture user feedback on the LLM response and log it to LangSmith
+ def get_user_feedback(evt: gr.SelectData, request: gr.Request):
+     ##{evt.index} {evt.value} {evt._data['liked']}
+     try:
+         # The run_id of the user's last LLM call is kept in a per-user environment variable
+         uuid_str = os.environ["EA4ALL_" + e4u.get_user_identification(request).replace(".", "_")]
+         gr.Info("Thanks for your feedback - run_id: " + uuid_str)
+         run_id = uuid.UUID(uuid_str)
+         client = Client()
+         client.create_feedback(
+             run_id,
+             key="feedback-key",
+             score=1.0 if evt._data['liked'] else 0.0,
+             comment=str(evt.value)
+         )
+     except Exception as e:
+         gr.Warning(f"Couldn't capture feedback: {e}")
+
+ #Set initial state of apm, llm and capture user-ip
+ async def ea4all_agent_init(request: gr.Request):
+
+     agentic_qna_desc = """Hi,
+ improve efficiency and knowledge sharing, and get valuable insights from your IT landscape using natural language.
+ As an Enterprise Architect Agentic Companion I can answer questions related to Enterprise Architecture, Technology, plus the following IT Landscape sample dataset: """
+
+     #capture user IP address
+     #ea4all_user = e4u.get_user_identification(request)
+     gr.Info("Thank you for using the EA4ALL Agentic MCP Server!")
+
+     # Set initial landscape vectorstore
+
+     #await indexer_graph.ainvoke(input={"docs":[]}, config=config)
+
+     #set chatbot description w/ user apm columns
+     df = vectorstore.apm_dataframe_loader(e4u._join_paths(BaseConfiguration.ea4all_store, IndexConfiguration.apm_catalogue))
+     columns_string = ', '.join(df.columns)
+     apm_columns = agentic_qna_desc + columns_string
+
+     prompt = ChatMessage(role='assistant', content='Hi, I am your Architect Copilot! How can I help you today?')
+
+     page_list = ea4all_confluence()
+
+     #Load gradio.dataframe with Portfolio sample dataset
+     pmo_df = pd.read_csv("ea4all/ea4all_store/ea4all-portfolio-management.csv")
+
+     dbr_text = init_dbr()
+
+     return (
+         apm_columns,
+         [{"role": "system", "content": "You are a helpful assistant."}],
+         [{"role": "system", "content": "You are a helpful assistant."}],
+         [{"role": "system", "content": "You are a helpful assistant."}],
+         gr.DataFrame(value=df),
+         gr.DataFrame(value=pmo_df),
+         dbr_text
+     )
+
+ #authentication
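+ # Demo-only check: the login succeeds when username and password are identical.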
+ def ea4all_login(username, password):
+     return (username == password)
+
+ #TABS & Reference Architecture look-and-feel control
+ def off_dbrtext():
+     return gr.TextArea(visible=False), gr.Tab(visible=False), gr.Tab(visible=False)
+
+ def on_dbrtext(file):
+     if file:
+         return gr.TextArea(visible=True)
+     return gr.TextArea(visible=False)
+
+ def unload_dbr():
+     return gr.TextArea(visible=False)
+
+ def get_question_diagram_from_example(value) -> list:
+     """
+     Extracts the question and diagram from the selected example.
+     """
+     if value:
+         return [value['text'], value['files'][-1]] if 'files' in value else [value['text'], None]
+     return ["", None]
+
+ def on_image_update(image: gr.Image) -> gr.Image:
+     """
+     Callback to handle image updates: show the component only when an image is present.
+     """
+     visible = image is not None
+
+     return gr.Image(visible=visible)