diff --git a/__pycache__/app_settings.cpython-310.pyc b/__pycache__/app_settings.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..607feeb9c79e6b3f5bee083ecf5d2d78c643be53
Binary files /dev/null and b/__pycache__/app_settings.cpython-310.pyc differ
diff --git a/__pycache__/constants.cpython-310.pyc b/__pycache__/constants.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..adfa7913d72d541acc59cd3b4b962656528cb4f0
Binary files /dev/null and b/__pycache__/constants.cpython-310.pyc differ
diff --git a/__pycache__/context.cpython-310.pyc b/__pycache__/context.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7fc77a9042356e1f81c62c8ece12bf2658c877b3
Binary files /dev/null and b/__pycache__/context.cpython-310.pyc differ
diff --git a/__pycache__/image_ops.cpython-310.pyc b/__pycache__/image_ops.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2845aace23505d6b50041e2cc7e088d85b324dce
Binary files /dev/null and b/__pycache__/image_ops.cpython-310.pyc differ
diff --git a/__pycache__/paths.cpython-310.pyc b/__pycache__/paths.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3cbe8d764b18f0c4b129f39eb7d671a02e9d1730
Binary files /dev/null and b/__pycache__/paths.cpython-310.pyc differ
diff --git a/__pycache__/state.cpython-310.pyc b/__pycache__/state.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..feb4f0a8ef72d7091a60ab8409e8c9a3d793afc4
Binary files /dev/null and b/__pycache__/state.cpython-310.pyc differ
diff --git a/__pycache__/utils.cpython-310.pyc b/__pycache__/utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0b4b6aee5677d7fec48116e50b430ef90cf31beb
Binary files /dev/null and b/__pycache__/utils.cpython-310.pyc differ
diff --git a/configs/lcm-lora-models.txt b/configs/lcm-lora-models.txt
new file mode 100644
index 0000000000000000000000000000000000000000..3a0a2e2a07ec88947f8880eab849146a2d7d071d
--- /dev/null
+++ b/configs/lcm-lora-models.txt
@@ -0,0 +1,3 @@
+latent-consistency/lcm-lora-sdv1-5
+latent-consistency/lcm-lora-sdxl
+latent-consistency/lcm-lora-ssd-1b
\ No newline at end of file
diff --git a/configs/lcm-models.txt b/configs/lcm-models.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2cc517fcd56a6e33ed374386beaa36b5c8942ee1
--- /dev/null
+++ b/configs/lcm-models.txt
@@ -0,0 +1,5 @@
+stabilityai/sd-turbo
+stabilityai/sdxl-turbo
+SimianLuo/LCM_Dreamshaper_v7
+latent-consistency/lcm-sdxl
+latent-consistency/lcm-ssd-1b
\ No newline at end of file
diff --git a/configs/openvino-lcm-models.txt b/configs/openvino-lcm-models.txt
new file mode 100644
index 0000000000000000000000000000000000000000..0043f1203db0394dadabd02a86e6a2bae3b9a83e
--- /dev/null
+++ b/configs/openvino-lcm-models.txt
@@ -0,0 +1,4 @@
+rupeshs/sd-turbo-openvino
+rupeshs/sdxl-turbo-openvino-int8
+rupeshs/LCM-dreamshaper-v7-openvino
+Disty0/LCM_SoteMix
\ No newline at end of file
diff --git a/configs/settings.yaml b/configs/settings.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0f5db8da9ef32cd87678daf41bc953b38558df2b
--- /dev/null
+++ b/configs/settings.yaml
@@ -0,0 +1,24 @@
+lcm_diffusion_setting:
+  diffusion_task: text_to_image
+  guidance_scale: 1.0
+  image_height: 512
+  image_width: 512
+  inference_steps: 1
+  init_image: null
+  lcm_lora:
+    base_model_id: Lykon/dreamshaper-8
+    lcm_lora_id: latent-consistency/lcm-lora-sdv1-5
+  lcm_model_id: stabilityai/sd-turbo
+  negative_prompt: ''
+  number_of_images: 1
+  openvino_lcm_model_id: rupeshs/sd-turbo-openvino
+  prompt: a girl dance
+  seed: 123123
+  strength: 0.6
+  use_lcm_lora: false
+  use_offline_model: false
+  use_openvino: true
+  use_safety_checker: false
+  use_seed: false
+  use_tiny_auto_encoder: false
+results_path: /workspaces/fastsdcpu/results
diff --git a/configs/stable-diffusion-models.txt b/configs/stable-diffusion-models.txt
new file mode 100644
index 0000000000000000000000000000000000000000..d5d21c9c5e64bb55c642243d27c230f04c6aab58
--- /dev/null
+++ b/configs/stable-diffusion-models.txt
@@ -0,0 +1,7 @@
+Lykon/dreamshaper-8
+Fictiverse/Stable_Diffusion_PaperCut_Model
+stabilityai/stable-diffusion-xl-base-1.0
+runwayml/stable-diffusion-v1-5
+segmind/SSD-1B
+stablediffusionapi/anything-v5
+prompthero/openjourney-v4
\ No newline at end of file
diff --git a/testlcm/.gitattributes b/testlcm/.gitattributes
new file mode 100644
index 0000000000000000000000000000000000000000..a6344aac8c09253b3b630fb776ae94478aa0275b
--- /dev/null
+++ b/testlcm/.gitattributes
@@ -0,0 +1,35 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
diff --git a/testlcm/README.md b/testlcm/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..a0022446fd0923b9af472a47226bd75948c81af8
--- /dev/null
+++ b/testlcm/README.md
@@ -0,0 +1,12 @@
+---
+title: Testlcm
+emoji: 🏢
+colorFrom: pink
+colorTo: purple
+sdk: gradio
+sdk_version: 4.7.1
+app_file: app.py
+pinned: false
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/testlcm/__init__.py b/testlcm/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/testlcm/__pycache__/app_settings.cpython-310.pyc b/testlcm/__pycache__/app_settings.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..44d18f066334b262fa8543651ed17338c39b89ef
Binary files /dev/null and b/testlcm/__pycache__/app_settings.cpython-310.pyc differ
diff --git a/testlcm/__pycache__/constants.cpython-310.pyc b/testlcm/__pycache__/constants.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ef6668bc9f50d2540332e17ae2f627a12bd0b77f
Binary files /dev/null and b/testlcm/__pycache__/constants.cpython-310.pyc differ
diff --git a/testlcm/__pycache__/context.cpython-310.pyc b/testlcm/__pycache__/context.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e88f0986efed645964581d64cc65803053024a71
Binary files /dev/null and b/testlcm/__pycache__/context.cpython-310.pyc differ
diff --git a/testlcm/__pycache__/image_ops.cpython-310.pyc b/testlcm/__pycache__/image_ops.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e0e0cdbc5af90f3650ed509650138c59bef6fc7a
Binary files /dev/null and b/testlcm/__pycache__/image_ops.cpython-310.pyc differ
diff --git a/testlcm/__pycache__/paths.cpython-310.pyc b/testlcm/__pycache__/paths.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..71415896b554ad1359ac1f37a38ef66b9ce76848
Binary files /dev/null and b/testlcm/__pycache__/paths.cpython-310.pyc differ
diff --git a/testlcm/__pycache__/state.cpython-310.pyc b/testlcm/__pycache__/state.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5d84d1be3a9eb847e91eed27ef45bf5e0128625a
Binary files /dev/null and b/testlcm/__pycache__/state.cpython-310.pyc differ
diff --git a/testlcm/__pycache__/utils.cpython-310.pyc b/testlcm/__pycache__/utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5c1b1480bd7088837e68eaa2a47b1ae47f93a787
Binary files /dev/null and b/testlcm/__pycache__/utils.cpython-310.pyc differ
diff --git a/testlcm/app.py b/testlcm/app.py
new file mode 100644
index 0000000000000000000000000000000000000000..0889c57373d08dfb98d41df31af3dc0409e0a67f
--- /dev/null
+++ b/testlcm/app.py
@@ -0,0 +1,161 @@
+from app_settings import AppSettings
+from utils import show_system_info
+import constants
+from argparse import ArgumentParser
+from context import Context
+from constants import APP_VERSION, LCM_DEFAULT_MODEL_OPENVINO
+from models.interface_types import InterfaceType
+from constants import DEVICE
+
+parser = ArgumentParser(description=f"FAST SD CPU {constants.APP_VERSION}")
+parser.add_argument(
+    "-s",
+    "--share",
+    action="store_true",
+    help="Create sharable link(Web UI)",
+    required=False,
+)
+group = parser.add_mutually_exclusive_group(required=False)
+group.add_argument(
+    "-g",
+    "--gui",
+    action="store_true",
+    help="Start desktop GUI",
+)
+group.add_argument(
+    "-w",
+    "--webui",
+    action="store_true",
+    help="Start Web UI",
+)
+group.add_argument(
+    "-r",
+    "--realtime",
+    action="store_true",
+    help="Start realtime inference UI(experimental)",
+)
+group.add_argument(
+    "-v",
+    "--version",
+    action="store_true",
+    help="Version",
+)
+parser.add_argument(
+    "--lcm_model_id",
+    type=str,
+    help="Model ID or path,Default SimianLuo/LCM_Dreamshaper_v7",
+    default="SimianLuo/LCM_Dreamshaper_v7",
+)
+parser.add_argument(
+    "--prompt",
+    type=str,
+    help="Describe the image you want to generate",
+)
+parser.add_argument(
+    "--image_height",
+    type=int,
+    help="Height of the image",
+    default=512,
+)
+parser.add_argument(
+    "--image_width",
+    type=int,
+    help="Width of the image",
+    default=512,
+)
+parser.add_argument(
+    "--inference_steps",
+    type=int,
+    help="Number of steps,default : 4",
+    default=4,
+)
+parser.add_argument(
+    "--guidance_scale",
+    type=float,
+    help="Guidance scale, default: 1.0",
+    default=1.0,
+)
+
+parser.add_argument(
+    "--number_of_images",
+    type=int,
+    help="Number of images to generate ,default : 1",
+    default=1,
+)
+parser.add_argument(
+    "--seed",
+    type=int,
+    help="Seed,default : -1 (disabled) ",
+    default=-1,
+)
+parser.add_argument(
+    "--use_openvino",
+    action="store_true",
+    help="Use OpenVINO model",
+)
+
+parser.add_argument(
+    "--use_offline_model",
+    action="store_true",
+    help="Use offline model",
+)
+parser.add_argument(
+    "--use_safety_checker",
+    action="store_false",
+    help="Use safety checker",
+)
+parser.add_argument(
+    "--use_lcm_lora",
+    action="store_true",
+    help="Use LCM-LoRA",
+)
+parser.add_argument(
+    "--base_model_id",
+    type=str,
+    help="LCM LoRA base model ID,Default Lykon/dreamshaper-8",
+    default="Lykon/dreamshaper-8",
+)
+parser.add_argument(
+    "--lcm_lora_id",
+    type=str,
+    help="LCM LoRA model ID,Default latent-consistency/lcm-lora-sdv1-5",
+    default="latent-consistency/lcm-lora-sdv1-5",
+)
+parser.add_argument(
+    "-i",
+    "--interactive",
+    action="store_true",
+    help="Interactive CLI mode",
+)
+parser.add_argument(
+    "--use_tiny_auto_encoder",
+    action="store_true",
+    help="Use tiny auto encoder for SD (TAESD)",
+)
+args = parser.parse_args()
+
+if args.version:
+    print(APP_VERSION)
+    exit()
+
+# parser.print_help()
+show_system_info()
+print(f"Using device : {constants.DEVICE}")
+app_settings = AppSettings()
+app_settings.load()
+print(
+    f"Found {len(app_settings.stable_diffsuion_models)} stable diffusion models in configs/stable-diffusion-models.txt"
+)
+print(
+    f"Found {len(app_settings.lcm_lora_models)} LCM-LoRA models in configs/lcm-lora-models.txt"
+)
+print(
+    f"Found {len(app_settings.openvino_lcm_models)} OpenVINO LCM models in configs/openvino-lcm-models.txt"
+)
+
+from frontend.webui.ui import start_webui
+
+print("Starting web UI mode")
+start_webui(
+    args.share,
+)
\ No newline at end of file
diff --git a/testlcm/app_settings.py b/testlcm/app_settings.py
new file mode 100644
index 0000000000000000000000000000000000000000..b31227026633c68ccb154b001c2df53fcbf2c236
--- /dev/null
+++ b/testlcm/app_settings.py
@@ -0,0 +1,89 @@
+import yaml
+from os import path, makedirs
+from models.settings import Settings
+from paths import FastStableDiffusionPaths
+from utils import get_models_from_text_file
+from constants import (
+    OPENVINO_LCM_MODELS_FILE,
+    LCM_LORA_MODELS_FILE,
+    SD_MODELS_FILE,
+    LCM_MODELS_FILE,
+)
+from copy import deepcopy
+
+
+class AppSettings:
+    def __init__(self):
+        self.config_path = FastStableDiffusionPaths().get_app_settings_path()
+        self._stable_diffsuion_models = get_models_from_text_file(
+            FastStableDiffusionPaths().get_models_config_path(SD_MODELS_FILE)
+        )
+        self._lcm_lora_models = get_models_from_text_file(
+            FastStableDiffusionPaths().get_models_config_path(LCM_LORA_MODELS_FILE)
+        )
+        self._openvino_lcm_models = get_models_from_text_file(
+            FastStableDiffusionPaths().get_models_config_path(OPENVINO_LCM_MODELS_FILE)
+        )
+        self._lcm_models = get_models_from_text_file(
+            FastStableDiffusionPaths().get_models_config_path(LCM_MODELS_FILE)
+        )
+
+    @property
+    def settings(self):
+        return self._config
+
+    @property
+    def stable_diffsuion_models(self):
+        return self._stable_diffsuion_models
+
+    @property
+    def openvino_lcm_models(self):
+        return self._openvino_lcm_models
+
+    @property
+    def lcm_models(self):
+        return self._lcm_models
+
+    @property
+    def lcm_lora_models(self):
+        return self._lcm_lora_models
+
+    def load(self, skip_file=False):
+        if skip_file:
+            print("Skipping config file")
+            settings_dict = self._load_default()
+            self._config = Settings.parse_obj(settings_dict)
+        else:
+            if not path.exists(self.config_path):
+                base_dir = path.dirname(self.config_path)
+                if not path.exists(base_dir):
+                    makedirs(base_dir)
+                try:
+                    print("Settings not found creating default settings")
+                    with open(self.config_path, "w") as file:
+                        yaml.dump(
+                            self._load_default(),
+                            file,
+                        )
+                except Exception as ex:
+                    print(f"Error in creating settings : {ex}")
+                    exit()
+            try:
+                with open(self.config_path) as file:
+                    settings_dict = yaml.safe_load(file)
+                    self._config = Settings.parse_obj(settings_dict)
+            except Exception as ex:
+                print(f"Error in loading settings : {ex}")
+
+    def save(self):
+        try:
+            with open(self.config_path, "w") as file:
+                tmp_cfg = deepcopy(self._config)
+                tmp_cfg.lcm_diffusion_setting.init_image = None
+                yaml.dump(tmp_cfg.dict(), file)
+        except Exception as ex:
+            print(f"Error in saving settings : {ex}")
+
+    def _load_default(self) -> dict:
+        default_config = Settings()
+        return default_config.dict()
diff --git a/testlcm/backend/__init__.py b/testlcm/backend/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/testlcm/backend/__pycache__/__init__.cpython-310.pyc b/testlcm/backend/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..35fa733640165f3c883072b4b1e2f1da2e9ff421
Binary files /dev/null and b/testlcm/backend/__pycache__/__init__.cpython-310.pyc differ
diff --git a/testlcm/backend/__pycache__/device.cpython-310.pyc b/testlcm/backend/__pycache__/device.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b3c8b896e40f3c53db76a734b8576053dc77b805
Binary files /dev/null and b/testlcm/backend/__pycache__/device.cpython-310.pyc differ
diff --git a/testlcm/backend/__pycache__/image_saver.cpython-310.pyc b/testlcm/backend/__pycache__/image_saver.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..defb140dc5d5d4d038d3becb607691407a459b4d
Binary files /dev/null and b/testlcm/backend/__pycache__/image_saver.cpython-310.pyc differ
diff --git a/testlcm/backend/__pycache__/lcm_text_to_image.cpython-310.pyc b/testlcm/backend/__pycache__/lcm_text_to_image.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f43c02d3f85cf8cb1f25bffbb84da67bd559b182
Binary files /dev/null and b/testlcm/backend/__pycache__/lcm_text_to_image.cpython-310.pyc differ
diff --git a/testlcm/backend/__pycache__/tiny_decoder.cpython-310.pyc b/testlcm/backend/__pycache__/tiny_decoder.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9b73032138284776f9692ed12cef4d8e3dfe698b
Binary files /dev/null and b/testlcm/backend/__pycache__/tiny_decoder.cpython-310.pyc differ
diff --git a/testlcm/backend/device.py b/testlcm/backend/device.py
new file mode 100644
index 0000000000000000000000000000000000000000..d3a0aeb72cd292c107e5cd138c22bbdba38f83bc
--- /dev/null
+++ b/testlcm/backend/device.py
@@ -0,0 +1,23 @@
+import platform
+from constants import DEVICE
+import torch
+import openvino as ov
+
+core = ov.Core()
+
+
+def is_openvino_device() -> bool:
+    if DEVICE.lower() == "cpu" or DEVICE.lower()[0] == "g":
+        return True
+    else:
+        return False
+
+
+def get_device_name() -> str:
+    if DEVICE == "cuda" or DEVICE == "mps":
+        default_gpu_index = torch.cuda.current_device()
+        return torch.cuda.get_device_name(default_gpu_index)
+    elif platform.system().lower() == "darwin":
+        return platform.processor()
+    elif is_openvino_device():
+        return core.get_property(DEVICE.upper(), "FULL_DEVICE_NAME")
diff --git a/testlcm/backend/image_saver.py b/testlcm/backend/image_saver.py
new file mode 100644
index 0000000000000000000000000000000000000000..9dcfc204491a2058d19fb1dd17c0881cafcee78d
--- /dev/null
+++ b/testlcm/backend/image_saver.py
@@ -0,0 +1,40 @@
+from os import path, mkdir
+from typing import Any
+from uuid import uuid4
+from backend.models.lcmdiffusion_setting import LCMDiffusionSetting
+import json
+
+
+class ImageSaver:
+    @staticmethod
+    def save_images(
+        output_path: str,
+        images: Any,
+        folder_name: str = "",
+        format: str = ".png",
+        lcm_diffusion_setting: LCMDiffusionSetting = None,
+    ) -> None:
+        gen_id = uuid4()
+
+        for index, image in enumerate(images):
+            if not path.exists(output_path):
+                mkdir(output_path)
+
+            if folder_name:
+                out_path = path.join(
+                    output_path,
+                    folder_name,
+                )
+            else:
+                out_path = output_path
+
+            if not path.exists(out_path):
+                mkdir(out_path)
+            image.save(path.join(out_path, f"{gen_id}-{index+1}{format}"))
+        if lcm_diffusion_setting:
+            with open(path.join(out_path, f"{gen_id}.json"), "w") as json_file:
+                json.dump(
+                    lcm_diffusion_setting.model_dump(exclude="init_image"),
+                    json_file,
+                    indent=4,
+                )
diff --git a/testlcm/backend/lcm_text_to_image.py b/testlcm/backend/lcm_text_to_image.py
new file mode 100644
index 0000000000000000000000000000000000000000..e77c51c2e3096ca3401868970e402b40e4e31098
--- /dev/null
+++ b/testlcm/backend/lcm_text_to_image.py
@@ -0,0 +1,352 @@
+from typing import Any
+from diffusers import LCMScheduler
+import torch
+from backend.models.lcmdiffusion_setting import LCMDiffusionSetting
+import numpy as np
+from constants import DEVICE
+from backend.models.lcmdiffusion_setting import LCMLora
+from backend.device import is_openvino_device
+from backend.openvino.pipelines import (
+    get_ov_text_to_image_pipeline,
+    ov_load_taesd,
+    get_ov_image_to_image_pipeline,
+)
+from backend.pipelines.lcm import (
+    get_lcm_model_pipeline,
+    load_taesd,
+    get_image_to_image_pipeline,
+)
+from backend.pipelines.lcm_lora import get_lcm_lora_pipeline
+from backend.models.lcmdiffusion_setting import DiffusionTask
+from image_ops import resize_pil_image
+from math import ceil
+
+
+class LCMTextToImage:
+    def __init__(
+        self,
+        device: str = "cpu",
+    ) -> None:
+        self.pipeline = None
+        self.use_openvino = False
+        self.device = ""
+        self.previous_model_id = None
+        self.previous_use_tae_sd = False
+        self.previous_use_lcm_lora = False
+        self.previous_ov_model_id = ""
+        self.previous_safety_checker = False
+        self.previous_use_openvino = False
+        self.img_to_img_pipeline = None
+        self.is_openvino_init = False
+        self.torch_data_type = (
+            torch.float32 if is_openvino_device() or DEVICE == "mps" else torch.float16
+        )
+        print(f"Torch datatype : {self.torch_data_type}")
+
+    def _pipeline_to_device(self):
+        print(f"Pipeline device : {DEVICE}")
+        print(f"Pipeline dtype : {self.torch_data_type}")
+        self.pipeline.to(
+            torch_device=DEVICE,
+            torch_dtype=self.torch_data_type,
+        )
+
+    def _add_freeu(self):
+        pipeline_class = self.pipeline.__class__.__name__
+        if isinstance(self.pipeline.scheduler, LCMScheduler):
+            if pipeline_class == "StableDiffusionPipeline":
+                print("Add FreeU - SD")
+                self.pipeline.enable_freeu(
+                    s1=0.9,
+                    s2=0.2,
+                    b1=1.2,
+                    b2=1.4,
+                )
+            elif pipeline_class == "StableDiffusionXLPipeline":
+                print("Add FreeU - SDXL")
+                self.pipeline.enable_freeu(
+                    s1=0.6,
+                    s2=0.4,
+                    b1=1.1,
+                    b2=1.2,
+                )
+
+    def _update_lcm_scheduler_params(self):
+        if isinstance(self.pipeline.scheduler, LCMScheduler):
+            self.pipeline.scheduler = LCMScheduler.from_config(
+                self.pipeline.scheduler.config,
+                beta_start=0.001,
+                beta_end=0.01,
+            )
+
+    def init(
+        self,
+        device: str = "cpu",
+        lcm_diffusion_setting: LCMDiffusionSetting = LCMDiffusionSetting(),
+    ) -> None:
+        self.device = device
+        self.use_openvino = lcm_diffusion_setting.use_openvino
+        model_id = lcm_diffusion_setting.lcm_model_id
+        use_local_model = lcm_diffusion_setting.use_offline_model
+        use_tiny_auto_encoder = lcm_diffusion_setting.use_tiny_auto_encoder
+        use_lora = lcm_diffusion_setting.use_lcm_lora
+        lcm_lora: LCMLora = lcm_diffusion_setting.lcm_lora
+        ov_model_id = lcm_diffusion_setting.openvino_lcm_model_id
+
+        if lcm_diffusion_setting.diffusion_task == DiffusionTask.image_to_image.value:
+            lcm_diffusion_setting.init_image = resize_pil_image(
+                lcm_diffusion_setting.init_image,
+                lcm_diffusion_setting.image_width,
+                lcm_diffusion_setting.image_height,
+            )
+
+        if (
+            self.pipeline is None
+            or self.previous_model_id != model_id
+            or self.previous_use_tae_sd != use_tiny_auto_encoder
+            or self.previous_lcm_lora_base_id != lcm_lora.base_model_id
+            or self.previous_lcm_lora_id != lcm_lora.lcm_lora_id
+            or self.previous_use_lcm_lora != use_lora
+            or self.previous_ov_model_id != ov_model_id
+            or self.previous_safety_checker != lcm_diffusion_setting.use_safety_checker
+            or self.previous_use_openvino != lcm_diffusion_setting.use_openvino
+        ):
+            if self.use_openvino and is_openvino_device():
+                if self.pipeline:
+                    del self.pipeline
+                    self.pipeline = None
+                self.is_openvino_init = True
+                if (
+                    lcm_diffusion_setting.diffusion_task
+                    == DiffusionTask.text_to_image.value
+                ):
+                    print(f"***** Init Text to image (OpenVINO) - {ov_model_id} *****")
+                    self.pipeline = get_ov_text_to_image_pipeline(
+                        ov_model_id,
+                        use_local_model,
+                    )
+                elif (
+                    lcm_diffusion_setting.diffusion_task
+                    == DiffusionTask.image_to_image.value
+                ):
+                    print(f"***** Image to image (OpenVINO) - {ov_model_id} *****")
+                    self.pipeline = get_ov_image_to_image_pipeline(
+                        ov_model_id,
+                        use_local_model,
+                    )
+            else:
+                if self.pipeline:
+                    del self.pipeline
+                    self.pipeline = None
+                if self.img_to_img_pipeline:
+                    del self.img_to_img_pipeline
+                    self.img_to_img_pipeline = None
+
+                if use_lora:
+                    print(
+                        f"***** Init LCM-LoRA pipeline - {lcm_lora.base_model_id} *****"
+                    )
+                    self.pipeline = get_lcm_lora_pipeline(
+                        lcm_lora.base_model_id,
+                        lcm_lora.lcm_lora_id,
+                        use_local_model,
+                        torch_data_type=self.torch_data_type,
+                    )
+                else:
+                    print(f"***** Init LCM Model pipeline - {model_id} *****")
+                    self.pipeline = get_lcm_model_pipeline(
+                        model_id,
+                        use_local_model,
+                    )
+
+                if (
+                    lcm_diffusion_setting.diffusion_task
+                    == DiffusionTask.image_to_image.value
+                ):
+                    self.img_to_img_pipeline = get_image_to_image_pipeline(
+                        self.pipeline
+                    )
+                self._pipeline_to_device()
+
+            if use_tiny_auto_encoder:
+                if self.use_openvino and is_openvino_device():
+                    print("Using Tiny Auto Encoder (OpenVINO)")
+                    ov_load_taesd(
+                        self.pipeline,
+                        use_local_model,
+                    )
+                else:
+                    print("Using Tiny Auto Encoder")
+                    if (
+                        lcm_diffusion_setting.diffusion_task
+                        == DiffusionTask.text_to_image.value
+                    ):
+                        load_taesd(
+                            self.pipeline,
+                            use_local_model,
+                            self.torch_data_type,
+                        )
+                    elif (
+                        lcm_diffusion_setting.diffusion_task
+                        == DiffusionTask.image_to_image.value
+                    ):
+                        load_taesd(
+                            self.img_to_img_pipeline,
+                            use_local_model,
+                            self.torch_data_type,
+                        )
+
+            if (
+                lcm_diffusion_setting.diffusion_task
+                == DiffusionTask.image_to_image.value
+                and lcm_diffusion_setting.use_openvino
+            ):
+                self.pipeline.scheduler = LCMScheduler.from_config(
+                    self.pipeline.scheduler.config,
+                )
+            else:
+                self._update_lcm_scheduler_params()
+
+            if use_lora:
+                self._add_freeu()
+
+            self.previous_model_id = model_id
+            self.previous_ov_model_id = ov_model_id
+            self.previous_use_tae_sd = use_tiny_auto_encoder
+            self.previous_lcm_lora_base_id = lcm_lora.base_model_id
+            self.previous_lcm_lora_id = lcm_lora.lcm_lora_id
+            self.previous_use_lcm_lora = use_lora
+            self.previous_safety_checker = lcm_diffusion_setting.use_safety_checker
+            self.previous_use_openvino = lcm_diffusion_setting.use_openvino
+            if (
+                lcm_diffusion_setting.diffusion_task
+                == DiffusionTask.text_to_image.value
+            ):
+                print(f"Pipeline : {self.pipeline}")
+            elif (
+                lcm_diffusion_setting.diffusion_task
+                == DiffusionTask.image_to_image.value
+            ):
+                if self.use_openvino and is_openvino_device():
+                    print(f"Pipeline : {self.pipeline}")
+                else:
+                    print(f"Pipeline : {self.img_to_img_pipeline}")
+
+    def generate(
+        self,
+        lcm_diffusion_setting: LCMDiffusionSetting,
+        reshape: bool = False,
+    ) -> Any:
+        guidance_scale = lcm_diffusion_setting.guidance_scale
+        img_to_img_inference_steps = lcm_diffusion_setting.inference_steps
+        check_step_value = int(
+            lcm_diffusion_setting.inference_steps * lcm_diffusion_setting.strength
+        )
+        if (
+            lcm_diffusion_setting.diffusion_task == DiffusionTask.image_to_image.value
+            and check_step_value < 1
+        ):
+            img_to_img_inference_steps = ceil(1 / lcm_diffusion_setting.strength)
+            print(
+                f"Strength: {lcm_diffusion_setting.strength},{img_to_img_inference_steps}"
+            )
+
+        if lcm_diffusion_setting.use_seed:
+            cur_seed = lcm_diffusion_setting.seed
+            if self.use_openvino:
+                np.random.seed(cur_seed)
+            else:
+                torch.manual_seed(cur_seed)
+
+        is_openvino_pipe = lcm_diffusion_setting.use_openvino and is_openvino_device()
+        if is_openvino_pipe:
+            print("Using OpenVINO")
+            if reshape and not self.is_openvino_init:
+                print("Reshape and compile")
+                self.pipeline.reshape(
+                    batch_size=-1,
+                    height=lcm_diffusion_setting.image_height,
+                    width=lcm_diffusion_setting.image_width,
+                    num_images_per_prompt=lcm_diffusion_setting.number_of_images,
+                )
+                self.pipeline.compile()
+
+            if self.is_openvino_init:
+                self.is_openvino_init = False
+
+        if not lcm_diffusion_setting.use_safety_checker:
+            self.pipeline.safety_checker = None
+            if (
+                lcm_diffusion_setting.diffusion_task
+                == DiffusionTask.image_to_image.value
+                and not is_openvino_pipe
+            ):
+                self.img_to_img_pipeline.safety_checker = None
+
+        if (
+            not lcm_diffusion_setting.use_lcm_lora
+            and not lcm_diffusion_setting.use_openvino
+            and lcm_diffusion_setting.guidance_scale != 1.0
+        ):
+            print("Not using LCM-LoRA so setting guidance_scale 1.0")
+            guidance_scale = 1.0
+
+        if lcm_diffusion_setting.use_openvino:
+            if (
+                lcm_diffusion_setting.diffusion_task
+                == DiffusionTask.text_to_image.value
+            ):
+                result_images = self.pipeline(
+                    prompt=lcm_diffusion_setting.prompt,
+                    negative_prompt=lcm_diffusion_setting.negative_prompt,
+                    num_inference_steps=lcm_diffusion_setting.inference_steps,
+                    guidance_scale=guidance_scale,
+                    width=lcm_diffusion_setting.image_width,
+                    height=lcm_diffusion_setting.image_height,
+                    num_images_per_prompt=lcm_diffusion_setting.number_of_images,
+                ).images
+            elif (
+                lcm_diffusion_setting.diffusion_task
+                == DiffusionTask.image_to_image.value
+            ):
+                result_images = self.pipeline(
+                    image=lcm_diffusion_setting.init_image,
+                    strength=lcm_diffusion_setting.strength,
+                    prompt=lcm_diffusion_setting.prompt,
+                    negative_prompt=lcm_diffusion_setting.negative_prompt,
+                    num_inference_steps=img_to_img_inference_steps * 3,
+                    guidance_scale=guidance_scale,
+                    num_images_per_prompt=lcm_diffusion_setting.number_of_images,
+                ).images
+
+        else:
+            if (
+                lcm_diffusion_setting.diffusion_task
+                == DiffusionTask.text_to_image.value
+            ):
+                result_images = self.pipeline(
+                    prompt=lcm_diffusion_setting.prompt,
+                    negative_prompt=lcm_diffusion_setting.negative_prompt,
+                    num_inference_steps=lcm_diffusion_setting.inference_steps,
+                    guidance_scale=guidance_scale,
+                    width=lcm_diffusion_setting.image_width,
+                    height=lcm_diffusion_setting.image_height,
+                    num_images_per_prompt=lcm_diffusion_setting.number_of_images,
+                ).images
+            elif (
+                lcm_diffusion_setting.diffusion_task
+                == DiffusionTask.image_to_image.value
+            ):
+                result_images = self.img_to_img_pipeline(
+                    image=lcm_diffusion_setting.init_image,
+                    strength=lcm_diffusion_setting.strength,
+                    prompt=lcm_diffusion_setting.prompt,
+                    negative_prompt=lcm_diffusion_setting.negative_prompt,
+                    num_inference_steps=img_to_img_inference_steps,
+                    guidance_scale=guidance_scale,
+                    width=lcm_diffusion_setting.image_width,
+                    height=lcm_diffusion_setting.image_height,
+                    num_images_per_prompt=lcm_diffusion_setting.number_of_images,
+                ).images
+
+        return result_images
diff --git a/testlcm/backend/models/__pycache__/lcmdiffusion_setting.cpython-310.pyc b/testlcm/backend/models/__pycache__/lcmdiffusion_setting.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..67562bbda61a1d25ea41f0fe04154e6c276371fa
Binary files /dev/null and b/testlcm/backend/models/__pycache__/lcmdiffusion_setting.cpython-310.pyc differ
diff --git a/testlcm/backend/models/lcmdiffusion_setting.py b/testlcm/backend/models/lcmdiffusion_setting.py
new file mode 100644
index 0000000000000000000000000000000000000000..c11a7902d7159d15529bdc75e1e6b639d7fd6462
--- /dev/null
+++ b/testlcm/backend/models/lcmdiffusion_setting.py
@@ -0,0 +1,39 @@
+from typing import Optional, Any
+from enum import Enum
+from pydantic import BaseModel
+from constants import LCM_DEFAULT_MODEL, LCM_DEFAULT_MODEL_OPENVINO
+
+
+class LCMLora(BaseModel):
+    base_model_id: str = "Lykon/dreamshaper-8"
+    lcm_lora_id: str = "latent-consistency/lcm-lora-sdv1-5"
+
+
+class DiffusionTask(str, Enum):
+    """Diffusion task types"""
+
+    text_to_image = "text_to_image"
+    image_to_image = "image_to_image"
+
+
+class LCMDiffusionSetting(BaseModel):
+    lcm_model_id: str = LCM_DEFAULT_MODEL
+    openvino_lcm_model_id: str = LCM_DEFAULT_MODEL_OPENVINO
+    use_offline_model: bool = False
+    use_lcm_lora: bool = False
+    lcm_lora: Optional[LCMLora] = LCMLora()
+    use_tiny_auto_encoder: bool = False
+    use_openvino: bool = False
+    prompt: str = ""
+    negative_prompt: str = ""
+    init_image: Any = None
+    strength: Optional[float] = 0.6
+    image_height: Optional[int] = 512
+    image_width: Optional[int] = 512
+    inference_steps: Optional[int] = 1
+    guidance_scale: Optional[float] = 1
+    number_of_images: Optional[int] = 1
+    seed: Optional[int] = 123123
+    use_seed: bool = False
+    use_safety_checker: bool = False
+    diffusion_task: str = DiffusionTask.text_to_image.value
diff --git a/testlcm/backend/openvino/__pycache__/custom_ov_model_vae_decoder.cpython-310.pyc b/testlcm/backend/openvino/__pycache__/custom_ov_model_vae_decoder.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ac2e366a799d2a3e52f3adc9a7266b89b6fbdc2b
Binary files /dev/null and b/testlcm/backend/openvino/__pycache__/custom_ov_model_vae_decoder.cpython-310.pyc differ
diff --git a/testlcm/backend/openvino/__pycache__/pipelines.cpython-310.pyc b/testlcm/backend/openvino/__pycache__/pipelines.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6c51b34da2e829e3db5dd7fb5d1bbad77b5e3a1b
Binary files /dev/null and b/testlcm/backend/openvino/__pycache__/pipelines.cpython-310.pyc differ
diff --git a/testlcm/backend/openvino/custom_ov_model_vae_decoder.py b/testlcm/backend/openvino/custom_ov_model_vae_decoder.py
new file mode 100644
index 0000000000000000000000000000000000000000..ef83fb079f9956c80043cab04a65e114f7e56c66
--- /dev/null
+++ b/testlcm/backend/openvino/custom_ov_model_vae_decoder.py
@@ -0,0 +1,21 @@
+from backend.device import is_openvino_device
+
+if is_openvino_device():
+    from optimum.intel.openvino.modeling_diffusion import OVModelVaeDecoder
+
+
+class CustomOVModelVaeDecoder(OVModelVaeDecoder):
+    def __init__(
+        self,
+        model,
+        parent_model,
+        ov_config=None,
+        model_dir=None,
+    ):
+        super(OVModelVaeDecoder, self).__init__(
+            model,
+            parent_model,
+            ov_config,
+            "vae_decoder",
+            model_dir,
+        )
diff --git a/testlcm/backend/openvino/pipelines.py b/testlcm/backend/openvino/pipelines.py
new file mode 100644
index 0000000000000000000000000000000000000000..62d936dd7426bbe1dd7f43376bbfa61089cf0a8a
--- /dev/null
+++ b/testlcm/backend/openvino/pipelines.py
@@ -0,0 +1,75 @@
+from constants import DEVICE, LCM_DEFAULT_MODEL_OPENVINO
+from backend.tiny_decoder import get_tiny_decoder_vae_model
+from typing import Any
+from backend.device import is_openvino_device
+from paths import get_base_folder_name
+
+if is_openvino_device():
+    from huggingface_hub import snapshot_download
+    from optimum.intel.openvino.modeling_diffusion import OVBaseModel
+
+    from optimum.intel.openvino.modeling_diffusion import (
+        OVStableDiffusionPipeline,
+        OVStableDiffusionImg2ImgPipeline,
+        OVStableDiffusionXLPipeline,
+        OVStableDiffusionXLImg2ImgPipeline,
+    )
+    from backend.openvino.custom_ov_model_vae_decoder import CustomOVModelVaeDecoder
+
+
+def ov_load_taesd(
+    pipeline: Any,
+    use_local_model: bool = False,
+):
+    taesd_dir = snapshot_download(
+        repo_id=get_tiny_decoder_vae_model(pipeline.__class__.__name__),
+        local_files_only=use_local_model,
+    )
+    pipeline.vae_decoder = CustomOVModelVaeDecoder(
+        model=OVBaseModel.load_model(f"{taesd_dir}/vae_decoder/openvino_model.xml"),
+        parent_model=pipeline,
+        model_dir=taesd_dir,
+    )
+
+
+def get_ov_text_to_image_pipeline(
+    model_id: str = LCM_DEFAULT_MODEL_OPENVINO,
+    use_local_model: bool = False,
+) -> Any:
+    if "xl" in get_base_folder_name(model_id).lower():
+        pipeline = OVStableDiffusionXLPipeline.from_pretrained(
+            model_id,
+            local_files_only=use_local_model,
+            ov_config={"CACHE_DIR": ""},
+            device=DEVICE.upper(),
+        )
+    else:
+        pipeline = OVStableDiffusionPipeline.from_pretrained(
+            model_id,
+            local_files_only=use_local_model,
+            ov_config={"CACHE_DIR": ""},
+            device=DEVICE.upper(),
+        )
+
+    return pipeline
+
+
+def get_ov_image_to_image_pipeline(
+    model_id: str = LCM_DEFAULT_MODEL_OPENVINO,
+    use_local_model: bool = False,
+) -> Any:
+    if "xl" in get_base_folder_name(model_id).lower():
+        pipeline = OVStableDiffusionXLImg2ImgPipeline.from_pretrained(
+            model_id,
+            local_files_only=use_local_model,
+            ov_config={"CACHE_DIR": ""},
+            device=DEVICE.upper(),
+        )
+    else:
+        pipeline = OVStableDiffusionImg2ImgPipeline.from_pretrained(
+            model_id,
+            local_files_only=use_local_model,
+            ov_config={"CACHE_DIR": ""},
+            device=DEVICE.upper(),
+        )
+    return pipeline
diff --git a/testlcm/backend/pipelines/__pycache__/lcm.cpython-310.pyc b/testlcm/backend/pipelines/__pycache__/lcm.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e8ed335c8ba74ed4ecee744917976e0603f4ff79
Binary files /dev/null and b/testlcm/backend/pipelines/__pycache__/lcm.cpython-310.pyc differ
diff --git a/testlcm/backend/pipelines/__pycache__/lcm_lora.cpython-310.pyc b/testlcm/backend/pipelines/__pycache__/lcm_lora.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b495991d4fab15e04a07624c61d68e6eff4dd079
Binary files /dev/null and b/testlcm/backend/pipelines/__pycache__/lcm_lora.cpython-310.pyc differ
diff --git a/testlcm/backend/pipelines/lcm.py b/testlcm/backend/pipelines/lcm.py
new file mode 100644
index 0000000000000000000000000000000000000000..18fea2ae5033700b95fbc2530045fcfc58193bcb
--- /dev/null
+++ b/testlcm/backend/pipelines/lcm.py
@@ -0,0 +1,90 @@
+from constants import LCM_DEFAULT_MODEL
+from diffusers import (
+    DiffusionPipeline,
+    AutoencoderTiny,
+    UNet2DConditionModel,
+    LCMScheduler,
+)
+import torch
+from backend.tiny_decoder import get_tiny_decoder_vae_model
+from typing import Any
+from diffusers import (
+    LCMScheduler,
+    StableDiffusionImg2ImgPipeline,
+    StableDiffusionXLImg2ImgPipeline,
+)
+
+
+def _get_lcm_pipeline_from_base_model(
+    lcm_model_id: str,
+    base_model_id: str,
+    use_local_model: bool,
+):
+    pipeline = None
+    unet = UNet2DConditionModel.from_pretrained(
+        lcm_model_id,
+        torch_dtype=torch.float32,
+        local_files_only=use_local_model,
+    )
+    pipeline = DiffusionPipeline.from_pretrained(
+        base_model_id,
+        unet=unet,
+        torch_dtype=torch.float32,
+        local_files_only=use_local_model,
+    )
+    pipeline.scheduler = LCMScheduler.from_config(pipeline.scheduler.config)
+    return pipeline
+
+
+def load_taesd(
+    pipeline: Any,
+    use_local_model: bool = False,
+    torch_data_type: torch.dtype = torch.float32,
+):
+    vae_model = get_tiny_decoder_vae_model(pipeline.__class__.__name__)
+    pipeline.vae = AutoencoderTiny.from_pretrained(
+        vae_model,
+        torch_dtype=torch_data_type,
+        local_files_only=use_local_model,
+    )
+
+
+def get_lcm_model_pipeline(
+    model_id: str = LCM_DEFAULT_MODEL,
+    use_local_model: bool = False,
+):
+    pipeline = None
+    if model_id == "latent-consistency/lcm-sdxl":
+        pipeline = _get_lcm_pipeline_from_base_model(
+            model_id,
+            "stabilityai/stable-diffusion-xl-base-1.0",
+            use_local_model,
+        )
+
+    elif model_id == "latent-consistency/lcm-ssd-1b":
+        pipeline = _get_lcm_pipeline_from_base_model(
+            model_id,
+            "segmind/SSD-1B",
+            use_local_model,
+        )
+    else:
+        pipeline = DiffusionPipeline.from_pretrained(
+            model_id,
+            local_files_only=use_local_model,
+        )
+
+    return pipeline
+
+
+def get_image_to_image_pipeline(pipeline: Any) -> Any:
+    components = pipeline.components
+    pipeline_class = pipeline.__class__.__name__
+    if (
+        pipeline_class == "LatentConsistencyModelPipeline"
+        or pipeline_class == "StableDiffusionPipeline"
+    ):
+        return StableDiffusionImg2ImgPipeline(**components)
+    elif pipeline_class == "StableDiffusionXLPipeline":
+        return StableDiffusionXLImg2ImgPipeline(**components)
+    else:
+        raise Exception(f"Unknown pipeline {pipeline_class}")
diff --git a/testlcm/backend/pipelines/lcm_lora.py b/testlcm/backend/pipelines/lcm_lora.py
new file mode 100644
index 0000000000000000000000000000000000000000..96f8cb716ab0ea3438c8e80062a7e9ca70ddcc43
--- /dev/null
+++ b/testlcm/backend/pipelines/lcm_lora.py
@@ -0,0 +1,25 @@
+from diffusers import DiffusionPipeline, LCMScheduler
+import torch
+
+
+def get_lcm_lora_pipeline(
+    base_model_id: str,
+    lcm_lora_id: str,
+    use_local_model: bool,
+    torch_data_type: torch.dtype,
+):
+    pipeline = DiffusionPipeline.from_pretrained(
+        base_model_id,
+        torch_dtype=torch_data_type,
+        local_files_only=use_local_model,
+    )
+    pipeline.load_lora_weights(
+        lcm_lora_id,
+        local_files_only=use_local_model,
+    )
+    if "lcm" in lcm_lora_id.lower():
+        print("LCM LoRA model detected so using recommended LCMScheduler")
+        pipeline.scheduler = LCMScheduler.from_config(pipeline.scheduler.config)
+    pipeline.fuse_lora()
+    pipeline.unet.to(memory_format=torch.channels_last)
+    return pipeline
diff --git a/testlcm/backend/tiny_decoder.py b/testlcm/backend/tiny_decoder.py
new file mode 100644
index 0000000000000000000000000000000000000000..40f4619fa722fde78dbda0ebf3a0d4317ef6232d
--- /dev/null
+++ b/testlcm/backend/tiny_decoder.py
@@ -0,0 +1,30 @@
+from constants import (
+    TAESD_MODEL,
+    TAESDXL_MODEL,
+    TAESD_MODEL_OPENVINO,
+    TAESDXL_MODEL_OPENVINO,
+)
+
+
+def get_tiny_decoder_vae_model(pipeline_class) -> str:
+    print(f"Pipeline class : {pipeline_class}")
+    if (
+        pipeline_class == "LatentConsistencyModelPipeline"
+        or pipeline_class == "StableDiffusionPipeline"
+        or pipeline_class == "StableDiffusionImg2ImgPipeline"
+    ):
+        return TAESD_MODEL
+    elif (
+        pipeline_class == "StableDiffusionXLPipeline"
+        or pipeline_class == "StableDiffusionXLImg2ImgPipeline"
+    ):
+        return TAESDXL_MODEL
+    elif (
+        pipeline_class == "OVStableDiffusionPipeline"
+        or pipeline_class == "OVStableDiffusionImg2ImgPipeline"
+    ):
+        return TAESD_MODEL_OPENVINO
+    elif pipeline_class == "OVStableDiffusionXLPipeline":
+        return TAESDXL_MODEL_OPENVINO
+    else:
+        raise Exception("No valid pipeline class found!")
diff --git a/testlcm/constants.py b/testlcm/constants.py
new file mode 100644
index 0000000000000000000000000000000000000000..811b8a65f1660facd539f593339569b9f49f0f35
--- /dev/null
+++ b/testlcm/constants.py
@@ -0,0 +1,18 @@
+from os import environ
+
+APP_VERSION = "v1.0.0 beta 22"
+LCM_DEFAULT_MODEL = "stabilityai/sd-turbo"
+LCM_DEFAULT_MODEL_OPENVINO = "rupeshs/sd-turbo-openvino"
+APP_NAME = "FastSD CPU"
+APP_SETTINGS_FILE = "settings.yaml"
+RESULTS_DIRECTORY = "results"
+CONFIG_DIRECTORY = "configs"
+DEVICE = environ.get("DEVICE", "cpu")
+SD_MODELS_FILE = "stable-diffusion-models.txt"
+LCM_LORA_MODELS_FILE = "lcm-lora-models.txt"
+OPENVINO_LCM_MODELS_FILE = "openvino-lcm-models.txt"
+TAESD_MODEL = "madebyollin/taesd"
+TAESDXL_MODEL = "madebyollin/taesdxl"
+TAESD_MODEL_OPENVINO = "deinferno/taesd-openvino"
+LCM_MODELS_FILE = "lcm-models.txt"
+TAESDXL_MODEL_OPENVINO = "rupeshs/taesdxl-openvino"
diff --git a/testlcm/context.py b/testlcm/context.py
new file mode 100644
index 0000000000000000000000000000000000000000..d9dc3e1a4d817ee59a6208e88d4f52bd50e1f30d
--- /dev/null
+++ b/testlcm/context.py
@@ -0,0 +1,46 @@
+from typing import Any
+from app_settings import Settings
+from models.interface_types import InterfaceType
+from backend.lcm_text_to_image import LCMTextToImage
+from time import perf_counter
+from backend.image_saver import ImageSaver
+from pprint import pprint
+from state import get_settings
+
+
+class Context:
+    def __init__(
+        self,
+        interface_type: InterfaceType,
+        device="cpu",
+    ):
+        self.interface_type = interface_type
+        self.lcm_text_to_image = LCMTextToImage(device)
+
+    def generate_text_to_image(
+        self,
+        settings: Settings,
+        reshape: bool = False,
+        device: str = "cpu",
+    ) -> Any:
+        get_settings().save()
+        tick = perf_counter()
+        pprint(settings.lcm_diffusion_setting.model_dump())
+        if not settings.lcm_diffusion_setting.lcm_lora:
+            return None
+        self.lcm_text_to_image.init(
+            device,
+            settings.lcm_diffusion_setting,
+        )
+        images = self.lcm_text_to_image.generate(
+            settings.lcm_diffusion_setting,
+            reshape,
+        )
+        elapsed = perf_counter() - tick
+        ImageSaver.save_images(
+            settings.results_path,
+            images=images,
+            lcm_diffusion_setting=settings.lcm_diffusion_setting,
+        )
+        print(f"Latency : {elapsed:.2f} seconds")
+        return images
diff --git a/testlcm/frontend/__pycache__/utils.cpython-310.pyc b/testlcm/frontend/__pycache__/utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dda44ea5be926ed5a2ba00758ecd39b55c497b6c
Binary files /dev/null and b/testlcm/frontend/__pycache__/utils.cpython-310.pyc differ
diff --git a/testlcm/frontend/gui/app_window.py b/testlcm/frontend/gui/app_window.py
new file mode 100644
index 0000000000000000000000000000000000000000..bbdd6ce3f7b9d76227bba5b8c9bfee4344a707f2
--- /dev/null
+++ b/testlcm/frontend/gui/app_window.py
@@ -0,0 +1,604 @@
+from PyQt5.QtWidgets import (
+    QWidget,
+    QPushButton,
+    QHBoxLayout,
+    QVBoxLayout,
+    QLabel,
+    QLineEdit,
+    QMainWindow,
+    QSlider,
+    QTabWidget,
+    QSpacerItem,
+    QSizePolicy,
+    QComboBox,
+    QCheckBox,
+    QTextEdit,
+    QToolButton,
+    QFileDialog,
+)
+from PyQt5 import QtWidgets, QtCore
+from PyQt5.QtGui import QPixmap, QDesktopServices
+from PyQt5.QtCore import QSize, QThreadPool, Qt, QUrl
+
+from PIL.ImageQt import ImageQt
+from constants import (
+    LCM_DEFAULT_MODEL,
+    LCM_DEFAULT_MODEL_OPENVINO,
+    APP_NAME,
+    APP_VERSION,
+)
+from frontend.gui.image_generator_worker import ImageGeneratorWorker
+from app_settings import AppSettings
+from paths import FastStableDiffusionPaths
+from frontend.utils import is_reshape_required
+from context import Context
+from models.interface_types import InterfaceType
+from constants import DEVICE
+from frontend.utils import enable_openvino_controls, get_valid_model_id
+from backend.models.lcmdiffusion_setting import DiffusionTask
+
+# DPI scale fix
+QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling, True)
+QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_UseHighDpiPixmaps, True)
+
+
+class MainWindow(QMainWindow):
+    def __init__(self, config: AppSettings):
+        super().__init__()
+        self.config = config
+        self.setWindowTitle(APP_NAME)
+        self.setFixedSize(QSize(600, 670))
+        self.init_ui()
+        self.pipeline = None
+        self.threadpool = QThreadPool()
+        self.device = "cpu"
+        self.previous_width = 0
+        self.previous_height = 0
+        self.previous_model = ""
+        self.previous_num_of_images = 0
+        self.context = Context(InterfaceType.GUI)
+        self.init_ui_values()
+        self.gen_images = []
+        self.image_index = 0
+        print(f"Output path : {  self.config.settings.results_path}")
+
+    def init_ui_values(self):
+        self.lcm_model.setEnabled(
+            not self.config.settings.lcm_diffusion_setting.use_openvino
+        )
+        self.guidance.setValue(
+            int(self.config.settings.lcm_diffusion_setting.guidance_scale * 10)
+        )
+        self.seed_value.setEnabled(self.config.settings.lcm_diffusion_setting.use_seed)
+        self.safety_checker.setChecked(
+            self.config.settings.lcm_diffusion_setting.use_safety_checker
+        )
+        self.use_openvino_check.setChecked(
+            self.config.settings.lcm_diffusion_setting.use_openvino
+        )
+        self.width.setCurrentText(
+            str(self.config.settings.lcm_diffusion_setting.image_width)
+        )
+        self.height.setCurrentText(
+            str(self.config.settings.lcm_diffusion_setting.image_height)
+        )
+        self.inference_steps.setValue(
+            int(self.config.settings.lcm_diffusion_setting.inference_steps)
+        )
+        self.seed_check.setChecked(self.config.settings.lcm_diffusion_setting.use_seed)
+        self.seed_value.setText(str(self.config.settings.lcm_diffusion_setting.seed))
+        self.use_local_model_folder.setChecked(
+            self.config.settings.lcm_diffusion_setting.use_offline_model
+        )
+        self.results_path.setText(self.config.settings.results_path)
+        self.num_images.setValue(
+            self.config.settings.lcm_diffusion_setting.number_of_images
+        )
+        self.use_tae_sd.setChecked(
+            self.config.settings.lcm_diffusion_setting.use_tiny_auto_encoder
+        )
+        self.use_lcm_lora.setChecked(
+            self.config.settings.lcm_diffusion_setting.use_lcm_lora
+        )
+        self.lcm_model.setCurrentText(
+            get_valid_model_id(
+                self.config.lcm_models,
+                self.config.settings.lcm_diffusion_setting.lcm_model_id,
+                LCM_DEFAULT_MODEL,
+            )
+        )
+        self.base_model_id.setCurrentText(
+            get_valid_model_id(
+                self.config.stable_diffsuion_models,
+                self.config.settings.lcm_diffusion_setting.lcm_lora.base_model_id,
+            )
+        )
+        self.lcm_lora_id.setCurrentText(
+            get_valid_model_id(
+                self.config.lcm_lora_models,
+                self.config.settings.lcm_diffusion_setting.lcm_lora.lcm_lora_id,
+            )
+        )
+        self.openvino_lcm_model_id.setCurrentText(
+            get_valid_model_id(
+                self.config.openvino_lcm_models,
+                self.config.settings.lcm_diffusion_setting.openvino_lcm_model_id,
+                LCM_DEFAULT_MODEL_OPENVINO,
+            )
+        )
+        self.neg_prompt.setEnabled(
+            self.config.settings.lcm_diffusion_setting.use_lcm_lora
+            or self.config.settings.lcm_diffusion_setting.use_openvino
+        )
+        self.openvino_lcm_model_id.setEnabled(
+            self.config.settings.lcm_diffusion_setting.use_openvino
+        )
+
+    def init_ui(self):
+        self.create_main_tab()
+        self.create_settings_tab()
+        self.create_about_tab()
+        self.show()
+
+    def create_main_tab(self):
+        self.img = QLabel("<<Image>>")
+        self.img.setAlignment(Qt.AlignCenter)
+        self.img.setFixedSize(QSize(512, 512))
+        self.vspacer = QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding)
+
+        self.prompt = QTextEdit()
+        self.prompt.setPlaceholderText("A fantasy landscape")
+        self.prompt.setAcceptRichText(False)
+        self.neg_prompt = QTextEdit()
+        self.neg_prompt.setPlaceholderText("")
+        self.neg_prompt.setAcceptRichText(False)
+        self.neg_prompt_label = QLabel("Negative prompt (Set guidance scale > 1.0):")
+        self.generate = QPushButton("Generate")
+        self.generate.clicked.connect(self.text_to_image)
+        self.prompt.setFixedHeight(40)
+        self.neg_prompt.setFixedHeight(35)
+        self.browse_results = QPushButton("...")
+        self.browse_results.setFixedWidth(30)
+        self.browse_results.clicked.connect(self.on_open_results_folder)
+        self.browse_results.setToolTip("Open output folder")
+
+        hlayout = QHBoxLayout()
+        hlayout.addWidget(self.neg_prompt)
+        hlayout.addWidget(self.generate)
+        hlayout.addWidget(self.browse_results)
+
+        self.previous_img_btn = QToolButton()
+        self.previous_img_btn.setText("<")
+        self.previous_img_btn.clicked.connect(self.on_show_previous_image)
+        self.next_img_btn = QToolButton()
+        self.next_img_btn.setText(">")
+        self.next_img_btn.clicked.connect(self.on_show_next_image)
+        hlayout_nav = QHBoxLayout()
+        hlayout_nav.addWidget(self.previous_img_btn)
+        hlayout_nav.addWidget(self.img)
+        hlayout_nav.addWidget(self.next_img_btn)
+
+        vlayout = QVBoxLayout()
+        vlayout.addLayout(hlayout_nav)
+        vlayout.addItem(self.vspacer)
+        vlayout.addWidget(self.prompt)
+        vlayout.addWidget(self.neg_prompt_label)
+        vlayout.addLayout(hlayout)
+
+        self.tab_widget = QTabWidget(self)
+        self.tab_main = QWidget()
+        self.tab_settings = QWidget()
+        self.tab_about = QWidget()
+        self.tab_main.setLayout(vlayout)
+
+        self.tab_widget.addTab(self.tab_main, "Text to Image")
+        self.tab_widget.addTab(self.tab_settings, "Settings")
+        self.tab_widget.addTab(self.tab_about, "About")
+
+        self.setCentralWidget(self.tab_widget)
+        self.use_seed = False
+
+    def create_settings_tab(self):
+        self.lcm_model_label = QLabel("Latent Consistency Model:")
+        # self.lcm_model = QLineEdit(LCM_DEFAULT_MODEL)
+        self.lcm_model = QComboBox(self)
+        self.lcm_model.addItems(self.config.lcm_models)
+        self.lcm_model.currentIndexChanged.connect(self.on_lcm_model_changed)
+
+        self.use_lcm_lora = QCheckBox("Use LCM LoRA")
+        self.use_lcm_lora.setChecked(False)
+        self.use_lcm_lora.stateChanged.connect(self.use_lcm_lora_changed)
+
+        self.lora_base_model_id_label = QLabel("LoRA base model ID:")
+        self.base_model_id = QComboBox(self)
+        self.base_model_id.addItems(self.config.stable_diffsuion_models)
+        self.base_model_id.currentIndexChanged.connect(self.on_base_model_id_changed)
+
+        self.lcm_lora_model_id_label = QLabel("LCM LoRA model ID:")
+        self.lcm_lora_id = QComboBox(self)
+        self.lcm_lora_id.addItems(self.config.lcm_lora_models)
+        self.lcm_lora_id.currentIndexChanged.connect(self.on_lcm_lora_id_changed)
+
+        self.inference_steps_value = QLabel("Number of inference steps: 4")
+        self.inference_steps = QSlider(orientation=Qt.Orientation.Horizontal)
+        self.inference_steps.setMaximum(25)
+        self.inference_steps.setMinimum(1)
+        self.inference_steps.setValue(4)
+        self.inference_steps.valueChanged.connect(self.update_steps_label)
+
+        self.num_images_value = QLabel("Number of images: 1")
+        self.num_images = QSlider(orientation=Qt.Orientation.Horizontal)
+        self.num_images.setMaximum(100)
+        self.num_images.setMinimum(1)
+        self.num_images.setValue(1)
+        self.num_images.valueChanged.connect(self.update_num_images_label)
+
+        self.guidance_value = QLabel("Guidance scale: 1")
+        self.guidance = QSlider(orientation=Qt.Orientation.Horizontal)
+        self.guidance.setMaximum(20)
+        self.guidance.setMinimum(10)
+        self.guidance.setValue(10)
+        self.guidance.valueChanged.connect(self.update_guidance_label)
+
+        self.width_value = QLabel("Width:")
+        self.width = QComboBox(self)
+        self.width.addItem("256")
+        self.width.addItem("512")
+        self.width.addItem("768")
+        self.width.addItem("1024")
+        self.width.setCurrentText("512")
+        self.width.currentIndexChanged.connect(self.on_width_changed)
+
+        self.height_value = QLabel("Height:")
+        self.height = QComboBox(self)
+        self.height.addItem("256")
+        self.height.addItem("512")
+        self.height.addItem("768")
+        self.height.addItem("1024")
+        self.height.setCurrentText("512")
+        self.height.currentIndexChanged.connect(self.on_height_changed)
+
+        self.seed_check = QCheckBox("Use seed")
+        self.seed_value = QLineEdit()
+        self.seed_value.setInputMask("9999999999")
+        self.seed_value.setText("123123")
+        self.seed_check.stateChanged.connect(self.seed_changed)
+
+        self.safety_checker = QCheckBox("Use safety checker")
+        self.safety_checker.setChecked(True)
+        self.safety_checker.stateChanged.connect(self.use_safety_checker_changed)
+
+        self.use_openvino_check = QCheckBox("Use OpenVINO")
+        self.use_openvino_check.setChecked(False)
+        self.openvino_model_label = QLabel("OpenVINO LCM model:")
+        self.use_local_model_folder = QCheckBox(
+            "Use locally cached model or downloaded model folder(offline)"
+        )
+        self.openvino_lcm_model_id = QComboBox(self)
+        self.openvino_lcm_model_id.addItems(self.config.openvino_lcm_models)
+        self.openvino_lcm_model_id.currentIndexChanged.connect(
+            self.on_openvino_lcm_model_id_changed
+        )
+
+        self.use_openvino_check.setEnabled(enable_openvino_controls())
+        self.use_local_model_folder.setChecked(False)
+        self.use_local_model_folder.stateChanged.connect(self.use_offline_model_changed)
+        self.use_openvino_check.stateChanged.connect(self.use_openvino_changed)
+
+        self.use_tae_sd = QCheckBox(
+            "Use Tiny Auto Encoder - TAESD (Fast, moderate quality)"
+        )
+        self.use_tae_sd.setChecked(False)
+        self.use_tae_sd.stateChanged.connect(self.use_tae_sd_changed)
+
+        hlayout = QHBoxLayout()
+        hlayout.addWidget(self.seed_check)
+        hlayout.addWidget(self.seed_value)
+        hspacer = QSpacerItem(20, 10, QSizePolicy.Expanding, QSizePolicy.Minimum)
+        slider_hspacer = QSpacerItem(20, 10, QSizePolicy.Expanding, QSizePolicy.Minimum)
+
+        self.results_path_label = QLabel("Output path:")
+        self.results_path = QLineEdit()
+        self.results_path.textChanged.connect(self.on_path_changed)
+        self.browse_folder_btn = QToolButton()
+        self.browse_folder_btn.setText("...")
+        self.browse_folder_btn.clicked.connect(self.on_browse_folder)
+
+        self.reset = QPushButton("Reset All")
+        self.reset.clicked.connect(self.reset_all_settings)
+
+        vlayout = QVBoxLayout()
+        vspacer = QSpacerItem(20, 20, QSizePolicy.Minimum, QSizePolicy.Expanding)
+        vlayout.addItem(hspacer)
+        vlayout.setSpacing(3)
+        vlayout.addWidget(self.lcm_model_label)
+        vlayout.addWidget(self.lcm_model)
+        vlayout.addWidget(self.use_local_model_folder)
+        vlayout.addWidget(self.use_lcm_lora)
+        vlayout.addWidget(self.lora_base_model_id_label)
+        vlayout.addWidget(self.base_model_id)
+        vlayout.addWidget(self.lcm_lora_model_id_label)
+        vlayout.addWidget(self.lcm_lora_id)
+        vlayout.addWidget(self.use_openvino_check)
+        vlayout.addWidget(self.openvino_model_label)
+        vlayout.addWidget(self.openvino_lcm_model_id)
+        vlayout.addWidget(self.use_tae_sd)
+        vlayout.addItem(slider_hspacer)
+        vlayout.addWidget(self.inference_steps_value)
+        vlayout.addWidget(self.inference_steps)
+        vlayout.addWidget(self.num_images_value)
+        vlayout.addWidget(self.num_images)
+        vlayout.addWidget(self.width_value)
+        vlayout.addWidget(self.width)
+        vlayout.addWidget(self.height_value)
+        vlayout.addWidget(self.height)
+        vlayout.addWidget(self.guidance_value)
+        vlayout.addWidget(self.guidance)
+        vlayout.addLayout(hlayout)
+        vlayout.addWidget(self.safety_checker)
+
+        vlayout.addWidget(self.results_path_label)
+        hlayout_path = QHBoxLayout()
+        hlayout_path.addWidget(self.results_path)
+        hlayout_path.addWidget(self.browse_folder_btn)
+        vlayout.addLayout(hlayout_path)
+        self.tab_settings.setLayout(vlayout)
+        hlayout_reset = QHBoxLayout()
+        hspacer = QSpacerItem(20, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
+        hlayout_reset.addItem(hspacer)
+        hlayout_reset.addWidget(self.reset)
+        vlayout.addLayout(hlayout_reset)
+        vlayout.addItem(vspacer)
+
+    def create_about_tab(self):
+        self.label = QLabel()
+        self.label.setAlignment(Qt.AlignCenter)
+        self.label.setText(
+            f"""<h1>FastSD CPU {APP_VERSION}</h1> 
+               <h3>(c)2023 - Rupesh Sreeraman</h3>
+                <h3>Faster stable diffusion on CPU</h3>
+                 <h3>Based on Latent Consistency Models</h3>
+                <h3>GitHub : https://github.com/rupeshs/fastsdcpu/</h3>"""
+        )
+
+        vlayout = QVBoxLayout()
+        vlayout.addWidget(self.label)
+        self.tab_about.setLayout(vlayout)
+
+    def show_image(self, pixmap):
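+        # Downscale previews larger than 512 px so they fit the fixed 512x512 image label (aspect ratio preserved)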
+        image_width = self.config.settings.lcm_diffusion_setting.image_width
+        image_height = self.config.settings.lcm_diffusion_setting.image_height
+        if image_width > 512 or image_height > 512:
+            new_width = 512 if image_width > 512 else image_width
+            new_height = 512 if image_height > 512 else image_height
+            self.img.setPixmap(
+                pixmap.scaled(
+                    new_width,
+                    new_height,
+                    Qt.KeepAspectRatio,
+                )
+            )
+        else:
+            self.img.setPixmap(pixmap)
+
+    def on_show_next_image(self):
+        if self.image_index != len(self.gen_images) - 1 and len(self.gen_images) > 0:
+            self.previous_img_btn.setEnabled(True)
+            self.image_index += 1
+            self.show_image(self.gen_images[self.image_index])
+            if self.image_index == len(self.gen_images) - 1:
+                self.next_img_btn.setEnabled(False)
+
+    def on_open_results_folder(self):
+        QDesktopServices.openUrl(QUrl.fromLocalFile(self.config.settings.results_path))
+
+    def on_show_previous_image(self):
+        if self.image_index != 0:
+            self.next_img_btn.setEnabled(True)
+            self.image_index -= 1
+            self.show_image(self.gen_images[self.image_index])
+            if self.image_index == 0:
+                self.previous_img_btn.setEnabled(False)
+
+    def on_path_changed(self, text):
+        self.config.settings.results_path = text
+
+    def on_browse_folder(self):
+        options = QFileDialog.Options()
+        options |= QFileDialog.ShowDirsOnly
+
+        folder_path = QFileDialog.getExistingDirectory(
+            self, "Select a Folder", "", options=options
+        )
+
+        if folder_path:
+            self.config.settings.results_path = folder_path
+            self.results_path.setText(folder_path)
+
+    def on_width_changed(self, index):
+        width_txt = self.width.itemText(index)
+        self.config.settings.lcm_diffusion_setting.image_width = int(width_txt)
+
+    def on_height_changed(self, index):
+        height_txt = self.height.itemText(index)
+        self.config.settings.lcm_diffusion_setting.image_height = int(height_txt)
+
+    def on_lcm_model_changed(self, index):
+        model_id = self.lcm_model.itemText(index)
+        self.config.settings.lcm_diffusion_setting.lcm_model_id = model_id
+
+    def on_base_model_id_changed(self, index):
+        model_id = self.base_model_id.itemText(index)
+        self.config.settings.lcm_diffusion_setting.lcm_lora.base_model_id = model_id
+
+    def on_lcm_lora_id_changed(self, index):
+        model_id = self.lcm_lora_id.itemText(index)
+        self.config.settings.lcm_diffusion_setting.lcm_lora.lcm_lora_id = model_id
+
+    def on_openvino_lcm_model_id_changed(self, index):
+        model_id = self.openvino_lcm_model_id.itemText(index)
+        self.config.settings.lcm_diffusion_setting.openvino_lcm_model_id = model_id
+
+    def use_openvino_changed(self, state):
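+        # stateChanged reports 2 (Qt.Checked) when the checkbox is ticked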
+        if state == 2:
+            self.lcm_model.setEnabled(False)
+            self.use_lcm_lora.setEnabled(False)
+            self.lcm_lora_id.setEnabled(False)
+            self.base_model_id.setEnabled(False)
+            self.neg_prompt.setEnabled(True)
+            self.openvino_lcm_model_id.setEnabled(True)
+            self.config.settings.lcm_diffusion_setting.use_openvino = True
+        else:
+            self.lcm_model.setEnabled(True)
+            self.use_lcm_lora.setEnabled(True)
+            self.lcm_lora_id.setEnabled(True)
+            self.base_model_id.setEnabled(True)
+            self.neg_prompt.setEnabled(False)
+            self.openvino_lcm_model_id.setEnabled(False)
+            self.config.settings.lcm_diffusion_setting.use_openvino = False
+
+    def use_tae_sd_changed(self, state):
+        if state == 2:
+            self.config.settings.lcm_diffusion_setting.use_tiny_auto_encoder = True
+        else:
+            self.config.settings.lcm_diffusion_setting.use_tiny_auto_encoder = False
+
+    def use_offline_model_changed(self, state):
+        if state == 2:
+            self.config.settings.lcm_diffusion_setting.use_offline_model = True
+        else:
+            self.config.settings.lcm_diffusion_setting.use_offline_model = False
+
+    def use_lcm_lora_changed(self, state):
+        if state == 2:
+            self.lcm_model.setEnabled(False)
+            self.lcm_lora_id.setEnabled(True)
+            self.base_model_id.setEnabled(True)
+            self.neg_prompt.setEnabled(True)
+            self.config.settings.lcm_diffusion_setting.use_lcm_lora = True
+        else:
+            self.lcm_model.setEnabled(True)
+            self.lcm_lora_id.setEnabled(False)
+            self.base_model_id.setEnabled(False)
+            self.neg_prompt.setEnabled(False)
+            self.config.settings.lcm_diffusion_setting.use_lcm_lora = False
+
+    def use_safety_checker_changed(self, state):
+        if state == 2:
+            self.config.settings.lcm_diffusion_setting.use_safety_checker = True
+        else:
+            self.config.settings.lcm_diffusion_setting.use_safety_checker = False
+
+    def update_steps_label(self, value):
+        self.inference_steps_value.setText(f"Number of inference steps: {value}")
+        self.config.settings.lcm_diffusion_setting.inference_steps = value
+
+    def update_num_images_label(self, value):
+        self.num_images_value.setText(f"Number of images: {value}")
+        self.config.settings.lcm_diffusion_setting.number_of_images = value
+
+    def update_guidance_label(self, value):
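+        # The slider uses integer steps 10-20; divide by 10 for a guidance scale of 1.0-2.0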
+        val = round(int(value) / 10, 1)
+        self.guidance_value.setText(f"Guidance scale: {val}")
+        self.config.settings.lcm_diffusion_setting.guidance_scale = val
+
+    def seed_changed(self, state):
+        if state == 2:
+            self.seed_value.setEnabled(True)
+            self.config.settings.lcm_diffusion_setting.use_seed = True
+        else:
+            self.seed_value.setEnabled(False)
+            self.config.settings.lcm_diffusion_setting.use_seed = False
+
+    def get_seed_value(self) -> int:
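+        # Return -1 when no fixed seed is requested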
+        use_seed = self.config.settings.lcm_diffusion_setting.use_seed
+        seed_value = int(self.seed_value.text()) if use_seed else -1
+        return seed_value
+
+    def generate_image(self):
+        self.config.settings.lcm_diffusion_setting.seed = self.get_seed_value()
+        self.config.settings.lcm_diffusion_setting.prompt = self.prompt.toPlainText()
+        self.config.settings.lcm_diffusion_setting.negative_prompt = (
+            self.neg_prompt.toPlainText()
+        )
+        self.config.settings.lcm_diffusion_setting.lcm_lora.lcm_lora_id = (
+            self.lcm_lora_id.currentText()
+        )
+        self.config.settings.lcm_diffusion_setting.lcm_lora.base_model_id = (
+            self.base_model_id.currentText()
+        )
+
+        if self.config.settings.lcm_diffusion_setting.use_openvino:
+            model_id = self.openvino_lcm_model_id.currentText()
+        else:
+            model_id = self.lcm_model.currentText()
+
+        self.config.settings.lcm_diffusion_setting.lcm_model_id = model_id
+
+        reshape_required = False
+        if self.config.settings.lcm_diffusion_setting.use_openvino:
+            # Detect dimension change
+            reshape_required = is_reshape_required(
+                self.previous_width,
+                self.config.settings.lcm_diffusion_setting.image_width,
+                self.previous_height,
+                self.config.settings.lcm_diffusion_setting.image_height,
+                self.previous_model,
+                model_id,
+                self.previous_num_of_images,
+                self.config.settings.lcm_diffusion_setting.number_of_images,
+            )
+        self.config.settings.lcm_diffusion_setting.diffusion_task = (
+            DiffusionTask.text_to_image.value
+        )
+        images = self.context.generate_text_to_image(
+            self.config.settings,
+            reshape_required,
+            DEVICE,
+        )
+        self.image_index = 0
+        self.gen_images = []
+        for img in images:
+            im = ImageQt(img).copy()
+            pixmap = QPixmap.fromImage(im)
+            self.gen_images.append(pixmap)
+
+        if len(self.gen_images) > 1:
+            self.next_img_btn.setEnabled(True)
+            self.previous_img_btn.setEnabled(False)
+        else:
+            self.next_img_btn.setEnabled(False)
+            self.previous_img_btn.setEnabled(False)
+
+        self.show_image(self.gen_images[0])
+
+        self.previous_width = self.config.settings.lcm_diffusion_setting.image_width
+        self.previous_height = self.config.settings.lcm_diffusion_setting.image_height
+        self.previous_model = model_id
+        self.previous_num_of_images = (
+            self.config.settings.lcm_diffusion_setting.number_of_images
+        )
+
+    def text_to_image(self):
+        self.img.setText("Please wait...")
+        worker = ImageGeneratorWorker(self.generate_image)
+        self.threadpool.start(worker)
+
+    def closeEvent(self, event):
+        self.config.settings.lcm_diffusion_setting.seed = self.get_seed_value()
+        print(self.config.settings.lcm_diffusion_setting)
+        print("Saving settings")
+        self.config.save()
+
+    def reset_all_settings(self):
+        self.use_local_model_folder.setChecked(False)
+        self.width.setCurrentText("512")
+        self.height.setCurrentText("512")
+        self.inference_steps.setValue(4)
+        self.guidance.setValue(10)
+        self.use_openvino_check.setChecked(False)
+        self.seed_check.setChecked(False)
+        self.safety_checker.setChecked(False)
+        self.results_path.setText(FastStableDiffusionPaths().get_results_path())
+        self.use_tae_sd.setChecked(False)
+        self.use_lcm_lora.setChecked(False)
diff --git a/testlcm/frontend/gui/image_generator_worker.py b/testlcm/frontend/gui/image_generator_worker.py
new file mode 100644
index 0000000000000000000000000000000000000000..3a948365085ece82337309ac91d278e77fa03e40
--- /dev/null
+++ b/testlcm/frontend/gui/image_generator_worker.py
@@ -0,0 +1,37 @@
+from PyQt5.QtCore import (
+    QObject,
+    QRunnable,
+    pyqtSignal,
+    pyqtSlot,
+)
+import traceback
+import sys
+
+
+class WorkerSignals(QObject):
+    finished = pyqtSignal()
+    error = pyqtSignal(tuple)
+    result = pyqtSignal(object)
+
+
+class ImageGeneratorWorker(QRunnable):
+    def __init__(self, fn, *args, **kwargs):
+        super(ImageGeneratorWorker, self).__init__()
+        self.fn = fn
+        self.args = args
+        self.kwargs = kwargs
+        self.signals = WorkerSignals()
+
+    @pyqtSlot()
+    def run(self):
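+        # Execute the wrapped generation function and report the outcome through Qt signals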
+        try:
+            result = self.fn(*self.args, **self.kwargs)
+        except Exception:
+            traceback.print_exc()
+            exctype, value = sys.exc_info()[:2]
+            self.signals.error.emit((exctype, value, traceback.format_exc()))
+        else:
+            self.signals.result.emit(result)
+        finally:
+            self.signals.finished.emit()
diff --git a/testlcm/frontend/gui/ui.py b/testlcm/frontend/gui/ui.py
new file mode 100644
index 0000000000000000000000000000000000000000..9250bf676da1f3dc8a2f5435095b9cec9b08041e
--- /dev/null
+++ b/testlcm/frontend/gui/ui.py
@@ -0,0 +1,15 @@
+from typing import List
+from frontend.gui.app_window import MainWindow
+from PyQt5.QtWidgets import QApplication
+from app_settings import AppSettings
+
+
+def start_gui(
+    argv: List[str],
+    app_settings: AppSettings,
+):
+    app = QApplication(argv)
+    window = MainWindow(app_settings)
+    window.show()
+    app.exec()
diff --git a/testlcm/frontend/utils.py b/testlcm/frontend/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..f7024d8fdcb0b2dbcc5b9de22b9562d853ddaf6e
--- /dev/null
+++ b/testlcm/frontend/utils.py
@@ -0,0 +1,54 @@
+from typing import List
+import platform
+from backend.device import is_openvino_device
+
+
+def is_reshape_required(
+    prev_width: int,
+    cur_width: int,
+    prev_height: int,
+    cur_height: int,
+    prev_model: str,
+    cur_model: str,
+    prev_num_of_images: int,
+    cur_num_of_images: int,
+) -> bool:
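+    # OpenVINO pipelines are compiled for fixed shapes, so a change in resolution,
+    # model or batch size requires reshaping and recompiling the pipeline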
+    reshape_required = False
+    if (
+        prev_width != cur_width
+        or prev_height != cur_height
+        or prev_model != cur_model
+        or prev_num_of_images != cur_num_of_images
+    ):
+        print("Reshape and compile")
+        reshape_required = True
+
+    return reshape_required
+
+
+def enable_openvino_controls() -> bool:
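+    # OpenVINO controls are only offered on OpenVINO-capable devices, and never on macOS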
+    return is_openvino_device() and platform.system().lower() != "darwin"
+
+
+def get_valid_model_id(
+    models: List,
+    model_id: str,
+    default_model: str = "",
+) -> str:
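+    # Prefer the configured model ID; otherwise fall back to the default (if given) or the first list entry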
+    if len(models) == 0:
+        print("Error: model configuration file is empty,please add some models.")
+        return ""
+    if model_id == "":
+        if default_model:
+            return default_model
+        else:
+            return models[0]
+
+    if model_id in models:
+        return model_id
+    else:
+        print(
+            f"Error: {model_id} model not found in configuration file, using first model: {models[0]}"
+        )
+        return models[0]
diff --git a/testlcm/frontend/webui/__pycache__/generation_settings_ui.cpython-310.pyc b/testlcm/frontend/webui/__pycache__/generation_settings_ui.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..71e1fbc07a439978eaaa3a55eae9f9872761c76e
Binary files /dev/null and b/testlcm/frontend/webui/__pycache__/generation_settings_ui.cpython-310.pyc differ
diff --git a/testlcm/frontend/webui/__pycache__/image_to_image_ui.cpython-310.pyc b/testlcm/frontend/webui/__pycache__/image_to_image_ui.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e0c9de4efdff9fb1360506acbdd940c103c1dc91
Binary files /dev/null and b/testlcm/frontend/webui/__pycache__/image_to_image_ui.cpython-310.pyc differ
diff --git a/testlcm/frontend/webui/__pycache__/models_ui.cpython-310.pyc b/testlcm/frontend/webui/__pycache__/models_ui.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d98c5373db48571a656b295d30f00c9ec7ad890e
Binary files /dev/null and b/testlcm/frontend/webui/__pycache__/models_ui.cpython-310.pyc differ
diff --git a/testlcm/frontend/webui/__pycache__/text_to_image_ui.cpython-310.pyc b/testlcm/frontend/webui/__pycache__/text_to_image_ui.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bc5d99e2ed1f87b79d9b21aaf5e6556938eb358d
Binary files /dev/null and b/testlcm/frontend/webui/__pycache__/text_to_image_ui.cpython-310.pyc differ
diff --git a/testlcm/frontend/webui/__pycache__/ui.cpython-310.pyc b/testlcm/frontend/webui/__pycache__/ui.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6902613323b34e07fbb956d46b9c4f7abdde8139
Binary files /dev/null and b/testlcm/frontend/webui/__pycache__/ui.cpython-310.pyc differ
diff --git a/testlcm/frontend/webui/css/style.css b/testlcm/frontend/webui/css/style.css
new file mode 100644
index 0000000000000000000000000000000000000000..216e9f54bd789851bb1aa091cc8beaf98ddbe75d
--- /dev/null
+++ b/testlcm/frontend/webui/css/style.css
@@ -0,0 +1,22 @@
+footer {
+    visibility: hidden
+}
+
+#generate_button {
+    color: white;
+    border-color: #007bff;
+    background: #2563eb;
+
+}
+
+#save_button {
+    color: white;
+    border-color: #028b40;
+    background: #01b97c;
+    width: 200px;
+}
+
+#settings_header {
+    background: rgb(245, 105, 105);
+
+}
\ No newline at end of file
diff --git a/testlcm/frontend/webui/generation_settings_ui.py b/testlcm/frontend/webui/generation_settings_ui.py
new file mode 100644
index 0000000000000000000000000000000000000000..5a3af2987420aac054d15f77610b1c623d3434dd
--- /dev/null
+++ b/testlcm/frontend/webui/generation_settings_ui.py
@@ -0,0 +1,140 @@
+import gradio as gr
+from state import get_settings
+
+app_settings = get_settings()
+
+
+def on_change_inference_steps(steps):
+    app_settings.settings.lcm_diffusion_setting.inference_steps = steps
+
+
+def on_change_image_width(img_width):
+    app_settings.settings.lcm_diffusion_setting.image_width = img_width
+
+
+def on_change_image_height(img_height):
+    app_settings.settings.lcm_diffusion_setting.image_height = img_height
+
+
+def on_change_num_images(num_images):
+    app_settings.settings.lcm_diffusion_setting.number_of_images = num_images
+
+
+def on_change_guidance_scale(guidance_scale):
+    app_settings.settings.lcm_diffusion_setting.guidance_scale = guidance_scale
+
+
+def on_change_seed_value(seed):
+    app_settings.settings.lcm_diffusion_setting.seed = seed
+
+
+def on_change_seed_checkbox(seed_checkbox):
+    app_settings.settings.lcm_diffusion_setting.use_seed = seed_checkbox
+
+
+def on_change_safety_checker_checkbox(safety_checker_checkbox):
+    app_settings.settings.lcm_diffusion_setting.use_safety_checker = (
+        safety_checker_checkbox
+    )
+
+
+def on_change_tiny_auto_encoder_checkbox(tiny_auto_encoder_checkbox):
+    app_settings.settings.lcm_diffusion_setting.use_tiny_auto_encoder = (
+        tiny_auto_encoder_checkbox
+    )
+
+
+def on_offline_checkbox(offline_checkbox):
+    app_settings.settings.lcm_diffusion_setting.use_offline_model = offline_checkbox
+
+
+def get_generation_settings_ui() -> None:
+    with gr.Blocks():
+        with gr.Row():
+            with gr.Column():
+                num_inference_steps = gr.Slider(
+                    1,
+                    25,
+                    value=app_settings.settings.lcm_diffusion_setting.inference_steps,
+                    step=1,
+                    label="Inference Steps",
+                    interactive=True,
+                )
+
+                image_height = gr.Slider(
+                    256,
+                    1024,
+                    value=app_settings.settings.lcm_diffusion_setting.image_height,
+                    step=256,
+                    label="Image Height",
+                    interactive=True,
+                )
+                image_width = gr.Slider(
+                    256,
+                    1024,
+                    value=app_settings.settings.lcm_diffusion_setting.image_width,
+                    step=256,
+                    label="Image Width",
+                    interactive=True,
+                )
+                num_images = gr.Slider(
+                    1,
+                    50,
+                    value=app_settings.settings.lcm_diffusion_setting.number_of_images,
+                    step=1,
+                    label="Number of images to generate",
+                    interactive=True,
+                )
+                guidance_scale = gr.Slider(
+                    1.0,
+                    2.0,
+                    value=app_settings.settings.lcm_diffusion_setting.guidance_scale,
+                    step=0.1,
+                    label="Guidance Scale",
+                    interactive=True,
+                )
+
+                seed = gr.Slider(
+                    value=app_settings.settings.lcm_diffusion_setting.seed,
+                    minimum=0,
+                    maximum=999999999,
+                    label="Seed",
+                    step=1,
+                    interactive=True,
+                )
+                seed_checkbox = gr.Checkbox(
+                    label="Use seed",
+                    value=app_settings.settings.lcm_diffusion_setting.use_seed,
+                    interactive=True,
+                )
+
+                safety_checker_checkbox = gr.Checkbox(
+                    label="Use Safety Checker",
+                    value=app_settings.settings.lcm_diffusion_setting.use_safety_checker,
+                    interactive=True,
+                )
+                tiny_auto_encoder_checkbox = gr.Checkbox(
+                    label="Use tiny auto encoder for SD",
+                    value=app_settings.settings.lcm_diffusion_setting.use_tiny_auto_encoder,
+                    interactive=True,
+                )
+                offline_checkbox = gr.Checkbox(
+                    label="Use locally cached model or downloaded model folder(offline)",
+                    value=app_settings.settings.lcm_diffusion_setting.use_offline_model,
+                    interactive=True,
+                )
+
+        num_inference_steps.change(on_change_inference_steps, num_inference_steps)
+        image_height.change(on_change_image_height, image_height)
+        image_width.change(on_change_image_width, image_width)
+        num_images.change(on_change_num_images, num_images)
+        guidance_scale.change(on_change_guidance_scale, guidance_scale)
+        seed.change(on_change_seed_value, seed)
+        seed_checkbox.change(on_change_seed_checkbox, seed_checkbox)
+        safety_checker_checkbox.change(
+            on_change_safety_checker_checkbox, safety_checker_checkbox
+        )
+        tiny_auto_encoder_checkbox.change(
+            on_change_tiny_auto_encoder_checkbox, tiny_auto_encoder_checkbox
+        )
+        offline_checkbox.change(on_offline_checkbox, offline_checkbox)
diff --git a/testlcm/frontend/webui/image_to_image_ui.py b/testlcm/frontend/webui/image_to_image_ui.py
new file mode 100644
index 0000000000000000000000000000000000000000..13b061fae8f383c9ac00a0cb353605b1fd65768c
--- /dev/null
+++ b/testlcm/frontend/webui/image_to_image_ui.py
@@ -0,0 +1,124 @@
+from typing import Any
+import gradio as gr
+from backend.models.lcmdiffusion_setting import DiffusionTask
+from context import Context
+from models.interface_types import InterfaceType
+from frontend.utils import is_reshape_required
+from constants import DEVICE
+from state import get_settings
+from concurrent.futures import ThreadPoolExecutor
+
+app_settings = get_settings()
+
+context = Context(InterfaceType.WEBUI)
+previous_width = 0
+previous_height = 0
+previous_model_id = ""
+previous_num_of_images = 0
+
+
+def generate_image_to_image(
+    prompt,
+    negative_prompt,
+    init_image,
+    strength,
+) -> Any:
+    global previous_height, previous_width, previous_model_id, previous_num_of_images, app_settings
+
+    app_settings.settings.lcm_diffusion_setting.prompt = prompt
+    app_settings.settings.lcm_diffusion_setting.negative_prompt = negative_prompt
+    app_settings.settings.lcm_diffusion_setting.init_image = init_image
+    app_settings.settings.lcm_diffusion_setting.strength = strength
+
+    app_settings.settings.lcm_diffusion_setting.diffusion_task = (
+        DiffusionTask.image_to_image.value
+    )
+    model_id = app_settings.settings.lcm_diffusion_setting.openvino_lcm_model_id
+    reshape = False
+    image_width = app_settings.settings.lcm_diffusion_setting.image_width
+    image_height = app_settings.settings.lcm_diffusion_setting.image_height
+    num_images = app_settings.settings.lcm_diffusion_setting.number_of_images
+    if app_settings.settings.lcm_diffusion_setting.use_openvino:
+        reshape = is_reshape_required(
+            previous_width,
+            image_width,
+            previous_height,
+            image_height,
+            previous_model_id,
+            model_id,
+            previous_num_of_images,
+            num_images,
+        )
+
+    with ThreadPoolExecutor(max_workers=1) as executor:
+        future = executor.submit(
+            context.generate_text_to_image,
+            app_settings.settings,
+            reshape,
+            DEVICE,
+        )
+        images = future.result()
+    # images = context.generate_text_to_image(
+    #     app_settings.settings,
+    #     reshape,
+    #     DEVICE,
+    # )
+    previous_width = image_width
+    previous_height = image_height
+    previous_model_id = model_id
+    previous_num_of_images = num_images
+    return images
+
+
+def get_image_to_image_ui() -> None:
+    with gr.Blocks():
+        with gr.Row():
+            with gr.Column():
+                input_image = gr.Image(label="Init image", type="pil")
+                with gr.Row():
+                    prompt = gr.Textbox(
+                        show_label=False,
+                        lines=3,
+                        placeholder="A fantasy landscape",
+                        container=False,
+                    )
+
+                    generate_btn = gr.Button(
+                        "Generate",
+                        elem_id="generate_button",
+                        scale=0,
+                    )
+                negative_prompt = gr.Textbox(
+                    label="Negative prompt (Works in LCM-LoRA mode, set guidance > 1.0):",
+                    lines=1,
+                    placeholder="",
+                )
+                strength = gr.Slider(
+                    0.1,
+                    1,
+                    value=app_settings.settings.lcm_diffusion_setting.strength,
+                    step=0.01,
+                    label="Strength",
+                )
+
+                input_params = [
+                    prompt,
+                    negative_prompt,
+                    input_image,
+                    strength,
+                ]
+
+            with gr.Column():
+                output = gr.Gallery(
+                    label="Generated images",
+                    show_label=True,
+                    elem_id="gallery",
+                    columns=2,
+                    height=512,
+                )
+
+    generate_btn.click(
+        fn=generate_image_to_image,
+        inputs=input_params,
+        outputs=output,
+    )
diff --git a/testlcm/frontend/webui/models_ui.py b/testlcm/frontend/webui/models_ui.py
new file mode 100644
index 0000000000000000000000000000000000000000..f5e8e6ca69a67422dd74773934dbd01532627d50
--- /dev/null
+++ b/testlcm/frontend/webui/models_ui.py
@@ -0,0 +1,85 @@
+import gradio as gr
+from constants import LCM_DEFAULT_MODEL
+from state import get_settings
+from frontend.utils import get_valid_model_id
+
+app_settings = get_settings()
+app_settings.settings.lcm_diffusion_setting.openvino_lcm_model_id = get_valid_model_id(
+    app_settings.openvino_lcm_models,
+    app_settings.settings.lcm_diffusion_setting.openvino_lcm_model_id,
+)
+
+
+def change_lcm_model_id(model_id):
+    app_settings.settings.lcm_diffusion_setting.lcm_model_id = model_id
+
+
+def change_lcm_lora_model_id(model_id):
+    app_settings.settings.lcm_diffusion_setting.lcm_lora.lcm_lora_id = model_id
+
+
+def change_lcm_lora_base_model_id(model_id):
+    app_settings.settings.lcm_diffusion_setting.lcm_lora.base_model_id = model_id
+
+
+def change_openvino_lcm_model_id(model_id):
+    app_settings.settings.lcm_diffusion_setting.openvino_lcm_model_id = model_id
+
+
+def get_models_ui() -> None:
+    with gr.Blocks():
+        with gr.Row():
+            lcm_model_id = gr.Dropdown(
+                app_settings.lcm_models,
+                label="LCM model",
+                info="Diffusers LCM model ID",
+                value=get_valid_model_id(
+                    app_settings.lcm_models,
+                    app_settings.settings.lcm_diffusion_setting.lcm_model_id,
+                    LCM_DEFAULT_MODEL,
+                ),
+                interactive=True,
+            )
+        with gr.Row():
+            lcm_lora_model_id = gr.Dropdown(
+                app_settings.lcm_lora_models,
+                label="LCM LoRA model",
+                info="Diffusers LCM LoRA model ID",
+                value=get_valid_model_id(
+                    app_settings.lcm_lora_models,
+                    app_settings.settings.lcm_diffusion_setting.lcm_lora.lcm_lora_id,
+                ),
+                interactive=True,
+            )
+            lcm_lora_base_model_id = gr.Dropdown(
+                app_settings.stable_diffsuion_models,
+                label="LCM LoRA base model",
+                info="Diffusers LCM LoRA base model ID",
+                value=get_valid_model_id(
+                    app_settings.stable_diffsuion_models,
+                    app_settings.settings.lcm_diffusion_setting.lcm_lora.base_model_id,
+                ),
+                interactive=True,
+            )
+        with gr.Row():
+            lcm_openvino_model_id = gr.Dropdown(
+                app_settings.openvino_lcm_models,
+                label="LCM OpenVINO model",
+                info="OpenVINO LCM-LoRA fused model ID",
+                value=get_valid_model_id(
+                    app_settings.openvino_lcm_models,
+                    app_settings.settings.lcm_diffusion_setting.openvino_lcm_model_id,
+                ),
+                interactive=True,
+            )
+
+        lcm_model_id.change(change_lcm_model_id, lcm_model_id)
+        lcm_lora_model_id.change(change_lcm_lora_model_id, lcm_lora_model_id)
+        lcm_lora_base_model_id.change(
+            change_lcm_lora_base_model_id, lcm_lora_base_model_id
+        )
+        lcm_openvino_model_id.change(
+            change_openvino_lcm_model_id, lcm_openvino_model_id
+        )
diff --git a/testlcm/frontend/webui/realtime_ui.py b/testlcm/frontend/webui/realtime_ui.py
new file mode 100644
index 0000000000000000000000000000000000000000..0a1214ddacc288cd83faff33992c7913be990ee7
--- /dev/null
+++ b/testlcm/frontend/webui/realtime_ui.py
@@ -0,0 +1,144 @@
+import gradio as gr
+from backend.lcm_text_to_image import LCMTextToImage
+from backend.models.lcmdiffusion_setting import LCMLora, LCMDiffusionSetting
+from constants import DEVICE, LCM_DEFAULT_MODEL_OPENVINO
+from time import perf_counter
+import numpy as np
+from cv2 import imencode
+import base64
+from backend.device import get_device_name
+from constants import APP_VERSION
+from backend.device import is_openvino_device
+
+lcm_text_to_image = LCMTextToImage()
+lcm_lora = LCMLora(
+    base_model_id="Lykon/dreamshaper-8",
+    lcm_lora_id="latent-consistency/lcm-lora-sdv1-5",
+)
+
+
+# https://github.com/gradio-app/gradio/issues/2635#issuecomment-1423531319
+def encode_pil_to_base64_new(pil_image):
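+    # Reverse the channel order (RGB -> BGR) for OpenCV, PNG-encode and return a base64 data URL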
+    image_arr = np.asarray(pil_image)[:, :, ::-1]
+    _, byte_data = imencode(".png", image_arr)
+    base64_data = base64.b64encode(byte_data)
+    base64_string_opencv = base64_data.decode("utf-8")
+    return "data:image/png;base64," + base64_string_opencv
+
+
+# monkey patching encode pil
+gr.processing_utils.encode_pil_to_base64 = encode_pil_to_base64_new
+
+
+def predict(
+    prompt,
+    steps,
+    seed,
+):
+    lcm_diffusion_setting = LCMDiffusionSetting()
+    lcm_diffusion_setting.openvino_lcm_model_id = "rupeshs/LCM-dreamshaper-v7-openvino"
+    lcm_diffusion_setting.prompt = prompt
+    lcm_diffusion_setting.guidance_scale = 1.0
+    lcm_diffusion_setting.inference_steps = steps
+    lcm_diffusion_setting.seed = seed
+    lcm_diffusion_setting.use_seed = True
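+    # Use a lower 256x256 resolution on OpenVINO devices to keep generation fast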
+    lcm_diffusion_setting.image_width = 256 if is_openvino_device() else 512
+    lcm_diffusion_setting.image_height = 256 if is_openvino_device() else 512
+    lcm_diffusion_setting.use_openvino = is_openvino_device()
+    lcm_text_to_image.init(
+        DEVICE,
+        lcm_diffusion_setting,
+    )
+    start = perf_counter()
+
+    images = lcm_text_to_image.generate(lcm_diffusion_setting)
+    latency = perf_counter() - start
+    print(f"Latency: {latency:.2f} seconds")
+    return images[0]
+
+
+css = """
+#container{
+    margin: 0 auto;
+    max-width: 40rem;
+}
+#intro{
+    max-width: 100%;
+    text-align: center;
+    margin: 0 auto;
+}
+#generate_button {
+    color: white;
+    border-color: #007bff;
+    background: #007bff;
+    width: 200px;
+    height: 50px;
+}
+footer {
+    visibility: hidden
+}
+"""
+
+
+def _get_footer_message() -> str:
+    version = f"<center><p> {APP_VERSION} "
+    footer_msg = version + (
+        '  © 2023 <a href="https://github.com/rupeshs">'
+        " Rupesh Sreeraman</a></p></center>"
+    )
+    return footer_msg
+
+
+with gr.Blocks(css=css) as demo:
+    with gr.Column(elem_id="container"):
+        use_openvino = "- OpenVINO" if is_openvino_device() else ""
+        gr.Markdown(
+            f"""# Realtime FastSD CPU {use_openvino}
+               **Device : {DEVICE} , {get_device_name()}**
+            """,
+            elem_id="intro",
+        )
+
+        with gr.Row():
+            with gr.Row():
+                prompt = gr.Textbox(
+                    placeholder="Describe the image you'd like to see",
+                    scale=5,
+                    container=False,
+                )
+                generate_btn = gr.Button(
+                    "Generate",
+                    scale=1,
+                    elem_id="generate_button",
+                )
+
+        image = gr.Image(type="filepath")
+        with gr.Accordion("Advanced options", open=False):
+            steps = gr.Slider(
+                label="Steps",
+                value=4 if is_openvino_device() else 3,
+                minimum=1,
+                maximum=6,
+                step=1,
+            )
+            seed = gr.Slider(
+                randomize=True,
+                minimum=0,
+                maximum=999999999,
+                label="Seed",
+                step=1,
+            )
+        gr.HTML(_get_footer_message())
+
+        inputs = [prompt, steps, seed]
+        prompt.input(fn=predict, inputs=inputs, outputs=image, show_progress=False)
+        generate_btn.click(
+            fn=predict, inputs=inputs, outputs=image, show_progress=False
+        )
+        steps.change(fn=predict, inputs=inputs, outputs=image, show_progress=False)
+        seed.change(fn=predict, inputs=inputs, outputs=image, show_progress=False)
+
+
+def start_realtime_text_to_image(share=False):
+    demo.queue()
+    demo.launch(share=share)
diff --git a/testlcm/frontend/webui/text_to_image_ui.py b/testlcm/frontend/webui/text_to_image_ui.py
new file mode 100644
index 0000000000000000000000000000000000000000..6d218b52e969aa0aed2e7fed2f8ef077259b27eb
--- /dev/null
+++ b/testlcm/frontend/webui/text_to_image_ui.py
@@ -0,0 +1,104 @@
+import gradio as gr
+from typing import Any
+from backend.models.lcmdiffusion_setting import DiffusionTask
+from context import Context
+from models.interface_types import InterfaceType
+from constants import DEVICE
+from state import get_settings
+from frontend.utils import is_reshape_required
+from concurrent.futures import ThreadPoolExecutor
+from pprint import pprint
+
+app_settings = get_settings()
+context = Context(InterfaceType.WEBUI)
+previous_width = 0
+previous_height = 0
+previous_model_id = ""
+previous_num_of_images = 0
+
+
+def generate_text_to_image(
+    prompt,
+    neg_prompt,
+) -> Any:
+    global previous_height, previous_width, previous_model_id, previous_num_of_images, app_settings
+    app_settings.settings.lcm_diffusion_setting.prompt = prompt
+    app_settings.settings.lcm_diffusion_setting.negative_prompt = neg_prompt
+    app_settings.settings.lcm_diffusion_setting.diffusion_task = (
+        DiffusionTask.text_to_image.value
+    )
+    model_id = app_settings.settings.lcm_diffusion_setting.openvino_lcm_model_id
+    reshape = False
+    image_width = app_settings.settings.lcm_diffusion_setting.image_width
+    image_height = app_settings.settings.lcm_diffusion_setting.image_height
+    num_images = app_settings.settings.lcm_diffusion_setting.number_of_images
+    if app_settings.settings.lcm_diffusion_setting.use_openvino:
+        reshape = is_reshape_required(
+            previous_width,
+            image_width,
+            previous_height,
+            image_height,
+            previous_model_id,
+            model_id,
+            previous_num_of_images,
+            num_images,
+        )
+
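+    # Run the generation in a single worker thread and wait for the result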
+    with ThreadPoolExecutor(max_workers=1) as executor:
+        future = executor.submit(
+            context.generate_text_to_image,
+            app_settings.settings,
+            reshape,
+            DEVICE,
+        )
+        images = future.result()
+    # images = context.generate_text_to_image(
+    #     app_settings.settings,
+    #     reshape,
+    #     DEVICE,
+    # )
+    previous_width = image_width
+    previous_height = image_height
+    previous_model_id = model_id
+    previous_num_of_images = num_images
+    return images
+
+
+def get_text_to_image_ui() -> None:
+    with gr.Blocks():
+        with gr.Row():
+            with gr.Column():
+                with gr.Row():
+                    prompt = gr.Textbox(
+                        show_label=False,
+                        lines=3,
+                        placeholder="A fantasy landscape",
+                        container=False,
+                    )
+
+                    generate_btn = gr.Button(
+                        "Generate",
+                        elem_id="generate_button",
+                        scale=0,
+                    )
+                negative_prompt = gr.Textbox(
+                    label="Negative prompt (Works in LCM-LoRA mode, set guidance > 1.0) :",
+                    lines=1,
+                    placeholder="",
+                )
+
+                input_params = [prompt, negative_prompt]
+
+            with gr.Column():
+                output = gr.Gallery(
+                    label="Generated images",
+                    show_label=True,
+                    elem_id="gallery",
+                    columns=2,
+                    height=512,
+                )
+    generate_btn.click(
+        fn=generate_text_to_image,
+        inputs=input_params,
+        outputs=output,
+    )
diff --git a/testlcm/frontend/webui/ui.py b/testlcm/frontend/webui/ui.py
new file mode 100644
index 0000000000000000000000000000000000000000..76480e9dfc66efbc30f282f9ce20b881584c99bc
--- /dev/null
+++ b/testlcm/frontend/webui/ui.py
@@ -0,0 +1,70 @@
+import gradio as gr
+from constants import APP_VERSION
+from frontend.webui.text_to_image_ui import get_text_to_image_ui
+from frontend.webui.image_to_image_ui import get_image_to_image_ui
+from frontend.webui.generation_settings_ui import get_generation_settings_ui
+from frontend.webui.models_ui import get_models_ui
+from paths import FastStableDiffusionPaths
+from state import get_settings
+
+app_settings = get_settings()
+
+
+def _get_footer_message() -> str:
+    version = f"<center><p> {APP_VERSION} "
+    footer_msg = version + (
+        '  © 2023 <a href="https://github.com/rupeshs">'
+        " Rupesh Sreeraman</a></p></center>"
+    )
+    return footer_msg
+
+
+def get_web_ui() -> gr.Blocks:
+    def change_mode(mode):
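+        # Reset both flags, then enable only the flag matching the selected mode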
+        global app_settings
+        app_settings.settings.lcm_diffusion_setting.use_lcm_lora = False
+        app_settings.settings.lcm_diffusion_setting.use_openvino = False
+        if mode == "LCM-LoRA":
+            app_settings.settings.lcm_diffusion_setting.use_lcm_lora = True
+        elif mode == "LCM-OpenVINO":
+            app_settings.settings.lcm_diffusion_setting.use_openvino = True
+
+    with gr.Blocks(
+        css=FastStableDiffusionPaths.get_css_path(),
+        title="FastSD CPU",
+    ) as fastsd_web_ui:
+        gr.HTML("<center><H1>FastSD CPU</H1></center>")
+        current_mode = "LCM"
+        if app_settings.settings.lcm_diffusion_setting.use_openvino:
+            current_mode = "LCM-OpenVINO"
+        elif app_settings.settings.lcm_diffusion_setting.use_lcm_lora:
+            current_mode = "LCM-LoRA"
+
+        mode = gr.Radio(
+            ["LCM", "LCM-LoRA", "LCM-OpenVINO"],
+            label="Mode",
+            info="Current working mode",
+            value=current_mode,
+        )
+        mode.change(change_mode, inputs=mode)
+
+        with gr.Tabs():
+            with gr.TabItem("Text to Image"):
+                get_text_to_image_ui()
+            with gr.TabItem("Image to Image"):
+                get_image_to_image_ui()
+            with gr.TabItem("Generation Settings"):
+                get_generation_settings_ui()
+            with gr.TabItem("Models"):
+                get_models_ui()
+
+        gr.HTML(_get_footer_message())
+
+    return fastsd_web_ui
+
+
+def start_webui(
+    share: bool = False,
+):
+    webui = get_web_ui()
+    webui.launch(share=share)
diff --git a/testlcm/image_ops.py b/testlcm/image_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..b60e911d37616bf29592b15fca9901f404a6e397
--- /dev/null
+++ b/testlcm/image_ops.py
@@ -0,0 +1,15 @@
+from PIL import Image
+
+
+def resize_pil_image(
+    pil_image: Image,
+    image_width,
+    image_height,
+):
+    return pil_image.convert("RGB").resize(
+        (
+            image_width,
+            image_height,
+        ),
+        Image.Resampling.LANCZOS,
+    )
diff --git a/testlcm/models/__pycache__/interface_types.cpython-310.pyc b/testlcm/models/__pycache__/interface_types.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e85589d162c88b34fd79cfb1b9397c0f469ab11e
Binary files /dev/null and b/testlcm/models/__pycache__/interface_types.cpython-310.pyc differ
diff --git a/testlcm/models/__pycache__/settings.cpython-310.pyc b/testlcm/models/__pycache__/settings.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8df531e9d2ec779e0f6e25c62b424bb0849e7e7b
Binary files /dev/null and b/testlcm/models/__pycache__/settings.cpython-310.pyc differ
diff --git a/testlcm/models/interface_types.py b/testlcm/models/interface_types.py
new file mode 100644
index 0000000000000000000000000000000000000000..fcab144facabbf7342331129410183ea0b6075ee
--- /dev/null
+++ b/testlcm/models/interface_types.py
@@ -0,0 +1,7 @@
+from enum import Enum
+
+
+class InterfaceType(Enum):
+    WEBUI = "Web User Interface"
+    GUI = "Graphical User Interface"
+    CLI = "Command Line Interface"
diff --git a/testlcm/models/settings.py b/testlcm/models/settings.py
new file mode 100644
index 0000000000000000000000000000000000000000..f72dde5e7d27ff8b53e7d7497fdb05f702df23ea
--- /dev/null
+++ b/testlcm/models/settings.py
@@ -0,0 +1,8 @@
+from pydantic import BaseModel
+from backend.models.lcmdiffusion_setting import LCMDiffusionSetting, LCMLora
+from paths import FastStableDiffusionPaths
+
+
+class Settings(BaseModel):
+    results_path: str = FastStableDiffusionPaths().get_results_path()
+    lcm_diffusion_setting: LCMDiffusionSetting = LCMDiffusionSetting(lcm_lora=LCMLora())
diff --git a/testlcm/paths.py b/testlcm/paths.py
new file mode 100644
index 0000000000000000000000000000000000000000..2de5631faffd165999fcb75dc3302d89f6c52a1b
--- /dev/null
+++ b/testlcm/paths.py
@@ -0,0 +1,61 @@
+import os
+import constants
+
+
+def join_paths(
+    first_path: str,
+    second_path: str,
+) -> str:
+    return os.path.join(first_path, second_path)
+
+
+def get_app_path() -> str:
+    app_dir = os.path.dirname(__file__)
+    work_dir = os.path.dirname(app_dir)
+    return work_dir
+
+
+def get_configs_path() -> str:
+    config_path = join_paths(get_app_path(), constants.CONFIG_DIRECTORY)
+    return config_path
+
+
+class FastStableDiffusionPaths:
+    @staticmethod
+    def get_app_settings_path() -> str:
+        configs_path = get_configs_path()
+        settings_path = join_paths(
+            configs_path,
+            constants.APP_SETTINGS_FILE,
+        )
+        return settings_path
+
+    @staticmethod
+    def get_results_path() -> str:
+        results_path = join_paths(get_app_path(), constants.RESULTS_DIRECTORY)
+        return results_path
+
+    @staticmethod
+    def get_css_path() -> str:
+        app_dir = os.path.dirname(__file__)
+        css_path = os.path.join(
+            app_dir,
+            "frontend",
+            "webui",
+            "css",
+            "style.css",
+        )
+        return css_path
+
+    @staticmethod
+    def get_models_config_path(model_config_file: str) -> str:
+        configs_path = get_configs_path()
+        models_path = join_paths(
+            configs_path,
+            model_config_file,
+        )
+        return models_path
+
+
+def get_base_folder_name(path: str) -> str:
+    return os.path.basename(path)
diff --git a/testlcm/requirements.txt b/testlcm/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..114292faff44f7742b9dd5bb662435d155c4d06e
--- /dev/null
+++ b/testlcm/requirements.txt
@@ -0,0 +1,16 @@
+accelerate==0.23.0
+diffusers==0.23.0
+transformers==4.35.0
+PyQt5
+Pillow==9.4.0
+openvino==2023.2.0
+optimum==1.14.0
+optimum-intel==1.12.1
+onnx==1.15.0
+onnxruntime==1.16.1
+pydantic==2.4.2
+typing-extensions==4.8.0
+pyyaml==6.0.1
+gradio==3.39.0
+peft==0.6.1
+opencv-python==4.8.1.78
\ No newline at end of file
diff --git a/testlcm/state.py b/testlcm/state.py
new file mode 100644
index 0000000000000000000000000000000000000000..d357eb999e638ecd4b6694842ed00d66352ff5a2
--- /dev/null
+++ b/testlcm/state.py
@@ -0,0 +1,21 @@
+from app_settings import AppSettings
+from typing import Optional
+
+
+class _AppState:
+    _instance: Optional["_AppState"] = None
+    settings: Optional[AppSettings] = None
+
+
+def get_state() -> _AppState:
+    if _AppState._instance is None:
+        _AppState._instance = _AppState()
+    return _AppState._instance
+
+
+def get_settings(skip_file: bool = False) -> AppSettings:
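+    # Lazily create and load the shared AppSettings instance on first use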
+    state = get_state()
+    if state.settings is None:
+        state.settings = AppSettings()
+        state.settings.load(skip_file)
+    return state.settings
diff --git a/testlcm/utils.py b/testlcm/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..f10f89c207833a7fdc86d11e0e4d0fffd402a739
--- /dev/null
+++ b/testlcm/utils.py
@@ -0,0 +1,21 @@
+import platform
+from typing import List
+
+
+def show_system_info():
+    try:
+        print(f"Running on {platform.system()} platform")
+        print(f"OS: {platform.platform()}")
+        print(f"Processor: {platform.processor()}")
+    except Exception as ex:
+        print(f"Error ocurred while getting system information {ex}")
+
+
+def get_models_from_text_file(file_path: str) -> List[str]:
+    models = []
+    with open(file_path, "r") as file:
+        lines = file.readlines()
+    for repo_id in lines:
+        if repo_id.strip() != "":
+            models.append(repo_id.strip())
+    return models
diff --git a/upload.py b/upload.py
new file mode 100644
index 0000000000000000000000000000000000000000..08707fe4350e828df4e7e26eec5b2b9821140335
--- /dev/null
+++ b/upload.py
@@ -0,0 +1,8 @@
+from huggingface_hub import HfApi
+api = HfApi()
+
+api.upload_folder(
+    folder_path="/workspaces/fastsdcpu/src",
+    repo_id="michaelj/testlcm",
+    repo_type="space",
+)
\ No newline at end of file