Surn committed on
Commit faf797f · 1 Parent(s): 8737092

Trellis V1

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full change set.
Files changed (50)
  1. app.py +242 -184
  2. requirements.txt +34 -9
  3. trellis/__init__.py +6 -0
  4. trellis/__pycache__/__init__.cpython-312.pyc +0 -0
  5. trellis/models/__init__.py +70 -0
  6. trellis/models/__pycache__/__init__.cpython-312.pyc +0 -0
  7. trellis/models/__pycache__/sparse_structure_flow.cpython-312.pyc +0 -0
  8. trellis/models/__pycache__/sparse_structure_vae.cpython-312.pyc +0 -0
  9. trellis/models/__pycache__/structured_latent_flow.cpython-312.pyc +0 -0
  10. trellis/models/sparse_structure_flow.py +200 -0
  11. trellis/models/sparse_structure_vae.py +306 -0
  12. trellis/models/structured_latent_flow.py +262 -0
  13. trellis/models/structured_latent_vae/__init__.py +4 -0
  14. trellis/models/structured_latent_vae/__pycache__/__init__.cpython-312.pyc +0 -0
  15. trellis/models/structured_latent_vae/__pycache__/base.cpython-312.pyc +0 -0
  16. trellis/models/structured_latent_vae/__pycache__/decoder_gs.cpython-312.pyc +0 -0
  17. trellis/models/structured_latent_vae/__pycache__/decoder_mesh.cpython-312.pyc +0 -0
  18. trellis/models/structured_latent_vae/__pycache__/decoder_rf.cpython-312.pyc +0 -0
  19. trellis/models/structured_latent_vae/__pycache__/encoder.cpython-312.pyc +0 -0
  20. trellis/models/structured_latent_vae/base.py +117 -0
  21. trellis/models/structured_latent_vae/decoder_gs.py +122 -0
  22. trellis/models/structured_latent_vae/decoder_mesh.py +167 -0
  23. trellis/models/structured_latent_vae/decoder_rf.py +104 -0
  24. trellis/models/structured_latent_vae/encoder.py +72 -0
  25. trellis/modules/__pycache__/norm.cpython-312.pyc +0 -0
  26. trellis/modules/__pycache__/spatial.cpython-312.pyc +0 -0
  27. trellis/modules/__pycache__/utils.cpython-312.pyc +0 -0
  28. trellis/modules/attention/__init__.py +36 -0
  29. trellis/modules/attention/__pycache__/__init__.cpython-312.pyc +0 -0
  30. trellis/modules/attention/__pycache__/full_attn.cpython-312.pyc +0 -0
  31. trellis/modules/attention/__pycache__/modules.cpython-312.pyc +0 -0
  32. trellis/modules/attention/full_attn.py +140 -0
  33. trellis/modules/attention/modules.py +146 -0
  34. trellis/modules/norm.py +25 -0
  35. trellis/modules/sparse/__init__.py +102 -0
  36. trellis/modules/sparse/__pycache__/__init__.cpython-312.pyc +0 -0
  37. trellis/modules/sparse/__pycache__/basic.cpython-312.pyc +0 -0
  38. trellis/modules/sparse/__pycache__/linear.cpython-312.pyc +0 -0
  39. trellis/modules/sparse/__pycache__/nonlinearity.cpython-312.pyc +0 -0
  40. trellis/modules/sparse/__pycache__/norm.cpython-312.pyc +0 -0
  41. trellis/modules/sparse/__pycache__/spatial.cpython-312.pyc +0 -0
  42. trellis/modules/sparse/attention/__init__.py +4 -0
  43. trellis/modules/sparse/attention/__pycache__/__init__.cpython-312.pyc +0 -0
  44. trellis/modules/sparse/attention/__pycache__/full_attn.cpython-312.pyc +0 -0
  45. trellis/modules/sparse/attention/__pycache__/modules.cpython-312.pyc +0 -0
  46. trellis/modules/sparse/attention/__pycache__/serialized_attn.cpython-312.pyc +0 -0
  47. trellis/modules/sparse/attention/__pycache__/windowed_attn.cpython-312.pyc +0 -0
  48. trellis/modules/sparse/attention/full_attn.py +215 -0
  49. trellis/modules/sparse/attention/modules.py +139 -0
  50. trellis/modules/sparse/attention/serialized_attn.py +193 -0
app.py CHANGED
@@ -1,22 +1,25 @@
1
  import gradio as gr
2
  import os
3
 
4
- # Import constants
5
  import numpy as np
 
 
6
  import torch
7
- from typing import Optional, Union, List, Tuple
8
-
9
  from PIL import Image, ImageFilter
10
- import cv2
11
  import utils.constants as constants
12
-
13
  from haishoku.haishoku import Haishoku
14
 
15
  from tempfile import NamedTemporaryFile
16
  import atexit
17
  import random
18
  #import accelerate
19
- from transformers import AutoTokenizer , DPTImageProcessor, DPTForDepthEstimation
20
  from pathlib import Path
21
 
22
  import logging
@@ -33,7 +36,13 @@ from utils.color_utils import (
33
  detect_color_format,
34
  update_color_opacity,
35
  )
36
- from utils.misc import (get_filename, pause, convert_ratio_to_dimensions) #install_cuda_toolkit,install_torch, _get_output, setup_runtime_env)
37
 
38
  from utils.image_utils import (
39
  change_color,
@@ -95,10 +104,20 @@ import spaces
95
 
96
  input_image_palette = []
97
  current_prerendered_image = gr.State("./images/images/Beeuty-1.png")
 
98
 
99
  # Register the cleanup function
100
  atexit.register(cleanup_temp_files)
101
102
  def hex_create(hex_size, border_size, input_image_path, start_x, start_y, end_x, end_y, rotation, background_color_hex, background_opacity, border_color_hex, border_opacity, fill_hex, excluded_colors_var, filter_color, x_spacing, y_spacing, add_hex_text_option=None, custom_text_list=None, custom_text_color_list=None):
103
  global input_image_palette
104
 
@@ -504,8 +523,7 @@ def generate_ai_image_local (
504
 
505
 
506
  def generate_input_image_click(image_input, map_option, prompt_textbox_value, negative_prompt_textbox_value, model_textbox_value, randomize_seed=True, seed=None, use_conditioned_image=False, strength=0.5, image_format="16:9", scale_factor=(8/3), progress=gr.Progress(track_tqdm=True)):
507
- if randomize_seed:
508
- seed = random.randint(0, constants.MAX_SEED)
509
 
510
  # Get the model and LoRA weights
511
  model, lora_weights = get_model_and_lora(model_textbox_value)
@@ -600,175 +618,165 @@ def add_border(image, mask_width, mask_height, blank_color):
600
  return shrink_and_paste_on_blank(bordered_image_output, mask_width, mask_height, margin_color)
601
 
602
 
603
- ################################## DEPTH ESTIMATION ##################################
604
- #-------------- ------------------------------------------------MODEL INITIALIZATION------------------------------------------------------------#
605
- # Load models once during module import
606
- image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-large",)
607
- depth_model = DPTForDepthEstimation.from_pretrained("Intel/dpt-large", ignore_mismatched_sizes=True)
608
 
609
- def create_3d_obj(rgb_image, raw_depth, image_path, depth=10, z_scale=200):
610
- """
611
- Creates a 3D object from RGB and depth images.
612
613
  Args:
614
- rgb_image (np.ndarray): The RGB image as a NumPy array.
615
- raw_depth (np.ndarray): The raw depth data.
616
- image_path (Path): The path to the original image.
617
- depth (int, optional): Depth parameter for Poisson reconstruction. Defaults to 10.
618
- z_scale (float, optional): Scaling factor for the Z-axis. Defaults to 200.
619
-
620
  Returns:
621
- str: The file path to the saved GLTF model.
622
  """
623
- import open3d as o3d
624
- # Normalize the depth image
625
- depth_image = ((raw_depth - raw_depth.min()) / (raw_depth.max() - raw_depth.min()) * 255).astype("uint8")
626
- depth_o3d = o3d.geometry.Image(depth_image)
627
- image_o3d = o3d.geometry.Image(rgb_image)
628
-
629
- # Create RGBD image
630
- rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth(
631
- image_o3d, depth_o3d, convert_rgb_to_intensity=False
632
  )
633
-
634
- height, width = depth_image.shape
635
-
636
- # Define camera intrinsics
637
- camera_intrinsic = o3d.camera.PinholeCameraIntrinsic(
638
- width,
639
- height,
640
- fx=z_scale,
641
- fy=z_scale,
642
- cx=width / 2.0,
643
- cy=height / 2.0,
644
  )
645
 
646
- # Generate point cloud from RGBD image
647
- pcd = o3d.geometry.PointCloud.create_from_rgbd_image(rgbd_image, camera_intrinsic)
 
648
 
649
- # Scale the Z dimension
650
- points = np.asarray(pcd.points)
651
- depth_scaled = ((raw_depth - raw_depth.min()) / (raw_depth.max() - raw_depth.min())) * (z_scale*100)
652
- z_values = depth_scaled.flatten()[:len(points)]
653
- points[:, 2] *= z_values
654
- pcd.points = o3d.utility.Vector3dVector(points)
655
 
656
- # Estimate and orient normals
657
- pcd.estimate_normals(
658
- search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.01, max_nn=60)
659
- )
660
- pcd.orient_normals_towards_camera_location(camera_location=np.array([0.0, 0.0, 1.5 ]))
661
-
662
- # Apply transformations
663
- pcd.transform([[1, 0, 0, 0],
664
- [0, -1, 0, 0],
665
- [0, 0, -1, 0],
666
- [0, 0, 0, 1]])
667
- pcd.transform([[-1, 0, 0, 0],
668
- [0, 1, 0, 0],
669
- [0, 0, 1, 0],
670
- [0, 0, 0, 1]])
671
-
672
- # Perform Poisson surface reconstruction
673
- print(f"Running Poisson surface reconstruction with depth {depth}")
674
- mesh_raw, densities = o3d.geometry.TriangleMesh.create_from_point_cloud_poisson(
675
- pcd, depth=depth, width=0, scale=1.1, linear_fit=True
676
- )
677
- print(f"Raw mesh vertices: {len(mesh_raw.vertices)}, triangles: {len(mesh_raw.triangles)}")
678
 
679
- # Simplify the mesh using vertex clustering
680
- voxel_size = max(mesh_raw.get_max_bound() - mesh_raw.get_min_bound()) / (max(width, height) * 0.8)
681
- mesh = mesh_raw.simplify_vertex_clustering(
682
- voxel_size=voxel_size,
683
- contraction=o3d.geometry.SimplificationContraction.Average,
684
- )
685
- print(f"Simplified mesh vertices: {len(mesh.vertices)}, triangles: {len(mesh.triangles)}")
686
 
687
- # Crop the mesh to the bounding box of the point cloud
688
- bbox = pcd.get_axis_aligned_bounding_box()
689
- mesh_crop = mesh.crop(bbox)
690
 
691
- # Save the mesh as a GLTF file
692
- temp_dir = Path.cwd() / "models"
693
- temp_dir.mkdir(exist_ok=True)
694
- gltf_path = str(temp_dir / f"{image_path.stem}.gltf")
695
- o3d.io.write_triangle_mesh(gltf_path, mesh_crop, write_triangle_uvs=True)
696
- return gltf_path
697
 
698
- @spaces.GPU()
699
- def depth_process_image(image_path, resized_width=800, z_scale=208):
700
  """
701
- Processes the input image to generate a depth map and a 3D mesh reconstruction.
702
 
703
  Args:
704
- image_path (str): The file path to the input image.
 
 
705
 
706
  Returns:
707
- list: A list containing the depth image, 3D mesh reconstruction, and GLTF file path.
708
  """
709
 
710
- image_path = Path(image_path)
711
- if not image_path.exists():
712
- raise ValueError("Image file not found")
713
-
714
- # Load and resize the image
715
- image_raw = Image.open(image_path).convert("RGB")
716
- print(f"Original size: {image_raw.size}")
717
- resized_height = int(resized_width * image_raw.size[1] / image_raw.size[0])
718
- image = image_raw.resize((resized_width, resized_height), Image.Resampling.LANCZOS)
719
- print(f"Resized size: {image.size}")
720
-
721
- # Prepare image for the model
722
- encoding = image_processor(image, return_tensors="pt")
723
-
724
- # Perform depth estimation
725
- with torch.no_grad():
726
- outputs = depth_model(**encoding)
727
- predicted_depth = outputs.predicted_depth
728
-
729
- # Interpolate depth to match the image size
730
- prediction = torch.nn.functional.interpolate(
731
- predicted_depth.unsqueeze(1),
732
- size=(image.height, image.width),
733
- mode="bicubic",
734
- align_corners=False,
735
- ).squeeze()
736
-
737
- # Normalize the depth image to 8-bit
738
- if torch.cuda.is_available():
739
- prediction = prediction.numpy()
740
- else:
741
- prediction = prediction.cpu().numpy()
742
- depth_min, depth_max = prediction.min(), prediction.max()
743
- depth_image = ((prediction - depth_min) / (depth_max - depth_min) * 255).astype("uint8")
744
-
745
- try:
746
- gltf_path = create_3d_obj(np.array(image), prediction, image_path, depth=10, z_scale=z_scale)
747
- except Exception:
748
- gltf_path = create_3d_obj(np.array(image), prediction, image_path, depth=8, z_scale=z_scale)
749
-
750
- img = Image.fromarray(depth_image)
751
-
752
- if torch.cuda.is_available():
753
- torch.cuda.empty_cache()
754
- torch.cuda.ipc_collect()
755
- return [img, gltf_path, gltf_path]
756
-
757
- def generate_depth_and_3d(input_image_path, resize_width=800, z_scale=1.0):
758
- return depth_process_image(input_image_path, resize_width, z_scale)
759
760
 
761
- def generate_depth_button_click(depth_image_source, resize_width, z_scale, input_image, output_image, overlay_image, bordered_image_output):
762
- if depth_image_source == "Input Image":
763
- image_path = input_image
764
- elif depth_image_source == "Output Image":
765
- image_path = output_image
766
- elif depth_image_source == "Image with Margins":
767
- image_path = bordered_image_output
768
- else:
769
- image_path = overlay_image
770
 
771
- return generate_depth_and_3d(image_path, resize_width, z_scale)
772
 
773
  @spaces.GPU()
774
  def getVersions():
@@ -831,7 +839,7 @@ with gr.Blocks(css_paths="style_20250128.css", title=title, theme='Surn/beeuty',
831
  Join the hive and start creating with HexaGrid Creator today!
832
 
833
  """, elem_classes="intro")
834
- with gr.Row():
835
  with gr.Column(scale=2):
836
  input_image = gr.Image(
837
  label="Input Image",
@@ -1072,35 +1080,38 @@ with gr.Blocks(css_paths="style_20250128.css", title=title, theme='Surn/beeuty',
1072
  with gr.Row():
1073
  bordered_image_output = gr.Image(label="Image with Margins", image_mode="RGBA", elem_classes="centered solid imgcontainer", format="PNG", type="filepath", key="ImgBordered",interactive=False, show_download_button=True, show_fullscreen_button=True, show_share_button=True)
1074
 
1075
- with gr.Accordion("Height Maps and 3D", open = False):
1076
  with gr.Row():
1077
  with gr.Column():
1078
- resized_width_slider = gr.Slider(
1079
- minimum=256,
1080
- maximum=1760,
1081
- step=16,
1082
- value=800,
1083
- label="Resized Width",
1084
- info="Adjust the width to which the input image is resized."
1085
- )
1086
- z_scale_slider = gr.Slider(
1087
- minimum=0.2,
1088
- maximum=3.0,
1089
- step=0.01,
1090
- value=0.5,
1091
- label="Z-Scale",
1092
- info="Adjust the scaling factor for the Z-axis in the 3D model."
1093
- )
1094
  with gr.Column():
1095
- depth_image_source = gr.Radio(label="Depth Image Source", choices=["Input Image", "Output Image", "Overlay Image","Image with Margins"], value="Input Image")
1096
  with gr.Row():
1097
- generate_depth_button = gr.Button("Generate Depth Map and 3D Model From Selected Image", elem_classes="solid", variant="secondary")
 
1098
  with gr.Row():
1099
- with gr.Column(scale=1):
1100
- depth_map_output = gr.Image(label="Depth Map", image_mode="L", elem_classes="centered solid imgcontainer", format="PNG", type="filepath", key="ImgDepth",interactive=False, show_download_button=True, show_fullscreen_button=True, show_share_button=True)
1101
- with gr.Column(scale=2):
1102
- model_output = gr.Model3D(label="3D Model", clear_color=[1.0, 1.0, 1.0, 1.0], key="Img3D", elem_classes="centered solid imgcontainer",interactive=True)
1103
  model_file = gr.File(label="3D GLTF", elem_classes="solid small centered")
 
 
1104
  with gr.Row():
1105
  gr.Examples(examples=[
1106
  ["assets//examples//hex_map_p1.png", False, True, -32,-31,80,80,-1.8,0,35,0,1,"#FFD0D0", 15],
@@ -1116,6 +1127,10 @@ with gr.Blocks(css_paths="style_20250128.css", title=title, theme='Surn/beeuty',
1116
  with gr.Row():
1117
  gr.HTML(value=getVersions(), visible=True, elem_id="versions")
1118
1119
  color_display.select(on_color_display_select,inputs=[color_display], outputs=[selected_row])
1120
  color_display.input(on_input,inputs=[color_display], outputs=[color_display, gr.State(excluded_color_list)])
1121
 
@@ -1133,11 +1148,7 @@ with gr.Blocks(css_paths="style_20250128.css", title=title, theme='Surn/beeuty',
1133
  inputs=[input_image,map_options, prompt_textbox, negative_prompt_textbox, model_textbox, randomize_seed, seed_slider, gr.State(False), gr.State(0.5), image_size_ratio],
1134
  outputs=[input_image, seed_slider], scroll_to_output=True
1135
  )
1136
- generate_depth_button.click(
1137
- fn=generate_depth_button_click,
1138
- inputs=[depth_image_source, resized_width_slider, z_scale_slider, input_image, output_image, overlay_image, bordered_image_output],
1139
- outputs=[depth_map_output, model_output, model_file], scroll_to_output=True
1140
- )
1141
  model_textbox.change(
1142
  fn=update_prompt_notes,
1143
  inputs=model_textbox,
@@ -1202,6 +1213,43 @@ with gr.Blocks(css_paths="style_20250128.css", title=title, theme='Surn/beeuty',
1202
  outputs=[bordered_image_output],
1203
  scroll_to_output=True
1204
  )
1205
 
1206
  if __name__ == "__main__":
1207
  constants.load_env_vars(constants.dotenv_path)
@@ -1219,6 +1267,16 @@ if __name__ == "__main__":
1219
  # setup_runtime_env()
1220
  #main(os.getenv("DEBUG") == "1")
1221
  #main()
1222
  hexaGrid.queue(default_concurrency_limit=1,max_size=12,api_open=False)
1223
- hexaGrid.launch(allowed_paths=["assets","/","./assets","images","./images", "./images/prerendered"], favicon_path="./assets/favicon.ico", max_file_size="10mb")
1224
 
 
1
  import gradio as gr
2
  import os
3
 
 
4
  import numpy as np
5
+ os.environ['SPCONV_ALGO'] = 'native'
6
+ from typing import *
7
  import torch
8
+ import imageio
9
+ import shutil
10
  from PIL import Image, ImageFilter
11
+ from easydict import EasyDict as edict
12
  import utils.constants as constants
 
13
  from haishoku.haishoku import Haishoku
14
 
15
  from tempfile import NamedTemporaryFile
16
  import atexit
17
  import random
18
  #import accelerate
19
+ from transformers import AutoTokenizer
20
+ from trellis.pipelines import TrellisImageTo3DPipeline
21
+ from trellis.representations import Gaussian, MeshExtractResult
22
+ from trellis.utils import render_utils, postprocessing_utils
23
  from pathlib import Path
24
 
25
  import logging
 
36
  detect_color_format,
37
  update_color_opacity,
38
  )
39
+ from utils.misc import (
40
+ get_filename,
41
+ pause,
42
+ convert_ratio_to_dimensions,
43
+ get_seed,
44
+ get_output_name
45
+ ) #install_cuda_toolkit,install_torch, _get_output, setup_runtime_env)
46
 
47
  from utils.image_utils import (
48
  change_color,
 
104
 
105
  input_image_palette = []
106
  current_prerendered_image = gr.State("./images/images/Beeuty-1.png")
107
+ user_dir = constants.TMPDIR
108
 
109
  # Register the cleanup function
110
  atexit.register(cleanup_temp_files)
111
 
112
+ def start_session(req: gr.Request):
113
+ user_dir = os.path.join(constants.TMPDIR, str(req.session_hash))
114
+ os.makedirs(user_dir, exist_ok=True)
115
+
116
+
117
+ def end_session(req: gr.Request):
118
+ user_dir = os.path.join(constants.TMPDIR, str(req.session_hash))
119
+ shutil.rmtree(user_dir)
120
+
121
  def hex_create(hex_size, border_size, input_image_path, start_x, start_y, end_x, end_y, rotation, background_color_hex, background_opacity, border_color_hex, border_opacity, fill_hex, excluded_colors_var, filter_color, x_spacing, y_spacing, add_hex_text_option=None, custom_text_list=None, custom_text_color_list=None):
122
  global input_image_palette
123
 
 
523
 
524
 
525
  def generate_input_image_click(image_input, map_option, prompt_textbox_value, negative_prompt_textbox_value, model_textbox_value, randomize_seed=True, seed=None, use_conditioned_image=False, strength=0.5, image_format="16:9", scale_factor=(8/3), progress=gr.Progress(track_tqdm=True)):
526
+ seed = get_seed(randomize_seed, seed)
 
527
 
528
  # Get the model and LoRA weights
529
  model, lora_weights = get_model_and_lora(model_textbox_value)
 
618
  return shrink_and_paste_on_blank(bordered_image_output, mask_width, mask_height, margin_color)
619
 
620
 
621
+ ####################################### DEPTH ESTIMATION #######################################
622
623
 
624
+ def preprocess_image(image: Image.Image) -> Image.Image:
625
+ """
626
+ Preprocess the input image.
627
  Args:
628
+ image (Image.Image): The input image.
629
  Returns:
630
+ Image.Image: The preprocessed image.
631
  """
632
+ processed_image = TRELLIS_PIPELINE.preprocess_image(image)
633
+ return processed_image
634
+
635
+
636
+ def pack_state(gs: Gaussian, mesh: MeshExtractResult, name: str) -> dict:
637
+ return {
638
+ 'gaussian': {
639
+ **gs.init_params,
640
+ '_xyz': gs._xyz.cpu().numpy(),
641
+ '_features_dc': gs._features_dc.cpu().numpy(),
642
+ '_scaling': gs._scaling.cpu().numpy(),
643
+ '_rotation': gs._rotation.cpu().numpy(),
644
+ '_opacity': gs._opacity.cpu().numpy(),
645
+ },
646
+ 'mesh': {
647
+ 'vertices': mesh.vertices.cpu().numpy(),
648
+ 'faces': mesh.faces.cpu().numpy(),
649
+ },
650
+ 'name': name
651
+ }
652
+
653
+
654
+ def unpack_state(state: dict) -> Tuple[Gaussian, edict, str]:
655
+ gs = Gaussian(
656
+ aabb=state['gaussian']['aabb'],
657
+ sh_degree=state['gaussian']['sh_degree'],
658
+ mininum_kernel_size=state['gaussian']['mininum_kernel_size'],
659
+ scaling_bias=state['gaussian']['scaling_bias'],
660
+ opacity_bias=state['gaussian']['opacity_bias'],
661
+ scaling_activation=state['gaussian']['scaling_activation'],
662
  )
663
+ gs._xyz = torch.tensor(state['gaussian']['_xyz'], device='cuda')
664
+ gs._features_dc = torch.tensor(state['gaussian']['_features_dc'], device='cuda')
665
+ gs._scaling = torch.tensor(state['gaussian']['_scaling'], device='cuda')
666
+ gs._rotation = torch.tensor(state['gaussian']['_rotation'], device='cuda')
667
+ gs._opacity = torch.tensor(state['gaussian']['_opacity'], device='cuda')
668
+
669
+ mesh = edict(
670
+ vertices=torch.tensor(state['mesh']['vertices'], device='cuda'),
671
+ faces=torch.tensor(state['mesh']['faces'], device='cuda'),
 
 
672
  )
673
 
674
+ name = state['name']
675
+
676
+ return gs, mesh, name
677
 
678
+ @spaces.GPU(duration=150,progress=gr.Progress(track_tqdm=True))
679
+ def generate_3d_asset(depth_image_source, randomize_seed, seed, input_image, output_image, overlay_image, bordered_image_output, req: gr.Request, progress=gr.Progress(track_tqdm=True)):
680
+ # Choose the image based on source
681
+ if depth_image_source == "Input Image":
682
+ image_path = input_image
683
+ elif depth_image_source == "Output Image":
684
+ image_path = output_image
685
+ elif depth_image_source == "Image with Margins":
686
+ image_path = bordered_image_output
687
+ else: # "Overlay Image"
688
+ image_path = overlay_image
689
 
690
+ output_name = get_output_name(input_image, output_image, overlay_image, bordered_image_output)
691
 
692
+ # Ensure the file exists
693
+ if not Path(image_path).exists():
694
+ raise ValueError("Image file not found.")
695
 
696
+ # Determine the final seed using default MAX_SEED from constants
697
+ final_seed = np.random.randint(0, constants.MAX_SEED) if randomize_seed else seed
 
698
 
699
+ # Open image using standardized defaults
700
+ image_raw = Image.open(image_path).convert("RGB")
701
 
702
+ # Preprocess and run the Trellis pipeline with fixed sampler settings
703
+ # Returns:
704
+ # dict: The information of the generated 3D model.
705
+ # str: The path to the video of the 3D model.
706
+ processed_image = TRELLIS_PIPELINE.preprocess_image(image_raw, max_resolution=1536)
707
+ outputs = TRELLIS_PIPELINE.run(
708
+ processed_image,
709
+ seed=final_seed,
710
+ formats=["gaussian", "mesh"],
711
+ preprocess_image=False,
712
+ sparse_structure_sampler_params={
713
+ "steps": 12,
714
+ "cfg_strength": 7.5,
715
+ },
716
+ slat_sampler_params={
717
+ "steps": 12,
718
+ "cfg_strength": 3.0,
719
+ },
720
+ )
721
+ # Save the video to a temporary file
722
+ user_dir = os.path.join(constants.TMPDIR, str(req.session_hash))
723
+ os.makedirs(user_dir, exist_ok=True)
724
+
725
+ video = render_utils.render_video(outputs['gaussian'][0], resolution=576, num_frames=60, r=1)['color']
726
+ snapshot_results = render_utils.render_snapshot(outputs['gaussian'][0], resolution=576)
727
+ depth_snapshot = snapshot_results['depth'][0]
728
+ video_geo = render_utils.render_video(outputs['mesh'][0], resolution=576, num_frames=30, r=1)['normal']
729
+ video = [np.concatenate([video[i], video_geo[i]], axis=1) for i in range(len(video))]
730
+ video_path = os.path.join(user_dir, f'{output_name}.mp4')
731
+ imageio.mimsave(video_path, video, fps=15)
732
+ state = pack_state(outputs['gaussian'][0], outputs['mesh'][0], output_name)
733
+ torch.cuda.empty_cache()
734
+ return [state, video_path, depth_snapshot]
735
+
736
+ @spaces.GPU(duration=90,progress=gr.Progress(track_tqdm=True))
737
+ def extract_glb(
738
+ state: dict,
739
+ mesh_simplify: float,
740
+ texture_size: int,
741
+ req: gr.Request,progress=gr.Progress(track_tqdm=True)
742
+ ) -> Tuple[str, str]:
743
  """
744
+ Extract a GLB file from the 3D model.
745
 
746
  Args:
747
+ state (dict): The state of the generated 3D model.
748
+ mesh_simplify (float): The mesh simplification factor.
749
+ texture_size (int): The texture resolution.
750
 
751
  Returns:
752
+ str: The path to the extracted GLB file.
753
  """
754
+ user_dir = os.path.join(constants.TMPDIR, str(req.session_hash))
755
+ gs, mesh, name = unpack_state(state)
756
+ glb = postprocessing_utils.to_glb(gs, mesh, simplify=mesh_simplify, texture_size=texture_size, verbose=False)
757
+ glb_path = os.path.join(user_dir, f'{name}.glb')
758
+ glb.export(glb_path)
759
+ torch.cuda.empty_cache()
760
+ return glb_path, glb_path
761
 
762
+ @spaces.GPU(progress=gr.Progress(track_tqdm=True))
763
+ def extract_gaussian(state: dict, req: gr.Request, progress=gr.Progress(track_tqdm=True)) -> Tuple[str, str]:
764
+ """
765
+ Extract a Gaussian file from the 3D model.
766
 
767
+ Args:
768
+ state (dict): The state of the generated 3D model.
769
 
770
+ Returns:
771
+ str: The path to the extracted Gaussian file.
772
+ """
773
+ user_dir = os.path.join(constants.TMPDIR, str(req.session_hash))
774
+ gs, _, name = unpack_state(state)
775
+ gaussian_path = os.path.join(user_dir, f'{name}.ply')
776
+ gs.save_ply(gaussian_path)
777
+ torch.cuda.empty_cache()
778
+ return gaussian_path, gaussian_path
779
 
 
780
 
781
  @spaces.GPU()
782
  def getVersions():
 
839
  Join the hive and start creating with HexaGrid Creator today!
840
 
841
  """, elem_classes="intro")
842
+ with gr.Row():
843
  with gr.Column(scale=2):
844
  input_image = gr.Image(
845
  label="Input Image",
 
1080
  with gr.Row():
1081
  bordered_image_output = gr.Image(label="Image with Margins", image_mode="RGBA", elem_classes="centered solid imgcontainer", format="PNG", type="filepath", key="ImgBordered",interactive=False, show_download_button=True, show_fullscreen_button=True, show_share_button=True)
1082
 
1083
+ with gr.Accordion("Height Maps and 3D", open=False):
1084
  with gr.Row():
1085
  with gr.Column():
1086
+ # Use standard seed settings only
1087
+ seed_3d = gr.Slider(0, constants.MAX_SEED, label="Seed (3D Generation)", value=0, step=1)
1088
+ randomize_seed_3d = gr.Checkbox(label="Randomize Seed (3D Generation)", value=True)
1089
  with gr.Column():
1090
+ depth_image_source = gr.Radio(
1091
+ label="Depth Image Source",
1092
+ choices=["Input Image", "Output Image", "Overlay Image", "Image with Margins"],
1093
+ value="Input Image"
1094
+ )
1095
+ with gr.Row():
1096
+ generate_3d_asset_button = gr.Button("Generate 3D Asset", elem_classes="solid", variant="secondary")
1097
  with gr.Row():
1098
+ # For display: video output and 3D model preview (GLTF)
1099
+ video_output = gr.Video(label="3D Asset Video", autoplay=True, loop=True, height=400)
1100
  with gr.Row():
1101
+ depth_output = gr.Image(label="Depth Map", image_mode="L", elem_classes="centered solid imgcontainer", format="PNG", type="filepath", key="DepthOutput",interactive=False, show_download_button=True, show_fullscreen_button=True, show_share_button=True)
1102
+ with gr.Accordion("GLB Extraction Settings", open=False):
1103
+ with gr.Row():
1104
+ mesh_simplify = gr.Slider(0.9, 0.98, label="Simplify", value=0.95, step=0.01)
1105
+ texture_size = gr.Slider(512, 2048, label="Texture Size", value=1024, step=512)
1106
+ with gr.Row():
1107
+ extract_glb_btn = gr.Button("Extract GLB", interactive=False)
1108
+ extract_gaussian_btn = gr.Button("Extract Gaussian", interactive=False)
1109
+ with gr.Row():
1110
+ model_output = gr.Model3D(label="Extracted 3D Model", clear_color=[1.0, 1.0, 1.0, 1.0],
1111
+ elem_classes="centered solid imgcontainer", interactive=True)
1112
  model_file = gr.File(label="3D GLTF", elem_classes="solid small centered")
1113
+ is_multiimage = gr.State(False)
1114
+ output_buf = gr.State()
1115
  with gr.Row():
1116
  gr.Examples(examples=[
1117
  ["assets//examples//hex_map_p1.png", False, True, -32,-31,80,80,-1.8,0,35,0,1,"#FFD0D0", 15],
 
1127
  with gr.Row():
1128
  gr.HTML(value=getVersions(), visible=True, elem_id="versions")
1129
 
1130
+ # Handlers
1131
+ hexaGrid.load(start_session)
1132
+ hexaGrid.unload(end_session)
1133
+
1134
  color_display.select(on_color_display_select,inputs=[color_display], outputs=[selected_row])
1135
  color_display.input(on_input,inputs=[color_display], outputs=[color_display, gr.State(excluded_color_list)])
1136
 
 
1148
  inputs=[input_image,map_options, prompt_textbox, negative_prompt_textbox, model_textbox, randomize_seed, seed_slider, gr.State(False), gr.State(0.5), image_size_ratio],
1149
  outputs=[input_image, seed_slider], scroll_to_output=True
1150
  )
1151
+
1152
  model_textbox.change(
1153
  fn=update_prompt_notes,
1154
  inputs=model_textbox,
 
1213
  outputs=[bordered_image_output],
1214
  scroll_to_output=True
1215
  )
1216
+ # 3D Generation
1217
+
1218
+ # generate_depth_button.click(
1219
+ # fn=generate_depth_button_click,
1220
+ # inputs=[depth_image_source, resized_width_slider, z_scale_slider, input_image, output_image, overlay_image, bordered_image_output],
1221
+ # outputs=[depth_map_output, model_output, model_file], scroll_to_output=True
1222
+ # )
1223
+
1224
+ # Chain the buttons
1225
+ generate_3d_asset_button.click(
1226
+ fn=generate_3d_asset,
1227
+ inputs=[depth_image_source, randomize_seed_3d, seed_3d, input_image, output_image, overlay_image, bordered_image_output],
1228
+ outputs=[output_buf, video_output, depth_output],
1229
+ scroll_to_output=True
1230
+ ).then(
1231
+ lambda: (gr.Button(interactive=True), gr.Button(interactive=True)),
1232
+ outputs=[extract_glb_btn, extract_gaussian_btn]
1233
+ )
1234
+
1235
+ # Extraction callbacks remain unchanged from previous behavior
1236
+ extract_glb_btn.click(
1237
+ fn=extract_glb,
1238
+ inputs=[output_buf, mesh_simplify, texture_size],
1239
+ outputs=[model_output, model_file]
1240
+ ).then(
1241
+ lambda: gr.Button(interactive=True),
1242
+ outputs=[model_file]
1243
+ )
1244
+
1245
+ extract_gaussian_btn.click(
1246
+ fn=extract_gaussian,
1247
+ inputs=[output_buf],
1248
+ outputs=[model_output, model_file]
1249
+ ).then(
1250
+ lambda: gr.Button(interactive=True),
1251
+ outputs=[model_file]
1252
+ )
1253
 
1254
  if __name__ == "__main__":
1255
  constants.load_env_vars(constants.dotenv_path)
 
1267
  # setup_runtime_env()
1268
  #main(os.getenv("DEBUG") == "1")
1269
  #main()
1270
+
1271
+
1272
+ #-------------- ------------------------------------------------MODEL INITIALIZATION------------------------------------------------------------#
1273
+ # Load models once during module import
1274
+ TRELLIS_PIPELINE = TrellisImageTo3DPipeline.from_pretrained("JeffreyXiang/TRELLIS-image-large")
1275
+ TRELLIS_PIPELINE.cuda()
1276
+ try:
1277
+ TRELLIS_PIPELINE.preprocess_image(Image.fromarray(np.zeros((512, 512, 3), dtype=np.uint8))) # Preload rembg
1278
+ except:
1279
+ pass
1280
  hexaGrid.queue(default_concurrency_limit=1,max_size=12,api_open=False)
1281
+ hexaGrid.launch(allowed_paths=["assets","/","./assets","images","./images", "./images/prerendered", 'e:/TMP'], favicon_path="./assets/favicon.ico", max_file_size="10mb")
1282
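
For reference, the new Trellis path added to app.py above can be exercised outside of Gradio. The sketch below strings together the same calls the diff introduces (TrellisImageTo3DPipeline.from_pretrained, preprocess_image, run, render_utils.render_video, postprocessing_utils.to_glb); it assumes a CUDA GPU and the dependencies from requirements.txt, and "input.png", the seed, and the output filenames are placeholders rather than values taken from this commit.

import imageio
import torch
from PIL import Image
from trellis.pipelines import TrellisImageTo3DPipeline
from trellis.utils import render_utils, postprocessing_utils

# Load the pipeline once and move it to the GPU, as app.py does at startup.
pipeline = TrellisImageTo3DPipeline.from_pretrained("JeffreyXiang/TRELLIS-image-large")
pipeline.cuda()

# "input.png" is a placeholder input image.
image = Image.open("input.png").convert("RGB")
processed = pipeline.preprocess_image(image, max_resolution=1536)

# Same sampler settings as generate_3d_asset() in the diff above.
outputs = pipeline.run(
    processed,
    seed=42,
    formats=["gaussian", "mesh"],
    preprocess_image=False,
    sparse_structure_sampler_params={"steps": 12, "cfg_strength": 7.5},
    slat_sampler_params={"steps": 12, "cfg_strength": 3.0},
)

# Turntable preview of the Gaussian splat output, saved as an MP4.
frames = render_utils.render_video(outputs["gaussian"][0], resolution=576, num_frames=60, r=1)["color"]
imageio.mimsave("asset_preview.mp4", frames, fps=15)

# Bake the Gaussian/mesh pair into a textured GLB, mirroring extract_glb().
glb = postprocessing_utils.to_glb(outputs["gaussian"][0], outputs["mesh"][0], simplify=0.95, texture_size=1024, verbose=False)
glb.export("asset.glb")
torch.cuda.empty_cache()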
 
requirements.txt CHANGED
@@ -5,9 +5,9 @@ transformers
5
  accelerate
6
  safetensors
7
  sentencepiece
8
- invisible_watermark
9
 
10
- # Updated versions 2.4.0+cu124
11
  #--extra-index-url https://download.pytorch.org/whl/cu124
12
  #torch==2.6.0 --index-url https://download.pytorch.org/whl/cu124/torch-2.4.0%2Bcu124-cp310-cp310-linux_x86_64.whl#sha256=2cb28155635e3d3d0be198e3f3e7457a1d7b99e8c2eedc73fe22fab574d11a4c
13
  #torchvision==0.21.0 --index-url https://download.pytorch.org/whl/cu124/torchvision-0.19.0%2Bcu124-cp310-cp310-linux_x86_64.whl#sha256=82cf10450537aeb9584ceaf53633f177bb809d563c5d64526f4b9be7668b2769
@@ -17,7 +17,7 @@ invisible_watermark
17
 
18
  #generic Torch versions
19
  --extra-index-url https://download.pytorch.org/whl/cu124
20
- torch
21
  torchvision
22
  #xformers #==0.0.29.post3
23
 
@@ -26,16 +26,14 @@ Haishoku
26
  pybind11>=2.12
27
  huggingface_hub
28
  # git+https://github.com/huggingface/[email protected]#egg=transformers
 
29
  #gradio[oauth]
30
- Pillow>=11.1.0
31
- numpy
32
  requests
33
-
34
  peft
35
  opencv-python
36
  protobuf #==3.20.3
37
- safetensors
38
- sentencepiece
39
  git+https://github.com/asomoza/image_gen_aux.git
40
  #git+https://github.com/huggingface/optimum.git
41
  #git+https://github.com/triton-lang/triton.git #-not windows supported --disable in environment variable
@@ -49,4 +47,31 @@ pangocairocffi
49
  #tensorflow
50
  cairosvg
51
  python-dotenv
52
- #git+https://github.com/gradio-app/[email protected]#egg=gradio
5
  accelerate
6
  safetensors
7
  sentencepiece
8
+ #invisible_watermark
9
 
10
+ # Updated versions 2.6.0+cu124
11
  #--extra-index-url https://download.pytorch.org/whl/cu124
12
  #torch==2.6.0 --index-url https://download.pytorch.org/whl/cu124/torch-2.4.0%2Bcu124-cp310-cp310-linux_x86_64.whl#sha256=2cb28155635e3d3d0be198e3f3e7457a1d7b99e8c2eedc73fe22fab574d11a4c
13
  #torchvision==0.21.0 --index-url https://download.pytorch.org/whl/cu124/torchvision-0.19.0%2Bcu124-cp310-cp310-linux_x86_64.whl#sha256=82cf10450537aeb9584ceaf53633f177bb809d563c5d64526f4b9be7668b2769
 
17
 
18
  #generic Torch versions
19
  --extra-index-url https://download.pytorch.org/whl/cu124
20
+ torch==2.6.0
21
  torchvision
22
  #xformers #==0.0.29.post3
23
 
 
26
  pybind11>=2.12
27
  huggingface_hub
28
  # git+https://github.com/huggingface/[email protected]#egg=transformers
29
+ #git+https://github.com/gradio-app/[email protected]
30
  #gradio[oauth]
31
+ Pillow
32
+ numpy==1.26.4
33
  requests
 
34
  peft
35
  opencv-python
36
  protobuf #==3.20.3
 
 
37
  git+https://github.com/asomoza/image_gen_aux.git
38
  #git+https://github.com/huggingface/optimum.git
39
  #git+https://github.com/triton-lang/triton.git #-not windows supported --disable in environment variable
 
47
  #tensorflow
48
  cairosvg
49
  python-dotenv
50
+
51
+
52
+ #####3D for Trellis#####
53
+
54
+ imageio==2.36.1
55
+ imageio-ffmpeg==0.5.1
56
+ tqdm==4.67.1
57
+ easydict==1.13
58
+ opencv-python-headless==4.10.0.84
59
+ scipy==1.14.1
60
+ rembg==2.0.60
61
+ onnxruntime==1.20.1
62
+ trimesh==4.5.3
63
+ xatlas==0.0.9
64
+ pyvista==0.44.2
65
+ pymeshfix==0.17.0
66
+ igraph==0.11.8
67
+ git+https://github.com/EasternJournalist/utils3d.git@9a4eb15e4021b67b12c460c7057d642626897ec8
68
+ spconv-cu124==2.3.8
69
+ gradio_litmodel3d==0.0.1
70
+ #linux only
71
+ #https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.0.post2/flash_attn-2.7.0.post2+cu12torch2.4cxx11abiFALSE-cp310-cp310-linux_x86_64.whl
72
+ #https://huggingface.co/spaces/JeffreyXiang/TRELLIS/resolve/main/wheels/diff_gaussian_rasterization-0.0.0-cp310-cp310-linux_x86_64.whl?download=true
73
+ #https://huggingface.co/spaces/JeffreyXiang/TRELLIS/resolve/main/wheels/nvdiffrast-0.3.3-cp310-cp310-linux_x86_64.whl?download=true
74
+ #Windows only
75
+ #https://huggingface.co/spaces/Surn/HexaGrid/main/wheels/flash_attn-2.7.4.post1-cp312-cp312-win_amd64.whl?download=true
76
+ #https://huggingface.co/spaces/Surn/HexaGrid/main/wheels/diff_gaussian_rasterization-0.0.0-cp312-cp312-win_amd64.whl?download=true
77
+ #https://huggingface.co/spaces/Surn/HexaGrid/main/wheels/nvdiffrast-0.3.3-py3-none-any.whl?download=true
trellis/__init__.py ADDED
@@ -0,0 +1,6 @@
1
+ from . import models
2
+ from . import modules
3
+ from . import pipelines
4
+ from . import renderers
5
+ from . import representations
6
+ from . import utils
trellis/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (343 Bytes).
 
trellis/models/__init__.py ADDED
@@ -0,0 +1,70 @@
1
+ import importlib
2
+
3
+ __attributes = {
4
+ 'SparseStructureEncoder': 'sparse_structure_vae',
5
+ 'SparseStructureDecoder': 'sparse_structure_vae',
6
+ 'SparseStructureFlowModel': 'sparse_structure_flow',
7
+ 'SLatEncoder': 'structured_latent_vae',
8
+ 'SLatGaussianDecoder': 'structured_latent_vae',
9
+ 'SLatRadianceFieldDecoder': 'structured_latent_vae',
10
+ 'SLatMeshDecoder': 'structured_latent_vae',
11
+ 'SLatFlowModel': 'structured_latent_flow',
12
+ }
13
+
14
+ __submodules = []
15
+
16
+ __all__ = list(__attributes.keys()) + __submodules
17
+
18
+ def __getattr__(name):
19
+ if name not in globals():
20
+ if name in __attributes:
21
+ module_name = __attributes[name]
22
+ module = importlib.import_module(f".{module_name}", __name__)
23
+ globals()[name] = getattr(module, name)
24
+ elif name in __submodules:
25
+ module = importlib.import_module(f".{name}", __name__)
26
+ globals()[name] = module
27
+ else:
28
+ raise AttributeError(f"module {__name__} has no attribute {name}")
29
+ return globals()[name]
30
+
31
+
32
+ def from_pretrained(path: str, **kwargs):
33
+ """
34
+ Load a model from a pretrained checkpoint.
35
+
36
+ Args:
37
+ path: The path to the checkpoint. Can be either local path or a Hugging Face model name.
38
+ NOTE: config file and model file should take the name f'{path}.json' and f'{path}.safetensors' respectively.
39
+ **kwargs: Additional arguments for the model constructor.
40
+ """
41
+ import os
42
+ import json
43
+ from safetensors.torch import load_file
44
+ is_local = os.path.exists(f"{path}.json") and os.path.exists(f"{path}.safetensors")
45
+
46
+ if is_local:
47
+ config_file = f"{path}.json"
48
+ model_file = f"{path}.safetensors"
49
+ else:
50
+ from huggingface_hub import hf_hub_download
51
+ path_parts = path.split('/')
52
+ repo_id = f'{path_parts[0]}/{path_parts[1]}'
53
+ model_name = '/'.join(path_parts[2:])
54
+ config_file = hf_hub_download(repo_id, f"{model_name}.json")
55
+ model_file = hf_hub_download(repo_id, f"{model_name}.safetensors")
56
+
57
+ with open(config_file, 'r') as f:
58
+ config = json.load(f)
59
+ model = __getattr__(config['name'])(**config['args'], **kwargs)
60
+ model.load_state_dict(load_file(model_file))
61
+
62
+ return model
63
+
64
+
65
+ # For Pylance
66
+ if __name__ == '__main__':
67
+ from .sparse_structure_vae import SparseStructureEncoder, SparseStructureDecoder
68
+ from .sparse_structure_flow import SparseStructureFlowModel
69
+ from .structured_latent_vae import SLatEncoder, SLatGaussianDecoder, SLatRadianceFieldDecoder, SLatMeshDecoder
70
+ from .structured_latent_flow import SLatFlowModel
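
A note on the lazy-import layer above: trellis.models only resolves a class the first time it is requested, via __getattr__, and from_pretrained() follows a "<path>.json" + "<path>.safetensors" naming convention (local prefix or Hub repo). A minimal sketch, assuming the rest of the trellis package from this commit and its dependencies are installed; the checkpoint name in the comment is illustrative of the convention, not something this commit ships.

from trellis import models

# First access triggers importlib.import_module(".structured_latent_flow", ...)
# inside __getattr__ and caches the class in the package globals.
SLatFlowModel = models.SLatFlowModel
print(SLatFlowModel)

# from_pretrained() accepts either a local prefix such that "<prefix>.json" and
# "<prefix>.safetensors" both exist, or "<repo_id>/<model_name>" on the Hub.
# A hypothetical example of that convention (not verified against the Hub):
# model = models.from_pretrained("JeffreyXiang/TRELLIS-image-large/ckpts/slat_flow_img_dit_L_64l8p2_fp16")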
trellis/models/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (3.22 kB).
 
trellis/models/__pycache__/sparse_structure_flow.cpython-312.pyc ADDED
Binary file (12.1 kB).
 
trellis/models/__pycache__/sparse_structure_vae.cpython-312.pyc ADDED
Binary file (15.1 kB).
 
trellis/models/__pycache__/structured_latent_flow.cpython-312.pyc ADDED
Binary file (14.7 kB).
 
trellis/models/sparse_structure_flow.py ADDED
@@ -0,0 +1,200 @@
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+ import numpy as np
6
+ from ..modules.utils import convert_module_to_f16, convert_module_to_f32
7
+ from ..modules.transformer import AbsolutePositionEmbedder, ModulatedTransformerCrossBlock
8
+ from ..modules.spatial import patchify, unpatchify
9
+
10
+
11
+ class TimestepEmbedder(nn.Module):
12
+ """
13
+ Embeds scalar timesteps into vector representations.
14
+ """
15
+ def __init__(self, hidden_size, frequency_embedding_size=256):
16
+ super().__init__()
17
+ self.mlp = nn.Sequential(
18
+ nn.Linear(frequency_embedding_size, hidden_size, bias=True),
19
+ nn.SiLU(),
20
+ nn.Linear(hidden_size, hidden_size, bias=True),
21
+ )
22
+ self.frequency_embedding_size = frequency_embedding_size
23
+
24
+ @staticmethod
25
+ def timestep_embedding(t, dim, max_period=10000):
26
+ """
27
+ Create sinusoidal timestep embeddings.
28
+
29
+ Args:
30
+ t: a 1-D Tensor of N indices, one per batch element.
31
+ These may be fractional.
32
+ dim: the dimension of the output.
33
+ max_period: controls the minimum frequency of the embeddings.
34
+
35
+ Returns:
36
+ an (N, D) Tensor of positional embeddings.
37
+ """
38
+ # https://github.com/openai/glide-text2im/blob/main/glide_text2im/nn.py
39
+ half = dim // 2
40
+ freqs = torch.exp(
41
+ -np.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half
42
+ ).to(device=t.device)
43
+ args = t[:, None].float() * freqs[None]
44
+ embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
45
+ if dim % 2:
46
+ embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
47
+ return embedding
48
+
49
+ def forward(self, t):
50
+ t_freq = self.timestep_embedding(t, self.frequency_embedding_size)
51
+ t_emb = self.mlp(t_freq)
52
+ return t_emb
53
+
54
+
55
+ class SparseStructureFlowModel(nn.Module):
56
+ def __init__(
57
+ self,
58
+ resolution: int,
59
+ in_channels: int,
60
+ model_channels: int,
61
+ cond_channels: int,
62
+ out_channels: int,
63
+ num_blocks: int,
64
+ num_heads: Optional[int] = None,
65
+ num_head_channels: Optional[int] = 64,
66
+ mlp_ratio: float = 4,
67
+ patch_size: int = 2,
68
+ pe_mode: Literal["ape", "rope"] = "ape",
69
+ use_fp16: bool = False,
70
+ use_checkpoint: bool = False,
71
+ share_mod: bool = False,
72
+ qk_rms_norm: bool = False,
73
+ qk_rms_norm_cross: bool = False,
74
+ ):
75
+ super().__init__()
76
+ self.resolution = resolution
77
+ self.in_channels = in_channels
78
+ self.model_channels = model_channels
79
+ self.cond_channels = cond_channels
80
+ self.out_channels = out_channels
81
+ self.num_blocks = num_blocks
82
+ self.num_heads = num_heads or model_channels // num_head_channels
83
+ self.mlp_ratio = mlp_ratio
84
+ self.patch_size = patch_size
85
+ self.pe_mode = pe_mode
86
+ self.use_fp16 = use_fp16
87
+ self.use_checkpoint = use_checkpoint
88
+ self.share_mod = share_mod
89
+ self.qk_rms_norm = qk_rms_norm
90
+ self.qk_rms_norm_cross = qk_rms_norm_cross
91
+ self.dtype = torch.float16 if use_fp16 else torch.float32
92
+
93
+ self.t_embedder = TimestepEmbedder(model_channels)
94
+ if share_mod:
95
+ self.adaLN_modulation = nn.Sequential(
96
+ nn.SiLU(),
97
+ nn.Linear(model_channels, 6 * model_channels, bias=True)
98
+ )
99
+
100
+ if pe_mode == "ape":
101
+ pos_embedder = AbsolutePositionEmbedder(model_channels, 3)
102
+ coords = torch.meshgrid(*[torch.arange(res, device=self.device) for res in [resolution // patch_size] * 3], indexing='ij')
103
+ coords = torch.stack(coords, dim=-1).reshape(-1, 3)
104
+ pos_emb = pos_embedder(coords)
105
+ self.register_buffer("pos_emb", pos_emb)
106
+
107
+ self.input_layer = nn.Linear(in_channels * patch_size**3, model_channels)
108
+
109
+ self.blocks = nn.ModuleList([
110
+ ModulatedTransformerCrossBlock(
111
+ model_channels,
112
+ cond_channels,
113
+ num_heads=self.num_heads,
114
+ mlp_ratio=self.mlp_ratio,
115
+ attn_mode='full',
116
+ use_checkpoint=self.use_checkpoint,
117
+ use_rope=(pe_mode == "rope"),
118
+ share_mod=share_mod,
119
+ qk_rms_norm=self.qk_rms_norm,
120
+ qk_rms_norm_cross=self.qk_rms_norm_cross,
121
+ )
122
+ for _ in range(num_blocks)
123
+ ])
124
+
125
+ self.out_layer = nn.Linear(model_channels, out_channels * patch_size**3)
126
+
127
+ self.initialize_weights()
128
+ if use_fp16:
129
+ self.convert_to_fp16()
130
+
131
+ @property
132
+ def device(self) -> torch.device:
133
+ """
134
+ Return the device of the model.
135
+ """
136
+ return next(self.parameters()).device
137
+
138
+ def convert_to_fp16(self) -> None:
139
+ """
140
+ Convert the torso of the model to float16.
141
+ """
142
+ self.blocks.apply(convert_module_to_f16)
143
+
144
+ def convert_to_fp32(self) -> None:
145
+ """
146
+ Convert the torso of the model to float32.
147
+ """
148
+ self.blocks.apply(convert_module_to_f32)
149
+
150
+ def initialize_weights(self) -> None:
151
+ # Initialize transformer layers:
152
+ def _basic_init(module):
153
+ if isinstance(module, nn.Linear):
154
+ torch.nn.init.xavier_uniform_(module.weight)
155
+ if module.bias is not None:
156
+ nn.init.constant_(module.bias, 0)
157
+ self.apply(_basic_init)
158
+
159
+ # Initialize timestep embedding MLP:
160
+ nn.init.normal_(self.t_embedder.mlp[0].weight, std=0.02)
161
+ nn.init.normal_(self.t_embedder.mlp[2].weight, std=0.02)
162
+
163
+ # Zero-out adaLN modulation layers in DiT blocks:
164
+ if self.share_mod:
165
+ nn.init.constant_(self.adaLN_modulation[-1].weight, 0)
166
+ nn.init.constant_(self.adaLN_modulation[-1].bias, 0)
167
+ else:
168
+ for block in self.blocks:
169
+ nn.init.constant_(block.adaLN_modulation[-1].weight, 0)
170
+ nn.init.constant_(block.adaLN_modulation[-1].bias, 0)
171
+
172
+ # Zero-out output layers:
173
+ nn.init.constant_(self.out_layer.weight, 0)
174
+ nn.init.constant_(self.out_layer.bias, 0)
175
+
176
+ def forward(self, x: torch.Tensor, t: torch.Tensor, cond: torch.Tensor) -> torch.Tensor:
177
+ assert [*x.shape] == [x.shape[0], self.in_channels, *[self.resolution] * 3], \
178
+ f"Input shape mismatch, got {x.shape}, expected {[x.shape[0], self.in_channels, *[self.resolution] * 3]}"
179
+
180
+ h = patchify(x, self.patch_size)
181
+ h = h.view(*h.shape[:2], -1).permute(0, 2, 1).contiguous()
182
+
183
+ h = self.input_layer(h)
184
+ h = h + self.pos_emb[None]
185
+ t_emb = self.t_embedder(t)
186
+ if self.share_mod:
187
+ t_emb = self.adaLN_modulation(t_emb)
188
+ t_emb = t_emb.type(self.dtype)
189
+ h = h.type(self.dtype)
190
+ cond = cond.type(self.dtype)
191
+ for block in self.blocks:
192
+ h = block(h, t_emb, cond)
193
+ h = h.type(x.dtype)
194
+ h = F.layer_norm(h, h.shape[-1:])
195
+ h = self.out_layer(h)
196
+
197
+ h = h.permute(0, 2, 1).view(h.shape[0], h.shape[2], *[self.resolution // self.patch_size] * 3)
198
+ h = unpatchify(h, self.patch_size).contiguous()
199
+
200
+ return h
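
The TimestepEmbedder defined at the top of this file is self-contained and easy to probe in isolation. A small sketch of the sinusoidal embedding plus the MLP projection, assuming the trellis package and its torch dependency are importable; the hidden size of 1024 is an arbitrary illustrative choice.

import torch
from trellis.models.sparse_structure_flow import TimestepEmbedder

# Four (possibly fractional) diffusion timesteps.
t = torch.tensor([0.0, 0.25, 0.5, 1.0])

# Sinusoidal features at the default frequency_embedding_size of 256:
# cos/sin pairs over log-spaced frequencies, shape (4, 256).
feats = TimestepEmbedder.timestep_embedding(t, dim=256)
print(feats.shape)

# The full embedder then projects those features through a two-layer SiLU MLP.
embedder = TimestepEmbedder(hidden_size=1024)
print(embedder(t).shape)  # (4, 1024)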
trellis/models/sparse_structure_vae.py ADDED
@@ -0,0 +1,306 @@
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+ from ..modules.norm import GroupNorm32, ChannelLayerNorm32
6
+ from ..modules.spatial import pixel_shuffle_3d
7
+ from ..modules.utils import zero_module, convert_module_to_f16, convert_module_to_f32
8
+
9
+
10
+ def norm_layer(norm_type: str, *args, **kwargs) -> nn.Module:
11
+ """
12
+ Return a normalization layer.
13
+ """
14
+ if norm_type == "group":
15
+ return GroupNorm32(32, *args, **kwargs)
16
+ elif norm_type == "layer":
17
+ return ChannelLayerNorm32(*args, **kwargs)
18
+ else:
19
+ raise ValueError(f"Invalid norm type {norm_type}")
20
+
21
+
22
+ class ResBlock3d(nn.Module):
23
+ def __init__(
24
+ self,
25
+ channels: int,
26
+ out_channels: Optional[int] = None,
27
+ norm_type: Literal["group", "layer"] = "layer",
28
+ ):
29
+ super().__init__()
30
+ self.channels = channels
31
+ self.out_channels = out_channels or channels
32
+
33
+ self.norm1 = norm_layer(norm_type, channels)
34
+ self.norm2 = norm_layer(norm_type, self.out_channels)
35
+ self.conv1 = nn.Conv3d(channels, self.out_channels, 3, padding=1)
36
+ self.conv2 = zero_module(nn.Conv3d(self.out_channels, self.out_channels, 3, padding=1))
37
+ self.skip_connection = nn.Conv3d(channels, self.out_channels, 1) if channels != self.out_channels else nn.Identity()
38
+
39
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
40
+ h = self.norm1(x)
41
+ h = F.silu(h)
42
+ h = self.conv1(h)
43
+ h = self.norm2(h)
44
+ h = F.silu(h)
45
+ h = self.conv2(h)
46
+ h = h + self.skip_connection(x)
47
+ return h
48
+
49
+
50
+ class DownsampleBlock3d(nn.Module):
51
+ def __init__(
52
+ self,
53
+ in_channels: int,
54
+ out_channels: int,
55
+ mode: Literal["conv", "avgpool"] = "conv",
56
+ ):
57
+ assert mode in ["conv", "avgpool"], f"Invalid mode {mode}"
58
+
59
+ super().__init__()
60
+ self.in_channels = in_channels
61
+ self.out_channels = out_channels
62
+
63
+ if mode == "conv":
64
+ self.conv = nn.Conv3d(in_channels, out_channels, 2, stride=2)
65
+ elif mode == "avgpool":
66
+ assert in_channels == out_channels, "Pooling mode requires in_channels to be equal to out_channels"
67
+
68
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
69
+ if hasattr(self, "conv"):
70
+ return self.conv(x)
71
+ else:
72
+ return F.avg_pool3d(x, 2)
73
+
74
+
75
+ class UpsampleBlock3d(nn.Module):
76
+ def __init__(
77
+ self,
78
+ in_channels: int,
79
+ out_channels: int,
80
+ mode: Literal["conv", "nearest"] = "conv",
81
+ ):
82
+ assert mode in ["conv", "nearest"], f"Invalid mode {mode}"
83
+
84
+ super().__init__()
85
+ self.in_channels = in_channels
86
+ self.out_channels = out_channels
87
+
88
+ if mode == "conv":
89
+ self.conv = nn.Conv3d(in_channels, out_channels*8, 3, padding=1)
90
+ elif mode == "nearest":
91
+ assert in_channels == out_channels, "Nearest mode requires in_channels to be equal to out_channels"
92
+
93
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
94
+ if hasattr(self, "conv"):
95
+ x = self.conv(x)
96
+ return pixel_shuffle_3d(x, 2)
97
+ else:
98
+ return F.interpolate(x, scale_factor=2, mode="nearest")
99
+
100
+
101
+ class SparseStructureEncoder(nn.Module):
102
+ """
103
+ Encoder for Sparse Structure (\mathcal{E}_S in the paper Sec. 3.3).
104
+
105
+ Args:
106
+ in_channels (int): Channels of the input.
107
+ latent_channels (int): Channels of the latent representation.
108
+ num_res_blocks (int): Number of residual blocks at each resolution.
109
+ channels (List[int]): Channels of the encoder blocks.
110
+ num_res_blocks_middle (int): Number of residual blocks in the middle.
111
+ norm_type (Literal["group", "layer"]): Type of normalization layer.
112
+ use_fp16 (bool): Whether to use FP16.
113
+ """
114
+ def __init__(
115
+ self,
116
+ in_channels: int,
117
+ latent_channels: int,
118
+ num_res_blocks: int,
119
+ channels: List[int],
120
+ num_res_blocks_middle: int = 2,
121
+ norm_type: Literal["group", "layer"] = "layer",
122
+ use_fp16: bool = False,
123
+ ):
124
+ super().__init__()
125
+ self.in_channels = in_channels
126
+ self.latent_channels = latent_channels
127
+ self.num_res_blocks = num_res_blocks
128
+ self.channels = channels
129
+ self.num_res_blocks_middle = num_res_blocks_middle
130
+ self.norm_type = norm_type
131
+ self.use_fp16 = use_fp16
132
+ self.dtype = torch.float16 if use_fp16 else torch.float32
133
+
134
+ self.input_layer = nn.Conv3d(in_channels, channels[0], 3, padding=1)
135
+
136
+ self.blocks = nn.ModuleList([])
137
+ for i, ch in enumerate(channels):
138
+ self.blocks.extend([
139
+ ResBlock3d(ch, ch)
140
+ for _ in range(num_res_blocks)
141
+ ])
142
+ if i < len(channels) - 1:
143
+ self.blocks.append(
144
+ DownsampleBlock3d(ch, channels[i+1])
145
+ )
146
+
147
+ self.middle_block = nn.Sequential(*[
148
+ ResBlock3d(channels[-1], channels[-1])
149
+ for _ in range(num_res_blocks_middle)
150
+ ])
151
+
152
+ self.out_layer = nn.Sequential(
153
+ norm_layer(norm_type, channels[-1]),
154
+ nn.SiLU(),
155
+ nn.Conv3d(channels[-1], latent_channels*2, 3, padding=1)
156
+ )
157
+
158
+ if use_fp16:
159
+ self.convert_to_fp16()
160
+
161
+ @property
162
+ def device(self) -> torch.device:
163
+ """
164
+ Return the device of the model.
165
+ """
166
+ return next(self.parameters()).device
167
+
168
+ def convert_to_fp16(self) -> None:
169
+ """
170
+ Convert the torso of the model to float16.
171
+ """
172
+ self.use_fp16 = True
173
+ self.dtype = torch.float16
174
+ self.blocks.apply(convert_module_to_f16)
175
+ self.middle_block.apply(convert_module_to_f16)
176
+
177
+ def convert_to_fp32(self) -> None:
178
+ """
179
+ Convert the torso of the model to float32.
180
+ """
181
+ self.use_fp16 = False
182
+ self.dtype = torch.float32
183
+ self.blocks.apply(convert_module_to_f32)
184
+ self.middle_block.apply(convert_module_to_f32)
185
+
186
+ def forward(self, x: torch.Tensor, sample_posterior: bool = False, return_raw: bool = False) -> torch.Tensor:
187
+ h = self.input_layer(x)
188
+ h = h.type(self.dtype)
189
+
190
+ for block in self.blocks:
191
+ h = block(h)
192
+ h = self.middle_block(h)
193
+
194
+ h = h.type(x.dtype)
195
+ h = self.out_layer(h)
196
+
197
+ mean, logvar = h.chunk(2, dim=1)
198
+
199
+ if sample_posterior:
200
+ std = torch.exp(0.5 * logvar)
201
+ z = mean + std * torch.randn_like(std)
202
+ else:
203
+ z = mean
204
+
205
+ if return_raw:
206
+ return z, mean, logvar
207
+ return z
208
+
209
+
210
+ class SparseStructureDecoder(nn.Module):
211
+ """
212
+ Decoder for Sparse Structure (\mathcal{D}_S in the paper Sec. 3.3).
213
+
214
+ Args:
215
+ out_channels (int): Channels of the output.
216
+ latent_channels (int): Channels of the latent representation.
217
+ num_res_blocks (int): Number of residual blocks at each resolution.
218
+ channels (List[int]): Channels of the decoder blocks.
219
+ num_res_blocks_middle (int): Number of residual blocks in the middle.
220
+ norm_type (Literal["group", "layer"]): Type of normalization layer.
221
+ use_fp16 (bool): Whether to use FP16.
222
+ """
223
+ def __init__(
224
+ self,
225
+ out_channels: int,
226
+ latent_channels: int,
227
+ num_res_blocks: int,
228
+ channels: List[int],
229
+ num_res_blocks_middle: int = 2,
230
+ norm_type: Literal["group", "layer"] = "layer",
231
+ use_fp16: bool = False,
232
+ ):
233
+ super().__init__()
234
+ self.out_channels = out_channels
235
+ self.latent_channels = latent_channels
236
+ self.num_res_blocks = num_res_blocks
237
+ self.channels = channels
238
+ self.num_res_blocks_middle = num_res_blocks_middle
239
+ self.norm_type = norm_type
240
+ self.use_fp16 = use_fp16
241
+ self.dtype = torch.float16 if use_fp16 else torch.float32
242
+
243
+ self.input_layer = nn.Conv3d(latent_channels, channels[0], 3, padding=1)
244
+
245
+ self.middle_block = nn.Sequential(*[
246
+ ResBlock3d(channels[0], channels[0])
247
+ for _ in range(num_res_blocks_middle)
248
+ ])
249
+
250
+ self.blocks = nn.ModuleList([])
251
+ for i, ch in enumerate(channels):
252
+ self.blocks.extend([
253
+ ResBlock3d(ch, ch)
254
+ for _ in range(num_res_blocks)
255
+ ])
256
+ if i < len(channels) - 1:
257
+ self.blocks.append(
258
+ UpsampleBlock3d(ch, channels[i+1])
259
+ )
260
+
261
+ self.out_layer = nn.Sequential(
262
+ norm_layer(norm_type, channels[-1]),
263
+ nn.SiLU(),
264
+ nn.Conv3d(channels[-1], out_channels, 3, padding=1)
265
+ )
266
+
267
+ if use_fp16:
268
+ self.convert_to_fp16()
269
+
270
+ @property
271
+ def device(self) -> torch.device:
272
+ """
273
+ Return the device of the model.
274
+ """
275
+ return next(self.parameters()).device
276
+
277
+ def convert_to_fp16(self) -> None:
278
+ """
279
+ Convert the torso of the model to float16.
280
+ """
281
+ self.use_fp16 = True
282
+ self.dtype = torch.float16
283
+ self.blocks.apply(convert_module_to_f16)
284
+ self.middle_block.apply(convert_module_to_f16)
285
+
286
+ def convert_to_fp32(self) -> None:
287
+ """
288
+ Convert the torso of the model to float32.
289
+ """
290
+ self.use_fp16 = False
291
+ self.dtype = torch.float32
292
+ self.blocks.apply(convert_module_to_f32)
293
+ self.middle_block.apply(convert_module_to_f32)
294
+
295
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
296
+ h = self.input_layer(x)
297
+
298
+ h = h.type(self.dtype)
299
+
300
+ h = self.middle_block(h)
301
+ for block in self.blocks:
302
+ h = block(h)
303
+
304
+ h = h.type(x.dtype)
305
+ h = self.out_layer(h)
306
+ return h
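The decoder above mirrors the encoder: it projects the latent grid in, runs the middle and per-stage residual blocks, and upsamples once per stage transition back to full voxel resolution. A minimal shape sketch, assuming the trellis package is importable; channel sizes are illustrative, not a released config:

    import torch
    from trellis.models.sparse_structure_vae import SparseStructureDecoder

    dec = SparseStructureDecoder(
        out_channels=1,        # occupancy logits
        latent_channels=8,
        num_res_blocks=1,
        channels=[32, 16],     # one UpsampleBlock3d between the two stages -> 2x resolution
    )
    z = torch.randn(1, 8, 8, 8, 8)   # [N, latent_channels, D, H, W]
    logits = dec(z)
    print(logits.shape)              # torch.Size([1, 1, 16, 16, 16])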
trellis/models/structured_latent_flow.py ADDED
@@ -0,0 +1,262 @@
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+ import numpy as np
6
+ from ..modules.utils import zero_module, convert_module_to_f16, convert_module_to_f32
7
+ from ..modules.transformer import AbsolutePositionEmbedder
8
+ from ..modules.norm import LayerNorm32
9
+ from ..modules import sparse as sp
10
+ from ..modules.sparse.transformer import ModulatedSparseTransformerCrossBlock
11
+ from .sparse_structure_flow import TimestepEmbedder
12
+
13
+
14
+ class SparseResBlock3d(nn.Module):
15
+ def __init__(
16
+ self,
17
+ channels: int,
18
+ emb_channels: int,
19
+ out_channels: Optional[int] = None,
20
+ downsample: bool = False,
21
+ upsample: bool = False,
22
+ ):
23
+ super().__init__()
24
+ self.channels = channels
25
+ self.emb_channels = emb_channels
26
+ self.out_channels = out_channels or channels
27
+ self.downsample = downsample
28
+ self.upsample = upsample
29
+
30
+ assert not (downsample and upsample), "Cannot downsample and upsample at the same time"
31
+
32
+ self.norm1 = LayerNorm32(channels, elementwise_affine=True, eps=1e-6)
33
+ self.norm2 = LayerNorm32(self.out_channels, elementwise_affine=False, eps=1e-6)
34
+ self.conv1 = sp.SparseConv3d(channels, self.out_channels, 3)
35
+ self.conv2 = zero_module(sp.SparseConv3d(self.out_channels, self.out_channels, 3))
36
+ self.emb_layers = nn.Sequential(
37
+ nn.SiLU(),
38
+ nn.Linear(emb_channels, 2 * self.out_channels, bias=True),
39
+ )
40
+ self.skip_connection = sp.SparseLinear(channels, self.out_channels) if channels != self.out_channels else nn.Identity()
41
+ self.updown = None
42
+ if self.downsample:
43
+ self.updown = sp.SparseDownsample(2)
44
+ elif self.upsample:
45
+ self.updown = sp.SparseUpsample(2)
46
+
47
+ def _updown(self, x: sp.SparseTensor) -> sp.SparseTensor:
48
+ if self.updown is not None:
49
+ x = self.updown(x)
50
+ return x
51
+
52
+ def forward(self, x: sp.SparseTensor, emb: torch.Tensor) -> sp.SparseTensor:
53
+ emb_out = self.emb_layers(emb).type(x.dtype)
54
+ scale, shift = torch.chunk(emb_out, 2, dim=1)
55
+
56
+ x = self._updown(x)
57
+ h = x.replace(self.norm1(x.feats))
58
+ h = h.replace(F.silu(h.feats))
59
+ h = self.conv1(h)
60
+ h = h.replace(self.norm2(h.feats)) * (1 + scale) + shift
61
+ h = h.replace(F.silu(h.feats))
62
+ h = self.conv2(h)
63
+ h = h + self.skip_connection(x)
64
+
65
+ return h
66
+
67
+
68
+ class SLatFlowModel(nn.Module):
69
+ def __init__(
70
+ self,
71
+ resolution: int,
72
+ in_channels: int,
73
+ model_channels: int,
74
+ cond_channels: int,
75
+ out_channels: int,
76
+ num_blocks: int,
77
+ num_heads: Optional[int] = None,
78
+ num_head_channels: Optional[int] = 64,
79
+ mlp_ratio: float = 4,
80
+ patch_size: int = 2,
81
+ num_io_res_blocks: int = 2,
82
+ io_block_channels: List[int] = None,
83
+ pe_mode: Literal["ape", "rope"] = "ape",
84
+ use_fp16: bool = False,
85
+ use_checkpoint: bool = False,
86
+ use_skip_connection: bool = True,
87
+ share_mod: bool = False,
88
+ qk_rms_norm: bool = False,
89
+ qk_rms_norm_cross: bool = False,
90
+ ):
91
+ super().__init__()
92
+ self.resolution = resolution
93
+ self.in_channels = in_channels
94
+ self.model_channels = model_channels
95
+ self.cond_channels = cond_channels
96
+ self.out_channels = out_channels
97
+ self.num_blocks = num_blocks
98
+ self.num_heads = num_heads or model_channels // num_head_channels
99
+ self.mlp_ratio = mlp_ratio
100
+ self.patch_size = patch_size
101
+ self.num_io_res_blocks = num_io_res_blocks
102
+ self.io_block_channels = io_block_channels
103
+ self.pe_mode = pe_mode
104
+ self.use_fp16 = use_fp16
105
+ self.use_checkpoint = use_checkpoint
106
+ self.use_skip_connection = use_skip_connection
107
+ self.share_mod = share_mod
108
+ self.qk_rms_norm = qk_rms_norm
109
+ self.qk_rms_norm_cross = qk_rms_norm_cross
110
+ self.dtype = torch.float16 if use_fp16 else torch.float32
111
+
112
+ assert int(np.log2(patch_size)) == np.log2(patch_size), "Patch size must be a power of 2"
113
+ assert np.log2(patch_size) == len(io_block_channels), "Number of IO block channel entries must match the number of downsampling stages (log2 of patch size)"
114
+
115
+ self.t_embedder = TimestepEmbedder(model_channels)
116
+ if share_mod:
117
+ self.adaLN_modulation = nn.Sequential(
118
+ nn.SiLU(),
119
+ nn.Linear(model_channels, 6 * model_channels, bias=True)
120
+ )
121
+
122
+ if pe_mode == "ape":
123
+ self.pos_embedder = AbsolutePositionEmbedder(model_channels)
124
+
125
+ self.input_layer = sp.SparseLinear(in_channels, io_block_channels[0])
126
+ self.input_blocks = nn.ModuleList([])
127
+ for chs, next_chs in zip(io_block_channels, io_block_channels[1:] + [model_channels]):
128
+ self.input_blocks.extend([
129
+ SparseResBlock3d(
130
+ chs,
131
+ model_channels,
132
+ out_channels=chs,
133
+ )
134
+ for _ in range(num_io_res_blocks-1)
135
+ ])
136
+ self.input_blocks.append(
137
+ SparseResBlock3d(
138
+ chs,
139
+ model_channels,
140
+ out_channels=next_chs,
141
+ downsample=True,
142
+ )
143
+ )
144
+
145
+ self.blocks = nn.ModuleList([
146
+ ModulatedSparseTransformerCrossBlock(
147
+ model_channels,
148
+ cond_channels,
149
+ num_heads=self.num_heads,
150
+ mlp_ratio=self.mlp_ratio,
151
+ attn_mode='full',
152
+ use_checkpoint=self.use_checkpoint,
153
+ use_rope=(pe_mode == "rope"),
154
+ share_mod=self.share_mod,
155
+ qk_rms_norm=self.qk_rms_norm,
156
+ qk_rms_norm_cross=self.qk_rms_norm_cross,
157
+ )
158
+ for _ in range(num_blocks)
159
+ ])
160
+
161
+ self.out_blocks = nn.ModuleList([])
162
+ for chs, prev_chs in zip(reversed(io_block_channels), [model_channels] + list(reversed(io_block_channels[1:]))):
163
+ self.out_blocks.append(
164
+ SparseResBlock3d(
165
+ prev_chs * 2 if self.use_skip_connection else prev_chs,
166
+ model_channels,
167
+ out_channels=chs,
168
+ upsample=True,
169
+ )
170
+ )
171
+ self.out_blocks.extend([
172
+ SparseResBlock3d(
173
+ chs * 2 if self.use_skip_connection else chs,
174
+ model_channels,
175
+ out_channels=chs,
176
+ )
177
+ for _ in range(num_io_res_blocks-1)
178
+ ])
179
+ self.out_layer = sp.SparseLinear(io_block_channels[0], out_channels)
180
+
181
+ self.initialize_weights()
182
+ if use_fp16:
183
+ self.convert_to_fp16()
184
+
185
+ @property
186
+ def device(self) -> torch.device:
187
+ """
188
+ Return the device of the model.
189
+ """
190
+ return next(self.parameters()).device
191
+
192
+ def convert_to_fp16(self) -> None:
193
+ """
194
+ Convert the torso of the model to float16.
195
+ """
196
+ self.input_blocks.apply(convert_module_to_f16)
197
+ self.blocks.apply(convert_module_to_f16)
198
+ self.out_blocks.apply(convert_module_to_f16)
199
+
200
+ def convert_to_fp32(self) -> None:
201
+ """
202
+ Convert the torso of the model to float32.
203
+ """
204
+ self.input_blocks.apply(convert_module_to_f32)
205
+ self.blocks.apply(convert_module_to_f32)
206
+ self.out_blocks.apply(convert_module_to_f32)
207
+
208
+ def initialize_weights(self) -> None:
209
+ # Initialize transformer layers:
210
+ def _basic_init(module):
211
+ if isinstance(module, nn.Linear):
212
+ torch.nn.init.xavier_uniform_(module.weight)
213
+ if module.bias is not None:
214
+ nn.init.constant_(module.bias, 0)
215
+ self.apply(_basic_init)
216
+
217
+ # Initialize timestep embedding MLP:
218
+ nn.init.normal_(self.t_embedder.mlp[0].weight, std=0.02)
219
+ nn.init.normal_(self.t_embedder.mlp[2].weight, std=0.02)
220
+
221
+ # Zero-out adaLN modulation layers in DiT blocks:
222
+ if self.share_mod:
223
+ nn.init.constant_(self.adaLN_modulation[-1].weight, 0)
224
+ nn.init.constant_(self.adaLN_modulation[-1].bias, 0)
225
+ else:
226
+ for block in self.blocks:
227
+ nn.init.constant_(block.adaLN_modulation[-1].weight, 0)
228
+ nn.init.constant_(block.adaLN_modulation[-1].bias, 0)
229
+
230
+ # Zero-out output layers:
231
+ nn.init.constant_(self.out_layer.weight, 0)
232
+ nn.init.constant_(self.out_layer.bias, 0)
233
+
234
+ def forward(self, x: sp.SparseTensor, t: torch.Tensor, cond: torch.Tensor) -> sp.SparseTensor:
235
+ h = self.input_layer(x).type(self.dtype)
236
+ t_emb = self.t_embedder(t)
237
+ if self.share_mod:
238
+ t_emb = self.adaLN_modulation(t_emb)
239
+ t_emb = t_emb.type(self.dtype)
240
+ cond = cond.type(self.dtype)
241
+
242
+ skips = []
243
+ # pack with input blocks
244
+ for block in self.input_blocks:
245
+ h = block(h, t_emb)
246
+ skips.append(h.feats)
247
+
248
+ if self.pe_mode == "ape":
249
+ h = h + self.pos_embedder(h.coords[:, 1:]).type(self.dtype)
250
+ for block in self.blocks:
251
+ h = block(h, t_emb, cond)
252
+
253
+ # unpack with output blocks
254
+ for block, skip in zip(self.out_blocks, reversed(skips)):
255
+ if self.use_skip_connection:
256
+ h = block(h.replace(torch.cat([h.feats, skip], dim=1)), t_emb)
257
+ else:
258
+ h = block(h, t_emb)
259
+
260
+ h = h.replace(F.layer_norm(h.feats, h.feats.shape[-1:]))
261
+ h = self.out_layer(h.type(x.dtype))
262
+ return h
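SLatFlowModel is laid out like a sparse U-Net: the input res-blocks downsample by a factor of two per entry in io_block_channels (so patch_size must equal 2 ** len(io_block_channels)), the modulated transformer cross-blocks run at the bottleneck, and the output res-blocks upsample while optionally concatenating the stored skip features. A pure-Python sketch of that channel schedule, using assumed example values rather than a released config:

    model_channels = 1024
    io_block_channels = [128, 256]              # one entry per 2x downsample stage
    patch_size = 2 ** len(io_block_channels)    # satisfies the asserts above

    down = list(zip(io_block_channels, io_block_channels[1:] + [model_channels]))
    up = list(zip(reversed(io_block_channels),
                  [model_channels] + list(reversed(io_block_channels[1:]))))

    print(patch_size)   # 4
    print(down)         # [(128, 256), (256, 1024)]  each pair ends in a downsampling res-block
    print(up)           # [(256, 1024), (128, 256)]  mirrored upsampling, skips concatenated on dim=1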
trellis/models/structured_latent_vae/__init__.py ADDED
@@ -0,0 +1,4 @@
1
+ from .encoder import SLatEncoder
2
+ from .decoder_gs import SLatGaussianDecoder
3
+ from .decoder_rf import SLatRadianceFieldDecoder
4
+ from .decoder_mesh import SLatMeshDecoder
trellis/models/structured_latent_vae/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (387 Bytes). View file
 
trellis/models/structured_latent_vae/__pycache__/base.cpython-312.pyc ADDED
Binary file (6.57 kB). View file
 
trellis/models/structured_latent_vae/__pycache__/decoder_gs.cpython-312.pyc ADDED
Binary file (7.94 kB). View file
 
trellis/models/structured_latent_vae/__pycache__/decoder_mesh.cpython-312.pyc ADDED
Binary file (8.24 kB). View file
 
trellis/models/structured_latent_vae/__pycache__/decoder_rf.cpython-312.pyc ADDED
Binary file (6.35 kB). View file
 
trellis/models/structured_latent_vae/__pycache__/encoder.cpython-312.pyc ADDED
Binary file (3.5 kB). View file
 
trellis/models/structured_latent_vae/base.py ADDED
@@ -0,0 +1,117 @@
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ from ...modules.utils import convert_module_to_f16, convert_module_to_f32
5
+ from ...modules import sparse as sp
6
+ from ...modules.transformer import AbsolutePositionEmbedder
7
+ from ...modules.sparse.transformer import SparseTransformerBlock
8
+
9
+
10
+ def block_attn_config(self):
11
+ """
12
+ Return the attention configuration of the model.
13
+ """
14
+ for i in range(self.num_blocks):
15
+ if self.attn_mode == "shift_window":
16
+ yield "serialized", self.window_size, 0, (16 * (i % 2),) * 3, sp.SerializeMode.Z_ORDER
17
+ elif self.attn_mode == "shift_sequence":
18
+ yield "serialized", self.window_size, self.window_size // 2 * (i % 2), (0, 0, 0), sp.SerializeMode.Z_ORDER
19
+ elif self.attn_mode == "shift_order":
20
+ yield "serialized", self.window_size, 0, (0, 0, 0), sp.SerializeModes[i % 4]
21
+ elif self.attn_mode == "full":
22
+ yield "full", None, None, None, None
23
+ elif self.attn_mode == "swin":
24
+ yield "windowed", self.window_size, None, self.window_size // 2 * (i % 2), None
25
+
26
+
27
+ class SparseTransformerBase(nn.Module):
28
+ """
29
+ Sparse Transformer without output layers.
30
+ Serves as the base class for the encoder and the decoders.
31
+ """
32
+ def __init__(
33
+ self,
34
+ in_channels: int,
35
+ model_channels: int,
36
+ num_blocks: int,
37
+ num_heads: Optional[int] = None,
38
+ num_head_channels: Optional[int] = 64,
39
+ mlp_ratio: float = 4.0,
40
+ attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "full",
41
+ window_size: Optional[int] = None,
42
+ pe_mode: Literal["ape", "rope"] = "ape",
43
+ use_fp16: bool = False,
44
+ use_checkpoint: bool = False,
45
+ qk_rms_norm: bool = False,
46
+ ):
47
+ super().__init__()
48
+ self.in_channels = in_channels
49
+ self.model_channels = model_channels
50
+ self.num_blocks = num_blocks
51
+ self.window_size = window_size
52
+ self.num_heads = num_heads or model_channels // num_head_channels
53
+ self.mlp_ratio = mlp_ratio
54
+ self.attn_mode = attn_mode
55
+ self.pe_mode = pe_mode
56
+ self.use_fp16 = use_fp16
57
+ self.use_checkpoint = use_checkpoint
58
+ self.qk_rms_norm = qk_rms_norm
59
+ self.dtype = torch.float16 if use_fp16 else torch.float32
60
+
61
+ if pe_mode == "ape":
62
+ self.pos_embedder = AbsolutePositionEmbedder(model_channels)
63
+
64
+ self.input_layer = sp.SparseLinear(in_channels, model_channels)
65
+ self.blocks = nn.ModuleList([
66
+ SparseTransformerBlock(
67
+ model_channels,
68
+ num_heads=self.num_heads,
69
+ mlp_ratio=self.mlp_ratio,
70
+ attn_mode=attn_mode,
71
+ window_size=window_size,
72
+ shift_sequence=shift_sequence,
73
+ shift_window=shift_window,
74
+ serialize_mode=serialize_mode,
75
+ use_checkpoint=self.use_checkpoint,
76
+ use_rope=(pe_mode == "rope"),
77
+ qk_rms_norm=self.qk_rms_norm,
78
+ )
79
+ for attn_mode, window_size, shift_sequence, shift_window, serialize_mode in block_attn_config(self)
80
+ ])
81
+
82
+ @property
83
+ def device(self) -> torch.device:
84
+ """
85
+ Return the device of the model.
86
+ """
87
+ return next(self.parameters()).device
88
+
89
+ def convert_to_fp16(self) -> None:
90
+ """
91
+ Convert the torso of the model to float16.
92
+ """
93
+ self.blocks.apply(convert_module_to_f16)
94
+
95
+ def convert_to_fp32(self) -> None:
96
+ """
97
+ Convert the torso of the model to float32.
98
+ """
99
+ self.blocks.apply(convert_module_to_f32)
100
+
101
+ def initialize_weights(self) -> None:
102
+ # Initialize transformer layers:
103
+ def _basic_init(module):
104
+ if isinstance(module, nn.Linear):
105
+ torch.nn.init.xavier_uniform_(module.weight)
106
+ if module.bias is not None:
107
+ nn.init.constant_(module.bias, 0)
108
+ self.apply(_basic_init)
109
+
110
+ def forward(self, x: sp.SparseTensor) -> sp.SparseTensor:
111
+ h = self.input_layer(x)
112
+ if self.pe_mode == "ape":
113
+ h = h + self.pos_embedder(x.coords[:, 1:])
114
+ h = h.type(self.dtype)
115
+ for block in self.blocks:
116
+ h = block(h)
117
+ return h
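block_attn_config yields one (attn_mode, window_size, shift_sequence, shift_window, serialize_mode) tuple per block, alternating the shift on odd and even blocks. A standalone sketch of what the "swin" branch produces for a 4-block model with window_size=8 (values assumed for illustration):

    def swin_config(num_blocks, window_size):
        # mirrors the "swin" branch above: shift the window by half its size on odd blocks
        for i in range(num_blocks):
            yield "windowed", window_size, None, window_size // 2 * (i % 2), None

    for i, cfg in enumerate(swin_config(4, 8)):
        print(i, cfg)
    # 0 ('windowed', 8, None, 0, None)
    # 1 ('windowed', 8, None, 4, None)
    # 2 ('windowed', 8, None, 0, None)
    # 3 ('windowed', 8, None, 4, None)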
trellis/models/structured_latent_vae/decoder_gs.py ADDED
@@ -0,0 +1,122 @@
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+ from ...modules import sparse as sp
6
+ from ...utils.random_utils import hammersley_sequence
7
+ from .base import SparseTransformerBase
8
+ from ...representations import Gaussian
9
+
10
+
11
+ class SLatGaussianDecoder(SparseTransformerBase):
12
+ def __init__(
13
+ self,
14
+ resolution: int,
15
+ model_channels: int,
16
+ latent_channels: int,
17
+ num_blocks: int,
18
+ num_heads: Optional[int] = None,
19
+ num_head_channels: Optional[int] = 64,
20
+ mlp_ratio: float = 4,
21
+ attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "swin",
22
+ window_size: int = 8,
23
+ pe_mode: Literal["ape", "rope"] = "ape",
24
+ use_fp16: bool = False,
25
+ use_checkpoint: bool = False,
26
+ qk_rms_norm: bool = False,
27
+ representation_config: dict = None,
28
+ ):
29
+ super().__init__(
30
+ in_channels=latent_channels,
31
+ model_channels=model_channels,
32
+ num_blocks=num_blocks,
33
+ num_heads=num_heads,
34
+ num_head_channels=num_head_channels,
35
+ mlp_ratio=mlp_ratio,
36
+ attn_mode=attn_mode,
37
+ window_size=window_size,
38
+ pe_mode=pe_mode,
39
+ use_fp16=use_fp16,
40
+ use_checkpoint=use_checkpoint,
41
+ qk_rms_norm=qk_rms_norm,
42
+ )
43
+ self.resolution = resolution
44
+ self.rep_config = representation_config
45
+ self._calc_layout()
46
+ self.out_layer = sp.SparseLinear(model_channels, self.out_channels)
47
+ self._build_perturbation()
48
+
49
+ self.initialize_weights()
50
+ if use_fp16:
51
+ self.convert_to_fp16()
52
+
53
+ def initialize_weights(self) -> None:
54
+ super().initialize_weights()
55
+ # Zero-out output layers:
56
+ nn.init.constant_(self.out_layer.weight, 0)
57
+ nn.init.constant_(self.out_layer.bias, 0)
58
+
59
+ def _build_perturbation(self) -> None:
60
+ perturbation = [hammersley_sequence(3, i, self.rep_config['num_gaussians']) for i in range(self.rep_config['num_gaussians'])]
61
+ perturbation = torch.tensor(perturbation).float() * 2 - 1
62
+ perturbation = perturbation / self.rep_config['voxel_size']
63
+ perturbation = torch.atanh(perturbation).to(self.device)
64
+ self.register_buffer('offset_perturbation', perturbation)
65
+
66
+ def _calc_layout(self) -> None:
67
+ self.layout = {
68
+ '_xyz' : {'shape': (self.rep_config['num_gaussians'], 3), 'size': self.rep_config['num_gaussians'] * 3},
69
+ '_features_dc' : {'shape': (self.rep_config['num_gaussians'], 1, 3), 'size': self.rep_config['num_gaussians'] * 3},
70
+ '_scaling' : {'shape': (self.rep_config['num_gaussians'], 3), 'size': self.rep_config['num_gaussians'] * 3},
71
+ '_rotation' : {'shape': (self.rep_config['num_gaussians'], 4), 'size': self.rep_config['num_gaussians'] * 4},
72
+ '_opacity' : {'shape': (self.rep_config['num_gaussians'], 1), 'size': self.rep_config['num_gaussians']},
73
+ }
74
+ start = 0
75
+ for k, v in self.layout.items():
76
+ v['range'] = (start, start + v['size'])
77
+ start += v['size']
78
+ self.out_channels = start
79
+
80
+ def to_representation(self, x: sp.SparseTensor) -> List[Gaussian]:
81
+ """
82
+ Convert a batch of network outputs to 3D representations.
83
+
84
+ Args:
85
+ x: The [N x * x C] sparse tensor output by the network.
86
+
87
+ Returns:
88
+ list of representations
89
+ """
90
+ ret = []
91
+ for i in range(x.shape[0]):
92
+ representation = Gaussian(
93
+ sh_degree=0,
94
+ aabb=[-0.5, -0.5, -0.5, 1.0, 1.0, 1.0],
95
+ mininum_kernel_size = self.rep_config['3d_filter_kernel_size'],
96
+ scaling_bias = self.rep_config['scaling_bias'],
97
+ opacity_bias = self.rep_config['opacity_bias'],
98
+ scaling_activation = self.rep_config['scaling_activation']
99
+ )
100
+ xyz = (x.coords[x.layout[i]][:, 1:].float() + 0.5) / self.resolution
101
+ for k, v in self.layout.items():
102
+ if k == '_xyz':
103
+ offset = x.feats[x.layout[i]][:, v['range'][0]:v['range'][1]].reshape(-1, *v['shape'])
104
+ offset = offset * self.rep_config['lr'][k]
105
+ if self.rep_config['perturb_offset']:
106
+ offset = offset + self.offset_perturbation
107
+ offset = torch.tanh(offset) / self.resolution * 0.5 * self.rep_config['voxel_size']
108
+ _xyz = xyz.unsqueeze(1) + offset
109
+ setattr(representation, k, _xyz.flatten(0, 1))
110
+ else:
111
+ feats = x.feats[x.layout[i]][:, v['range'][0]:v['range'][1]].reshape(-1, *v['shape']).flatten(0, 1)
112
+ feats = feats * self.rep_config['lr'][k]
113
+ setattr(representation, k, feats)
114
+ ret.append(representation)
115
+ return ret
116
+
117
+ def forward(self, x: sp.SparseTensor) -> List[Gaussian]:
118
+ h = super().forward(x)
119
+ h = h.type(x.dtype)
120
+ h = h.replace(F.layer_norm(h.feats, h.feats.shape[-1:]))
121
+ h = self.out_layer(h)
122
+ return self.to_representation(h)
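_calc_layout packs every per-voxel Gaussian attribute into one flat feature vector and records the channel range each attribute occupies; to_representation then slices those ranges back out. A standalone sketch of the bookkeeping, assuming num_gaussians=32 (illustrative):

    num_gaussians = 32
    sizes = {
        '_xyz':         num_gaussians * 3,
        '_features_dc': num_gaussians * 3,
        '_scaling':     num_gaussians * 3,
        '_rotation':    num_gaussians * 4,
        '_opacity':     num_gaussians * 1,
    }
    start = 0
    for name, size in sizes.items():
        print(f"{name:14s} channels [{start}, {start + size})")
        start += size
    print("decoder out_channels =", start)   # 448 channels per active voxel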
trellis/models/structured_latent_vae/decoder_mesh.py ADDED
@@ -0,0 +1,167 @@
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+ import numpy as np
6
+ from ...modules.utils import zero_module, convert_module_to_f16, convert_module_to_f32
7
+ from ...modules import sparse as sp
8
+ from .base import SparseTransformerBase
9
+ from ...representations import MeshExtractResult
10
+ from ...representations.mesh import SparseFeatures2Mesh
11
+
12
+
13
+ class SparseSubdivideBlock3d(nn.Module):
14
+ """
15
+ A 3D subdivide block that can subdivide the sparse tensor.
16
+
17
+ Args:
18
+ channels: channels in the inputs and outputs.
19
+ out_channels: if specified, the number of output channels.
20
+ num_groups: the number of groups for the group norm.
21
+ """
22
+ def __init__(
23
+ self,
24
+ channels: int,
25
+ resolution: int,
26
+ out_channels: Optional[int] = None,
27
+ num_groups: int = 32
28
+ ):
29
+ super().__init__()
30
+ self.channels = channels
31
+ self.resolution = resolution
32
+ self.out_resolution = resolution * 2
33
+ self.out_channels = out_channels or channels
34
+
35
+ self.act_layers = nn.Sequential(
36
+ sp.SparseGroupNorm32(num_groups, channels),
37
+ sp.SparseSiLU()
38
+ )
39
+
40
+ self.sub = sp.SparseSubdivide()
41
+
42
+ self.out_layers = nn.Sequential(
43
+ sp.SparseConv3d(channels, self.out_channels, 3, indice_key=f"res_{self.out_resolution}"),
44
+ sp.SparseGroupNorm32(num_groups, self.out_channels),
45
+ sp.SparseSiLU(),
46
+ zero_module(sp.SparseConv3d(self.out_channels, self.out_channels, 3, indice_key=f"res_{self.out_resolution}")),
47
+ )
48
+
49
+ if self.out_channels == channels:
50
+ self.skip_connection = nn.Identity()
51
+ else:
52
+ self.skip_connection = sp.SparseConv3d(channels, self.out_channels, 1, indice_key=f"res_{self.out_resolution}")
53
+
54
+ def forward(self, x: sp.SparseTensor) -> sp.SparseTensor:
55
+ """
56
+ Apply the subdivide block to a sparse tensor (this block takes no timestep embedding).
57
+
58
+ Args:
59
+ x: an [N x C x ...] Tensor of features.
60
+ Returns:
61
+ an [N x C x ...] Tensor of outputs.
62
+ """
63
+ h = self.act_layers(x)
64
+ h = self.sub(h)
65
+ x = self.sub(x)
66
+ h = self.out_layers(h)
67
+ h = h + self.skip_connection(x)
68
+ return h
69
+
70
+
71
+ class SLatMeshDecoder(SparseTransformerBase):
72
+ def __init__(
73
+ self,
74
+ resolution: int,
75
+ model_channels: int,
76
+ latent_channels: int,
77
+ num_blocks: int,
78
+ num_heads: Optional[int] = None,
79
+ num_head_channels: Optional[int] = 64,
80
+ mlp_ratio: float = 4,
81
+ attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "swin",
82
+ window_size: int = 8,
83
+ pe_mode: Literal["ape", "rope"] = "ape",
84
+ use_fp16: bool = False,
85
+ use_checkpoint: bool = False,
86
+ qk_rms_norm: bool = False,
87
+ representation_config: dict = None,
88
+ ):
89
+ super().__init__(
90
+ in_channels=latent_channels,
91
+ model_channels=model_channels,
92
+ num_blocks=num_blocks,
93
+ num_heads=num_heads,
94
+ num_head_channels=num_head_channels,
95
+ mlp_ratio=mlp_ratio,
96
+ attn_mode=attn_mode,
97
+ window_size=window_size,
98
+ pe_mode=pe_mode,
99
+ use_fp16=use_fp16,
100
+ use_checkpoint=use_checkpoint,
101
+ qk_rms_norm=qk_rms_norm,
102
+ )
103
+ self.resolution = resolution
104
+ self.rep_config = representation_config
105
+ self.mesh_extractor = SparseFeatures2Mesh(res=self.resolution*4, use_color=self.rep_config.get('use_color', False))
106
+ self.out_channels = self.mesh_extractor.feats_channels
107
+ self.upsample = nn.ModuleList([
108
+ SparseSubdivideBlock3d(
109
+ channels=model_channels,
110
+ resolution=resolution,
111
+ out_channels=model_channels // 4
112
+ ),
113
+ SparseSubdivideBlock3d(
114
+ channels=model_channels // 4,
115
+ resolution=resolution * 2,
116
+ out_channels=model_channels // 8
117
+ )
118
+ ])
119
+ self.out_layer = sp.SparseLinear(model_channels // 8, self.out_channels)
120
+
121
+ self.initialize_weights()
122
+ if use_fp16:
123
+ self.convert_to_fp16()
124
+
125
+ def initialize_weights(self) -> None:
126
+ super().initialize_weights()
127
+ # Zero-out output layers:
128
+ nn.init.constant_(self.out_layer.weight, 0)
129
+ nn.init.constant_(self.out_layer.bias, 0)
130
+
131
+ def convert_to_fp16(self) -> None:
132
+ """
133
+ Convert the torso of the model to float16.
134
+ """
135
+ super().convert_to_fp16()
136
+ self.upsample.apply(convert_module_to_f16)
137
+
138
+ def convert_to_fp32(self) -> None:
139
+ """
140
+ Convert the torso of the model to float32.
141
+ """
142
+ super().convert_to_fp32()
143
+ self.upsample.apply(convert_module_to_f32)
144
+
145
+ def to_representation(self, x: sp.SparseTensor) -> List[MeshExtractResult]:
146
+ """
147
+ Convert a batch of network outputs to 3D representations.
148
+
149
+ Args:
150
+ x: The [N x * x C] sparse tensor output by the network.
151
+
152
+ Returns:
153
+ list of representations
154
+ """
155
+ ret = []
156
+ for i in range(x.shape[0]):
157
+ mesh = self.mesh_extractor(x[i], training=self.training)
158
+ ret.append(mesh)
159
+ return ret
160
+
161
+ def forward(self, x: sp.SparseTensor) -> List[MeshExtractResult]:
162
+ h = super().forward(x)
163
+ for block in self.upsample:
164
+ h = block(h)
165
+ h = h.type(x.dtype)
166
+ h = self.out_layer(h)
167
+ return self.to_representation(h)
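The two SparseSubdivideBlock3d stages each double the sparse resolution while shrinking the channel width, which is why the mesh extractor is built at res=self.resolution*4. A sketch of that progression with assumed sizes (not a released config):

    resolution, model_channels = 64, 768
    stages = [
        (model_channels,      resolution,     model_channels // 4),
        (model_channels // 4, resolution * 2, model_channels // 8),
    ]
    for in_ch, res, out_ch in stages:
        print(f"{in_ch:4d} ch @ {res}^3  ->  {out_ch:4d} ch @ {res * 2}^3")
    print("mesh extraction grid:", resolution * 4)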
trellis/models/structured_latent_vae/decoder_rf.py ADDED
@@ -0,0 +1,104 @@
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+ import numpy as np
6
+ from ...modules import sparse as sp
7
+ from .base import SparseTransformerBase
8
+ from ...representations import Strivec
9
+
10
+
11
+ class SLatRadianceFieldDecoder(SparseTransformerBase):
12
+ def __init__(
13
+ self,
14
+ resolution: int,
15
+ model_channels: int,
16
+ latent_channels: int,
17
+ num_blocks: int,
18
+ num_heads: Optional[int] = None,
19
+ num_head_channels: Optional[int] = 64,
20
+ mlp_ratio: float = 4,
21
+ attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "swin",
22
+ window_size: int = 8,
23
+ pe_mode: Literal["ape", "rope"] = "ape",
24
+ use_fp16: bool = False,
25
+ use_checkpoint: bool = False,
26
+ qk_rms_norm: bool = False,
27
+ representation_config: dict = None,
28
+ ):
29
+ super().__init__(
30
+ in_channels=latent_channels,
31
+ model_channels=model_channels,
32
+ num_blocks=num_blocks,
33
+ num_heads=num_heads,
34
+ num_head_channels=num_head_channels,
35
+ mlp_ratio=mlp_ratio,
36
+ attn_mode=attn_mode,
37
+ window_size=window_size,
38
+ pe_mode=pe_mode,
39
+ use_fp16=use_fp16,
40
+ use_checkpoint=use_checkpoint,
41
+ qk_rms_norm=qk_rms_norm,
42
+ )
43
+ self.resolution = resolution
44
+ self.rep_config = representation_config
45
+ self._calc_layout()
46
+ self.out_layer = sp.SparseLinear(model_channels, self.out_channels)
47
+
48
+ self.initialize_weights()
49
+ if use_fp16:
50
+ self.convert_to_fp16()
51
+
52
+ def initialize_weights(self) -> None:
53
+ super().initialize_weights()
54
+ # Zero-out output layers:
55
+ nn.init.constant_(self.out_layer.weight, 0)
56
+ nn.init.constant_(self.out_layer.bias, 0)
57
+
58
+ def _calc_layout(self) -> None:
59
+ self.layout = {
60
+ 'trivec': {'shape': (self.rep_config['rank'], 3, self.rep_config['dim']), 'size': self.rep_config['rank'] * 3 * self.rep_config['dim']},
61
+ 'density': {'shape': (self.rep_config['rank'],), 'size': self.rep_config['rank']},
62
+ 'features_dc': {'shape': (self.rep_config['rank'], 1, 3), 'size': self.rep_config['rank'] * 3},
63
+ }
64
+ start = 0
65
+ for k, v in self.layout.items():
66
+ v['range'] = (start, start + v['size'])
67
+ start += v['size']
68
+ self.out_channels = start
69
+
70
+ def to_representation(self, x: sp.SparseTensor) -> List[Strivec]:
71
+ """
72
+ Convert a batch of network outputs to 3D representations.
73
+
74
+ Args:
75
+ x: The [N x * x C] sparse tensor output by the network.
76
+
77
+ Returns:
78
+ list of representations
79
+ """
80
+ ret = []
81
+ for i in range(x.shape[0]):
82
+ representation = Strivec(
83
+ sh_degree=0,
84
+ resolution=self.resolution,
85
+ aabb=[-0.5, -0.5, -0.5, 1, 1, 1],
86
+ rank=self.rep_config['rank'],
87
+ dim=self.rep_config['dim'],
88
+ device='cuda',
89
+ )
90
+ representation.density_shift = 0.0
91
+ representation.position = (x.coords[x.layout[i]][:, 1:].float() + 0.5) / self.resolution
92
+ representation.depth = torch.full((representation.position.shape[0], 1), int(np.log2(self.resolution)), dtype=torch.uint8, device='cuda')
93
+ for k, v in self.layout.items():
94
+ setattr(representation, k, x.feats[x.layout[i]][:, v['range'][0]:v['range'][1]].reshape(-1, *v['shape']))
95
+ representation.trivec = representation.trivec + 1
96
+ ret.append(representation)
97
+ return ret
98
+
99
+ def forward(self, x: sp.SparseTensor) -> List[Strivec]:
100
+ h = super().forward(x)
101
+ h = h.type(x.dtype)
102
+ h = h.replace(F.layer_norm(h.feats, h.feats.shape[-1:]))
103
+ h = self.out_layer(h)
104
+ return self.to_representation(h)
trellis/models/structured_latent_vae/encoder.py ADDED
@@ -0,0 +1,72 @@
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+ from ...modules import sparse as sp
6
+ from .base import SparseTransformerBase
7
+
8
+
9
+ class SLatEncoder(SparseTransformerBase):
10
+ def __init__(
11
+ self,
12
+ resolution: int,
13
+ in_channels: int,
14
+ model_channels: int,
15
+ latent_channels: int,
16
+ num_blocks: int,
17
+ num_heads: Optional[int] = None,
18
+ num_head_channels: Optional[int] = 64,
19
+ mlp_ratio: float = 4,
20
+ attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "swin",
21
+ window_size: int = 8,
22
+ pe_mode: Literal["ape", "rope"] = "ape",
23
+ use_fp16: bool = False,
24
+ use_checkpoint: bool = False,
25
+ qk_rms_norm: bool = False,
26
+ ):
27
+ super().__init__(
28
+ in_channels=in_channels,
29
+ model_channels=model_channels,
30
+ num_blocks=num_blocks,
31
+ num_heads=num_heads,
32
+ num_head_channels=num_head_channels,
33
+ mlp_ratio=mlp_ratio,
34
+ attn_mode=attn_mode,
35
+ window_size=window_size,
36
+ pe_mode=pe_mode,
37
+ use_fp16=use_fp16,
38
+ use_checkpoint=use_checkpoint,
39
+ qk_rms_norm=qk_rms_norm,
40
+ )
41
+ self.resolution = resolution
42
+ self.out_layer = sp.SparseLinear(model_channels, 2 * latent_channels)
43
+
44
+ self.initialize_weights()
45
+ if use_fp16:
46
+ self.convert_to_fp16()
47
+
48
+ def initialize_weights(self) -> None:
49
+ super().initialize_weights()
50
+ # Zero-out output layers:
51
+ nn.init.constant_(self.out_layer.weight, 0)
52
+ nn.init.constant_(self.out_layer.bias, 0)
53
+
54
+ def forward(self, x: sp.SparseTensor, sample_posterior=True, return_raw=False):
55
+ h = super().forward(x)
56
+ h = h.type(x.dtype)
57
+ h = h.replace(F.layer_norm(h.feats, h.feats.shape[-1:]))
58
+ h = self.out_layer(h)
59
+
60
+ # Sample from the posterior distribution
61
+ mean, logvar = h.feats.chunk(2, dim=-1)
62
+ if sample_posterior:
63
+ std = torch.exp(0.5 * logvar)
64
+ z = mean + std * torch.randn_like(std)
65
+ else:
66
+ z = mean
67
+ z = h.replace(z)
68
+
69
+ if return_raw:
70
+ return z, mean, logvar
71
+ else:
72
+ return z
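The encoder's head predicts a mean and log-variance per active voxel and samples with the reparameterization trick; a standard diagonal-Gaussian KL term (computed in training code outside this file) would regularize the posterior. A standalone sketch on plain dense tensors, shapes illustrative:

    import torch

    latent_channels = 8
    feats = torch.randn(1000, 2 * latent_channels)         # out_layer output for 1000 active voxels
    mean, logvar = feats.chunk(2, dim=-1)
    std = torch.exp(0.5 * logvar)
    z = mean + std * torch.randn_like(std)                  # reparameterization trick
    kl = 0.5 * (mean.pow(2) + logvar.exp() - 1 - logvar)    # KL(q(z|x) || N(0, I)) per dimension
    print(z.shape, kl.mean().item())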
trellis/modules/__pycache__/norm.cpython-312.pyc ADDED
Binary file (2.29 kB). View file
 
trellis/modules/__pycache__/spatial.cpython-312.pyc ADDED
Binary file (3.85 kB). View file
 
trellis/modules/__pycache__/utils.cpython-312.pyc ADDED
Binary file (2.42 kB). View file
 
trellis/modules/attention/__init__.py ADDED
@@ -0,0 +1,36 @@
1
+ from typing import *
2
+
3
+ BACKEND = 'flash_attn'
4
+ DEBUG = False
5
+
6
+ def __from_env():
7
+ import os
8
+
9
+ global BACKEND
10
+ global DEBUG
11
+
12
+ env_attn_backend = os.environ.get('ATTN_BACKEND')
13
+ env_sttn_debug = os.environ.get('ATTN_DEBUG')
14
+
15
+ if env_attn_backend is not None and env_attn_backend in ['xformers', 'flash_attn', 'sdpa', 'naive']:
16
+ BACKEND = env_attn_backend
17
+ if env_sttn_debug is not None:
18
+ DEBUG = env_sttn_debug == '1'
19
+
20
+ print(f"[ATTENTION] Using backend: {BACKEND}")
21
+
22
+
23
+ __from_env()
24
+
25
+
26
+ def set_backend(backend: Literal['xformers', 'flash_attn']):
27
+ global BACKEND
28
+ BACKEND = backend
29
+
30
+ def set_debug(debug: bool):
31
+ global DEBUG
32
+ DEBUG = debug
33
+
34
+
35
+ from .full_attn import *
36
+ from .modules import *
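BACKEND is resolved from ATTN_BACKEND once, at first import, and full_attn binds to it at module load, so the environment variable has to be set before anything from trellis is imported; set_backend only affects code that reads BACKEND afterwards. A usage sketch, assuming the trellis package is on the Python path ('sdpa' needs only PyTorch 2.x):

    import os
    os.environ['ATTN_BACKEND'] = 'sdpa'      # must happen before the first trellis import
    os.environ['ATTN_DEBUG'] = '0'

    from trellis.modules import attention    # prints "[ATTENTION] Using backend: sdpa"
    print(attention.BACKEND)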
trellis/modules/attention/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (1.17 kB). View file
 
trellis/modules/attention/__pycache__/full_attn.cpython-312.pyc ADDED
Binary file (7.38 kB). View file
 
trellis/modules/attention/__pycache__/modules.cpython-312.pyc ADDED
Binary file (10.5 kB). View file
 
trellis/modules/attention/full_attn.py ADDED
@@ -0,0 +1,140 @@
1
+ from typing import *
2
+ import torch
3
+ import math
4
+ from . import DEBUG, BACKEND
5
+
6
+ if BACKEND == 'xformers':
7
+ import xformers.ops as xops
8
+ elif BACKEND == 'flash_attn':
9
+ import flash_attn
10
+ elif BACKEND == 'sdpa':
11
+ from torch.nn.functional import scaled_dot_product_attention as sdpa
12
+ elif BACKEND == 'naive':
13
+ pass
14
+ else:
15
+ raise ValueError(f"Unknown attention backend: {BACKEND}")
16
+
17
+
18
+ __all__ = [
19
+ 'scaled_dot_product_attention',
20
+ ]
21
+
22
+
23
+ def _naive_sdpa(q, k, v):
24
+ """
25
+ Naive implementation of scaled dot product attention.
26
+ """
27
+ q = q.permute(0, 2, 1, 3) # [N, H, L, C]
28
+ k = k.permute(0, 2, 1, 3) # [N, H, L, C]
29
+ v = v.permute(0, 2, 1, 3) # [N, H, L, C]
30
+ scale_factor = 1 / math.sqrt(q.size(-1))
31
+ attn_weight = q @ k.transpose(-2, -1) * scale_factor
32
+ attn_weight = torch.softmax(attn_weight, dim=-1)
33
+ out = attn_weight @ v
34
+ out = out.permute(0, 2, 1, 3) # [N, L, H, C]
35
+ return out
36
+
37
+
38
+ @overload
39
+ def scaled_dot_product_attention(qkv: torch.Tensor) -> torch.Tensor:
40
+ """
41
+ Apply scaled dot product attention.
42
+
43
+ Args:
44
+ qkv (torch.Tensor): A [N, L, 3, H, C] tensor containing Qs, Ks, and Vs.
45
+ """
46
+ ...
47
+
48
+ @overload
49
+ def scaled_dot_product_attention(q: torch.Tensor, kv: torch.Tensor) -> torch.Tensor:
50
+ """
51
+ Apply scaled dot product attention.
52
+
53
+ Args:
54
+ q (torch.Tensor): A [N, L, H, C] tensor containing Qs.
55
+ kv (torch.Tensor): A [N, L, 2, H, C] tensor containing Ks and Vs.
56
+ """
57
+ ...
58
+
59
+ @overload
60
+ def scaled_dot_product_attention(q: torch.Tensor, k: torch.Tensor, v: torch.Tensor) -> torch.Tensor:
61
+ """
62
+ Apply scaled dot product attention.
63
+
64
+ Args:
65
+ q (torch.Tensor): A [N, L, H, Ci] tensor containing Qs.
66
+ k (torch.Tensor): A [N, L, H, Ci] tensor containing Ks.
67
+ v (torch.Tensor): A [N, L, H, Co] tensor containing Vs.
68
+
69
+ Note:
70
+ k and v are assumed to have the same coordinate map.
71
+ """
72
+ ...
73
+
74
+ def scaled_dot_product_attention(*args, **kwargs):
75
+ arg_names_dict = {
76
+ 1: ['qkv'],
77
+ 2: ['q', 'kv'],
78
+ 3: ['q', 'k', 'v']
79
+ }
80
+ num_all_args = len(args) + len(kwargs)
81
+ assert num_all_args in arg_names_dict, f"Invalid number of arguments, got {num_all_args}, expected 1, 2, or 3"
82
+ for key in arg_names_dict[num_all_args][len(args):]:
83
+ assert key in kwargs, f"Missing argument {key}"
84
+
85
+ if num_all_args == 1:
86
+ qkv = args[0] if len(args) > 0 else kwargs['qkv']
87
+ assert len(qkv.shape) == 5 and qkv.shape[2] == 3, f"Invalid shape for qkv, got {qkv.shape}, expected [N, L, 3, H, C]"
88
+ device = qkv.device
89
+
90
+ elif num_all_args == 2:
91
+ q = args[0] if len(args) > 0 else kwargs['q']
92
+ kv = args[1] if len(args) > 1 else kwargs['kv']
93
+ assert q.shape[0] == kv.shape[0], f"Batch size mismatch, got {q.shape[0]} and {kv.shape[0]}"
94
+ assert len(q.shape) == 4, f"Invalid shape for q, got {q.shape}, expected [N, L, H, C]"
95
+ assert len(kv.shape) == 5, f"Invalid shape for kv, got {kv.shape}, expected [N, L, 2, H, C]"
96
+ device = q.device
97
+
98
+ elif num_all_args == 3:
99
+ q = args[0] if len(args) > 0 else kwargs['q']
100
+ k = args[1] if len(args) > 1 else kwargs['k']
101
+ v = args[2] if len(args) > 2 else kwargs['v']
102
+ assert q.shape[0] == k.shape[0] == v.shape[0], f"Batch size mismatch, got {q.shape[0]}, {k.shape[0]}, and {v.shape[0]}"
103
+ assert len(q.shape) == 4, f"Invalid shape for q, got {q.shape}, expected [N, L, H, Ci]"
104
+ assert len(k.shape) == 4, f"Invalid shape for k, got {k.shape}, expected [N, L, H, Ci]"
105
+ assert len(v.shape) == 4, f"Invalid shape for v, got {v.shape}, expected [N, L, H, Co]"
106
+ device = q.device
107
+
108
+ if BACKEND == 'xformers':
109
+ if num_all_args == 1:
110
+ q, k, v = qkv.unbind(dim=2)
111
+ elif num_all_args == 2:
112
+ k, v = kv.unbind(dim=2)
113
+ out = xops.memory_efficient_attention(q, k, v)
114
+ elif BACKEND == 'flash_attn':
115
+ if num_all_args == 1:
116
+ out = flash_attn.flash_attn_qkvpacked_func(qkv)
117
+ elif num_all_args == 2:
118
+ out = flash_attn.flash_attn_kvpacked_func(q, kv)
119
+ elif num_all_args == 3:
120
+ out = flash_attn.flash_attn_func(q, k, v)
121
+ elif BACKEND == 'sdpa':
122
+ if num_all_args == 1:
123
+ q, k, v = qkv.unbind(dim=2)
124
+ elif num_all_args == 2:
125
+ k, v = kv.unbind(dim=2)
126
+ q = q.permute(0, 2, 1, 3) # [N, H, L, C]
127
+ k = k.permute(0, 2, 1, 3) # [N, H, L, C]
128
+ v = v.permute(0, 2, 1, 3) # [N, H, L, C]
129
+ out = sdpa(q, k, v) # [N, H, L, C]
130
+ out = out.permute(0, 2, 1, 3) # [N, L, H, C]
131
+ elif BACKEND == 'naive':
132
+ if num_all_args == 1:
133
+ q, k, v = qkv.unbind(dim=2)
134
+ elif num_all_args == 2:
135
+ k, v = kv.unbind(dim=2)
136
+ out = _naive_sdpa(q, k, v)
137
+ else:
138
+ raise ValueError(f"Unknown attention module: {BACKEND}")
139
+
140
+ return out
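The dispatcher accepts a packed qkv tensor, a (q, kv) pair, or separate q/k/v, and unbinds the packed layouts for backends that want individual tensors. A shape sketch with illustrative sizes; the commented calls assume the module was imported with an installed backend:

    import torch

    N, L, H, C = 2, 64, 8, 32
    qkv = torch.randn(N, L, 3, H, C)      # packed self-attention input
    q, k, v = qkv.unbind(dim=2)           # each [N, L, H, C]
    kv = torch.stack([k, v], dim=2)       # [N, L, 2, H, C] for the (q, kv) form

    # out = scaled_dot_product_attention(qkv)
    # out = scaled_dot_product_attention(q, kv)
    # out = scaled_dot_product_attention(q, k, v)
    print(qkv.shape, kv.shape)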
trellis/modules/attention/modules.py ADDED
@@ -0,0 +1,146 @@
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+ from .full_attn import scaled_dot_product_attention
6
+
7
+
8
+ class MultiHeadRMSNorm(nn.Module):
9
+ def __init__(self, dim: int, heads: int):
10
+ super().__init__()
11
+ self.scale = dim ** 0.5
12
+ self.gamma = nn.Parameter(torch.ones(heads, dim))
13
+
14
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
15
+ return (F.normalize(x.float(), dim = -1) * self.gamma * self.scale).to(x.dtype)
16
+
17
+
18
+ class RotaryPositionEmbedder(nn.Module):
19
+ def __init__(self, hidden_size: int, in_channels: int = 3):
20
+ super().__init__()
21
+ assert hidden_size % 2 == 0, "Hidden size must be divisible by 2"
22
+ self.hidden_size = hidden_size
23
+ self.in_channels = in_channels
24
+ self.freq_dim = hidden_size // in_channels // 2
25
+ self.freqs = torch.arange(self.freq_dim, dtype=torch.float32) / self.freq_dim
26
+ self.freqs = 1.0 / (10000 ** self.freqs)
27
+
28
+ def _get_phases(self, indices: torch.Tensor) -> torch.Tensor:
29
+ self.freqs = self.freqs.to(indices.device)
30
+ phases = torch.outer(indices, self.freqs)
31
+ phases = torch.polar(torch.ones_like(phases), phases)
32
+ return phases
33
+
34
+ def _rotary_embedding(self, x: torch.Tensor, phases: torch.Tensor) -> torch.Tensor:
35
+ x_complex = torch.view_as_complex(x.float().reshape(*x.shape[:-1], -1, 2))
36
+ x_rotated = x_complex * phases
37
+ x_embed = torch.view_as_real(x_rotated).reshape(*x_rotated.shape[:-1], -1).to(x.dtype)
38
+ return x_embed
39
+
40
+ def forward(self, q: torch.Tensor, k: torch.Tensor, indices: Optional[torch.Tensor] = None) -> Tuple[torch.Tensor, torch.Tensor]:
41
+ """
42
+ Args:
43
+ q (torch.Tensor): [..., N, D] tensor of queries
44
+ k (torch.Tensor): [..., N, D] tensor of keys
45
+ indices (torch.Tensor): [..., N, C] tensor of spatial positions
46
+ """
47
+ if indices is None:
48
+ indices = torch.arange(q.shape[-2], device=q.device)
49
+ if len(q.shape) > 2:
50
+ indices = indices.unsqueeze(0).expand(q.shape[:-2] + (-1,))
51
+
52
+ phases = self._get_phases(indices.reshape(-1)).reshape(*indices.shape[:-1], -1)
53
+ if phases.shape[1] < self.hidden_size // 2:
54
+ phases = torch.cat([phases, torch.polar(
55
+ torch.ones(*phases.shape[:-1], self.hidden_size // 2 - phases.shape[1], device=phases.device),
56
+ torch.zeros(*phases.shape[:-1], self.hidden_size // 2 - phases.shape[1], device=phases.device)
57
+ )], dim=-1)
58
+ q_embed = self._rotary_embedding(q, phases)
59
+ k_embed = self._rotary_embedding(k, phases)
60
+ return q_embed, k_embed
61
+
62
+
63
+ class MultiHeadAttention(nn.Module):
64
+ def __init__(
65
+ self,
66
+ channels: int,
67
+ num_heads: int,
68
+ ctx_channels: Optional[int]=None,
69
+ type: Literal["self", "cross"] = "self",
70
+ attn_mode: Literal["full", "windowed"] = "full",
71
+ window_size: Optional[int] = None,
72
+ shift_window: Optional[Tuple[int, int, int]] = None,
73
+ qkv_bias: bool = True,
74
+ use_rope: bool = False,
75
+ qk_rms_norm: bool = False,
76
+ ):
77
+ super().__init__()
78
+ assert channels % num_heads == 0
79
+ assert type in ["self", "cross"], f"Invalid attention type: {type}"
80
+ assert attn_mode in ["full", "windowed"], f"Invalid attention mode: {attn_mode}"
81
+ assert type == "self" or attn_mode == "full", "Cross-attention only supports full attention"
82
+
83
+ if attn_mode == "windowed":
84
+ raise NotImplementedError("Windowed attention is not yet implemented")
85
+
86
+ self.channels = channels
87
+ self.head_dim = channels // num_heads
88
+ self.ctx_channels = ctx_channels if ctx_channels is not None else channels
89
+ self.num_heads = num_heads
90
+ self._type = type
91
+ self.attn_mode = attn_mode
92
+ self.window_size = window_size
93
+ self.shift_window = shift_window
94
+ self.use_rope = use_rope
95
+ self.qk_rms_norm = qk_rms_norm
96
+
97
+ if self._type == "self":
98
+ self.to_qkv = nn.Linear(channels, channels * 3, bias=qkv_bias)
99
+ else:
100
+ self.to_q = nn.Linear(channels, channels, bias=qkv_bias)
101
+ self.to_kv = nn.Linear(self.ctx_channels, channels * 2, bias=qkv_bias)
102
+
103
+ if self.qk_rms_norm:
104
+ self.q_rms_norm = MultiHeadRMSNorm(self.head_dim, num_heads)
105
+ self.k_rms_norm = MultiHeadRMSNorm(self.head_dim, num_heads)
106
+
107
+ self.to_out = nn.Linear(channels, channels)
108
+
109
+ if use_rope:
110
+ self.rope = RotaryPositionEmbedder(channels)
111
+
112
+ def forward(self, x: torch.Tensor, context: Optional[torch.Tensor] = None, indices: Optional[torch.Tensor] = None) -> torch.Tensor:
113
+ B, L, C = x.shape
114
+ if self._type == "self":
115
+ qkv = self.to_qkv(x)
116
+ qkv = qkv.reshape(B, L, 3, self.num_heads, -1)
117
+ if self.use_rope:
118
+ q, k, v = qkv.unbind(dim=2)
119
+ q, k = self.rope(q, k, indices)
120
+ qkv = torch.stack([q, k, v], dim=2)
121
+ if self.attn_mode == "full":
122
+ if self.qk_rms_norm:
123
+ q, k, v = qkv.unbind(dim=2)
124
+ q = self.q_rms_norm(q)
125
+ k = self.k_rms_norm(k)
126
+ h = scaled_dot_product_attention(q, k, v)
127
+ else:
128
+ h = scaled_dot_product_attention(qkv)
129
+ elif self.attn_mode == "windowed":
130
+ raise NotImplementedError("Windowed attention is not yet implemented")
131
+ else:
132
+ Lkv = context.shape[1]
133
+ q = self.to_q(x)
134
+ kv = self.to_kv(context)
135
+ q = q.reshape(B, L, self.num_heads, -1)
136
+ kv = kv.reshape(B, Lkv, 2, self.num_heads, -1)
137
+ if self.qk_rms_norm:
138
+ q = self.q_rms_norm(q)
139
+ k, v = kv.unbind(dim=2)
140
+ k = self.k_rms_norm(k)
141
+ h = scaled_dot_product_attention(q, k, v)
142
+ else:
143
+ h = scaled_dot_product_attention(q, kv)
144
+ h = h.reshape(B, L, -1)
145
+ h = self.to_out(h)
146
+ return h
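MultiHeadAttention packs QKV for self-attention and keeps a separate Q projection plus a packed KV projection for cross-attention. A usage sketch with illustrative sizes; it assumes the trellis package is importable and that an installed backend (e.g. 'sdpa' via ATTN_BACKEND) was selected before import:

    import torch
    from trellis.modules.attention.modules import MultiHeadAttention

    self_attn = MultiHeadAttention(channels=256, num_heads=8, type="self")
    cross_attn = MultiHeadAttention(channels=256, num_heads=8, ctx_channels=512, type="cross")

    x = torch.randn(2, 100, 256)       # tokens
    ctx = torch.randn(2, 77, 512)      # conditioning tokens
    print(self_attn(x).shape)          # torch.Size([2, 100, 256])
    print(cross_attn(x, ctx).shape)    # torch.Size([2, 100, 256])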
trellis/modules/norm.py ADDED
@@ -0,0 +1,25 @@
1
+ import torch
2
+ import torch.nn as nn
3
+
4
+
5
+ class LayerNorm32(nn.LayerNorm):
6
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
7
+ return super().forward(x.float()).type(x.dtype)
8
+
9
+
10
+ class GroupNorm32(nn.GroupNorm):
11
+ """
12
+ A GroupNorm layer that converts to float32 before the forward pass.
13
+ """
14
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
15
+ return super().forward(x.float()).type(x.dtype)
16
+
17
+
18
+ class ChannelLayerNorm32(LayerNorm32):
19
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
20
+ DIM = x.dim()
21
+ x = x.permute(0, *range(2, DIM), 1).contiguous()
22
+ x = super().forward(x)
23
+ x = x.permute(0, DIM-1, *range(1, DIM-1)).contiguous()
24
+ return x
25
+
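ChannelLayerNorm32 normalizes channel-first feature maps by moving channels last, applying LayerNorm in float32, and permuting back while restoring the input dtype. The same trick written out on plain tensors (illustrative shapes):

    import torch
    import torch.nn as nn

    x = torch.randn(2, 16, 8, 8, 8, dtype=torch.float16)   # channel-first 3D feature map
    norm = nn.LayerNorm(16)
    y = norm(x.permute(0, 2, 3, 4, 1).float()).type(x.dtype).permute(0, 4, 1, 2, 3)
    print(y.shape, y.dtype)   # torch.Size([2, 16, 8, 8, 8]) torch.float16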
trellis/modules/sparse/__init__.py ADDED
@@ -0,0 +1,102 @@
1
+ from typing import *
2
+
3
+ BACKEND = 'spconv'
4
+ DEBUG = False
5
+ ATTN = 'flash_attn'
6
+
7
+ def __from_env():
8
+ import os
9
+
10
+ global BACKEND
11
+ global DEBUG
12
+ global ATTN
13
+
14
+ env_sparse_backend = os.environ.get('SPARSE_BACKEND')
15
+ env_sparse_debug = os.environ.get('SPARSE_DEBUG')
16
+ env_sparse_attn = os.environ.get('SPARSE_ATTN_BACKEND')
17
+ if env_sparse_attn is None:
18
+ env_sparse_attn = os.environ.get('ATTN_BACKEND')
19
+
20
+ if env_sparse_backend is not None and env_sparse_backend in ['spconv', 'torchsparse']:
21
+ BACKEND = env_sparse_backend
22
+ if env_sparse_debug is not None:
23
+ DEBUG = env_sparse_debug == '1'
24
+ if env_sparse_attn is not None and env_sparse_attn in ['xformers', 'flash_attn']:
25
+ ATTN = env_sparse_attn
26
+
27
+ print(f"[SPARSE] Backend: {BACKEND}, Attention: {ATTN}")
28
+
29
+
30
+ __from_env()
31
+
32
+
33
+ def set_backend(backend: Literal['spconv', 'torchsparse']):
34
+ global BACKEND
35
+ BACKEND = backend
36
+
37
+ def set_debug(debug: bool):
38
+ global DEBUG
39
+ DEBUG = debug
40
+
41
+ def set_attn(attn: Literal['xformers', 'flash_attn']):
42
+ global ATTN
43
+ ATTN = attn
44
+
45
+
46
+ import importlib
47
+
48
+ __attributes = {
49
+ 'SparseTensor': 'basic',
50
+ 'sparse_batch_broadcast': 'basic',
51
+ 'sparse_batch_op': 'basic',
52
+ 'sparse_cat': 'basic',
53
+ 'sparse_unbind': 'basic',
54
+ 'SparseGroupNorm': 'norm',
55
+ 'SparseLayerNorm': 'norm',
56
+ 'SparseGroupNorm32': 'norm',
57
+ 'SparseLayerNorm32': 'norm',
58
+ 'SparseReLU': 'nonlinearity',
59
+ 'SparseSiLU': 'nonlinearity',
60
+ 'SparseGELU': 'nonlinearity',
61
+ 'SparseActivation': 'nonlinearity',
62
+ 'SparseLinear': 'linear',
63
+ 'sparse_scaled_dot_product_attention': 'attention',
64
+ 'SerializeMode': 'attention',
65
+ 'sparse_serialized_scaled_dot_product_self_attention': 'attention',
66
+ 'sparse_windowed_scaled_dot_product_self_attention': 'attention',
67
+ 'SparseMultiHeadAttention': 'attention',
68
+ 'SparseConv3d': 'conv',
69
+ 'SparseInverseConv3d': 'conv',
70
+ 'SparseDownsample': 'spatial',
71
+ 'SparseUpsample': 'spatial',
72
+ 'SparseSubdivide' : 'spatial'
73
+ }
74
+
75
+ __submodules = ['transformer']
76
+
77
+ __all__ = list(__attributes.keys()) + __submodules
78
+
79
+ def __getattr__(name):
80
+ if name not in globals():
81
+ if name in __attributes:
82
+ module_name = __attributes[name]
83
+ module = importlib.import_module(f".{module_name}", __name__)
84
+ globals()[name] = getattr(module, name)
85
+ elif name in __submodules:
86
+ module = importlib.import_module(f".{name}", __name__)
87
+ globals()[name] = module
88
+ else:
89
+ raise AttributeError(f"module {__name__} has no attribute {name}")
90
+ return globals()[name]
91
+
92
+
93
+ # For Pylance
94
+ if __name__ == '__main__':
95
+ from .basic import *
96
+ from .norm import *
97
+ from .nonlinearity import *
98
+ from .linear import *
99
+ from .attention import *
100
+ from .conv import *
101
+ from .spatial import *
102
+ import transformer
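Like the attention package, the sparse package reads its backends from the environment at first import, and __getattr__ defers the heavy imports (spconv/torchsparse, flash-attn/xformers) until an attribute is actually touched. A usage sketch, assuming the trellis package and the chosen backends are installed:

    import os
    os.environ['SPARSE_BACKEND'] = 'spconv'         # or 'torchsparse'
    os.environ['SPARSE_ATTN_BACKEND'] = 'xformers'  # or 'flash_attn'

    from trellis.modules import sparse as sp   # prints "[SPARSE] Backend: spconv, Attention: xformers"
    print(sp.BACKEND, sp.ATTN)
    # Touching e.g. sp.SparseConv3d only now imports the 'conv' submodule through __getattr__.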
trellis/modules/sparse/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (3.57 kB). View file
 
trellis/modules/sparse/__pycache__/basic.cpython-312.pyc ADDED
Binary file (27.8 kB). View file
 
trellis/modules/sparse/__pycache__/linear.cpython-312.pyc ADDED
Binary file (1.09 kB). View file
 
trellis/modules/sparse/__pycache__/nonlinearity.cpython-312.pyc ADDED
Binary file (2.4 kB). View file
 
trellis/modules/sparse/__pycache__/norm.cpython-312.pyc ADDED
Binary file (4.48 kB). View file
 
trellis/modules/sparse/__pycache__/spatial.cpython-312.pyc ADDED
Binary file (8.59 kB). View file
 
trellis/modules/sparse/attention/__init__.py ADDED
@@ -0,0 +1,4 @@
1
+ from .full_attn import *
2
+ from .serialized_attn import *
3
+ from .windowed_attn import *
4
+ from .modules import *
trellis/modules/sparse/attention/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (280 Bytes). View file
 
trellis/modules/sparse/attention/__pycache__/full_attn.cpython-312.pyc ADDED
Binary file (13.7 kB). View file
 
trellis/modules/sparse/attention/__pycache__/modules.cpython-312.pyc ADDED
Binary file (9.64 kB). View file
 
trellis/modules/sparse/attention/__pycache__/serialized_attn.cpython-312.pyc ADDED
Binary file (10.5 kB). View file
 
trellis/modules/sparse/attention/__pycache__/windowed_attn.cpython-312.pyc ADDED
Binary file (8.53 kB). View file
 
trellis/modules/sparse/attention/full_attn.py ADDED
@@ -0,0 +1,215 @@
1
+ from typing import *
2
+ import torch
3
+ from .. import SparseTensor
4
+ from .. import DEBUG, ATTN
5
+
6
+ if ATTN == 'xformers':
7
+ import xformers.ops as xops
8
+ elif ATTN == 'flash_attn':
9
+ import flash_attn
10
+ else:
11
+ raise ValueError(f"Unknown attention module: {ATTN}")
12
+
13
+
14
+ __all__ = [
15
+ 'sparse_scaled_dot_product_attention',
16
+ ]
17
+
18
+
19
+ @overload
20
+ def sparse_scaled_dot_product_attention(qkv: SparseTensor) -> SparseTensor:
21
+ """
22
+ Apply scaled dot product attention to a sparse tensor.
23
+
24
+ Args:
25
+ qkv (SparseTensor): A [N, *, 3, H, C] sparse tensor containing Qs, Ks, and Vs.
26
+ """
27
+ ...
28
+
29
+ @overload
30
+ def sparse_scaled_dot_product_attention(q: SparseTensor, kv: Union[SparseTensor, torch.Tensor]) -> SparseTensor:
31
+ """
32
+ Apply scaled dot product attention to a sparse tensor.
33
+
34
+ Args:
35
+ q (SparseTensor): A [N, *, H, C] sparse tensor containing Qs.
36
+ kv (SparseTensor or torch.Tensor): A [N, *, 2, H, C] sparse tensor or a [N, L, 2, H, C] dense tensor containing Ks and Vs.
37
+ """
38
+ ...
39
+
40
+ @overload
41
+ def sparse_scaled_dot_product_attention(q: torch.Tensor, kv: SparseTensor) -> torch.Tensor:
42
+ """
43
+ Apply scaled dot product attention to a sparse tensor.
44
+
45
+ Args:
46
+ q (torch.Tensor): A [N, L, H, C] dense tensor containing Qs.
47
+ kv (SparseTensor): A [N, *, 2, H, C] sparse tensor containing Ks and Vs.
48
+ """
49
+ ...
50
+
51
+ @overload
52
+ def sparse_scaled_dot_product_attention(q: SparseTensor, k: SparseTensor, v: SparseTensor) -> SparseTensor:
53
+ """
54
+ Apply scaled dot product attention to a sparse tensor.
55
+
56
+ Args:
57
+ q (SparseTensor): A [N, *, H, Ci] sparse tensor containing Qs.
58
+ k (SparseTensor): A [N, *, H, Ci] sparse tensor containing Ks.
59
+ v (SparseTensor): A [N, *, H, Co] sparse tensor containing Vs.
60
+
61
+ Note:
62
+ k and v are assumed to have the same coordinate map.
63
+ """
64
+ ...
65
+
66
+ @overload
67
+ def sparse_scaled_dot_product_attention(q: SparseTensor, k: torch.Tensor, v: torch.Tensor) -> SparseTensor:
68
+ """
69
+ Apply scaled dot product attention to a sparse tensor.
70
+
71
+ Args:
72
+ q (SparseTensor): A [N, *, H, Ci] sparse tensor containing Qs.
73
+         k (torch.Tensor): A [N, L, H, Ci] dense tensor containing Ks.
+         v (torch.Tensor): A [N, L, H, Co] dense tensor containing Vs.
+     """
+     ...
+
+ @overload
+ def sparse_scaled_dot_product_attention(q: torch.Tensor, k: SparseTensor, v: SparseTensor) -> torch.Tensor:
+     """
+     Apply scaled dot product attention to a sparse tensor.
+
+     Args:
+         q (torch.Tensor): A [N, L, H, Ci] dense tensor containing Qs.
+         k (SparseTensor): A [N, *, H, Ci] sparse tensor containing Ks.
+         v (SparseTensor): A [N, *, H, Co] sparse tensor containing Vs.
+     """
+     ...
+
+ def sparse_scaled_dot_product_attention(*args, **kwargs):
+     arg_names_dict = {
+         1: ['qkv'],
+         2: ['q', 'kv'],
+         3: ['q', 'k', 'v']
+     }
+     num_all_args = len(args) + len(kwargs)
+     assert num_all_args in arg_names_dict, f"Invalid number of arguments, got {num_all_args}, expected 1, 2, or 3"
+     for key in arg_names_dict[num_all_args][len(args):]:
+         assert key in kwargs, f"Missing argument {key}"
+
+     if num_all_args == 1:
+         qkv = args[0] if len(args) > 0 else kwargs['qkv']
+         assert isinstance(qkv, SparseTensor), f"qkv must be a SparseTensor, got {type(qkv)}"
+         assert len(qkv.shape) == 4 and qkv.shape[1] == 3, f"Invalid shape for qkv, got {qkv.shape}, expected [N, *, 3, H, C]"
+         device = qkv.device
+
+         s = qkv
+         q_seqlen = [qkv.layout[i].stop - qkv.layout[i].start for i in range(qkv.shape[0])]
+         kv_seqlen = q_seqlen
+         qkv = qkv.feats   # [T, 3, H, C]
+
+     elif num_all_args == 2:
+         q = args[0] if len(args) > 0 else kwargs['q']
+         kv = args[1] if len(args) > 1 else kwargs['kv']
+         assert isinstance(q, SparseTensor) and isinstance(kv, (SparseTensor, torch.Tensor)) or \
+                isinstance(q, torch.Tensor) and isinstance(kv, SparseTensor), \
+                f"Invalid types, got {type(q)} and {type(kv)}"
+         assert q.shape[0] == kv.shape[0], f"Batch size mismatch, got {q.shape[0]} and {kv.shape[0]}"
+         device = q.device
+
+         if isinstance(q, SparseTensor):
+             assert len(q.shape) == 3, f"Invalid shape for q, got {q.shape}, expected [N, *, H, C]"
+             s = q
+             q_seqlen = [q.layout[i].stop - q.layout[i].start for i in range(q.shape[0])]
+             q = q.feats   # [T_Q, H, C]
+         else:
+             assert len(q.shape) == 4, f"Invalid shape for q, got {q.shape}, expected [N, L, H, C]"
+             s = None
+             N, L, H, C = q.shape
+             q_seqlen = [L] * N
+             q = q.reshape(N * L, H, C)   # [T_Q, H, C]
+
+         if isinstance(kv, SparseTensor):
+             assert len(kv.shape) == 4 and kv.shape[1] == 2, f"Invalid shape for kv, got {kv.shape}, expected [N, *, 2, H, C]"
+             kv_seqlen = [kv.layout[i].stop - kv.layout[i].start for i in range(kv.shape[0])]
+             kv = kv.feats   # [T_KV, 2, H, C]
+         else:
+             assert len(kv.shape) == 5, f"Invalid shape for kv, got {kv.shape}, expected [N, L, 2, H, C]"
+             N, L, _, H, C = kv.shape
+             kv_seqlen = [L] * N
+             kv = kv.reshape(N * L, 2, H, C)   # [T_KV, 2, H, C]
+
+     elif num_all_args == 3:
+         q = args[0] if len(args) > 0 else kwargs['q']
+         k = args[1] if len(args) > 1 else kwargs['k']
+         v = args[2] if len(args) > 2 else kwargs['v']
+         assert isinstance(q, SparseTensor) and isinstance(k, (SparseTensor, torch.Tensor)) and type(k) == type(v) or \
+                isinstance(q, torch.Tensor) and isinstance(k, SparseTensor) and isinstance(v, SparseTensor), \
+                f"Invalid types, got {type(q)}, {type(k)}, and {type(v)}"
+         assert q.shape[0] == k.shape[0] == v.shape[0], f"Batch size mismatch, got {q.shape[0]}, {k.shape[0]}, and {v.shape[0]}"
+         device = q.device
+
+         if isinstance(q, SparseTensor):
+             assert len(q.shape) == 3, f"Invalid shape for q, got {q.shape}, expected [N, *, H, Ci]"
+             s = q
+             q_seqlen = [q.layout[i].stop - q.layout[i].start for i in range(q.shape[0])]
+             q = q.feats   # [T_Q, H, Ci]
+         else:
+             assert len(q.shape) == 4, f"Invalid shape for q, got {q.shape}, expected [N, L, H, Ci]"
+             s = None
+             N, L, H, CI = q.shape
+             q_seqlen = [L] * N
+             q = q.reshape(N * L, H, CI)   # [T_Q, H, Ci]
+
+         if isinstance(k, SparseTensor):
+             assert len(k.shape) == 3, f"Invalid shape for k, got {k.shape}, expected [N, *, H, Ci]"
+             assert len(v.shape) == 3, f"Invalid shape for v, got {v.shape}, expected [N, *, H, Co]"
+             kv_seqlen = [k.layout[i].stop - k.layout[i].start for i in range(k.shape[0])]
+             k = k.feats   # [T_KV, H, Ci]
+             v = v.feats   # [T_KV, H, Co]
+         else:
+             assert len(k.shape) == 4, f"Invalid shape for k, got {k.shape}, expected [N, L, H, Ci]"
+             assert len(v.shape) == 4, f"Invalid shape for v, got {v.shape}, expected [N, L, H, Co]"
+             N, L, H, CI, CO = *k.shape, v.shape[-1]
+             kv_seqlen = [L] * N
+             k = k.reshape(N * L, H, CI)   # [T_KV, H, Ci]
+             v = v.reshape(N * L, H, CO)   # [T_KV, H, Co]
+
+     if DEBUG:
+         if s is not None:
+             for i in range(s.shape[0]):
+                 assert (s.coords[s.layout[i]] == i).all(), f"SparseScaledDotProductSelfAttention: batch index mismatch"
+         if num_all_args in [2, 3]:
+             assert q.shape[:2] == [1, sum(q_seqlen)], f"SparseScaledDotProductSelfAttention: q shape mismatch"
+         if num_all_args == 3:
+             assert k.shape[:2] == [1, sum(kv_seqlen)], f"SparseScaledDotProductSelfAttention: k shape mismatch"
+             assert v.shape[:2] == [1, sum(kv_seqlen)], f"SparseScaledDotProductSelfAttention: v shape mismatch"
+
+     if ATTN == 'xformers':
+         if num_all_args == 1:
+             q, k, v = qkv.unbind(dim=1)
+         elif num_all_args == 2:
+             k, v = kv.unbind(dim=1)
+         q = q.unsqueeze(0)
+         k = k.unsqueeze(0)
+         v = v.unsqueeze(0)
+         mask = xops.fmha.BlockDiagonalMask.from_seqlens(q_seqlen, kv_seqlen)
+         out = xops.memory_efficient_attention(q, k, v, mask)[0]
+     elif ATTN == 'flash_attn':
+         cu_seqlens_q = torch.cat([torch.tensor([0]), torch.cumsum(torch.tensor(q_seqlen), dim=0)]).int().to(device)
+         if num_all_args in [2, 3]:
+             cu_seqlens_kv = torch.cat([torch.tensor([0]), torch.cumsum(torch.tensor(kv_seqlen), dim=0)]).int().to(device)
+         if num_all_args == 1:
+             out = flash_attn.flash_attn_varlen_qkvpacked_func(qkv, cu_seqlens_q, max(q_seqlen))
+         elif num_all_args == 2:
+             out = flash_attn.flash_attn_varlen_kvpacked_func(q, kv, cu_seqlens_q, cu_seqlens_kv, max(q_seqlen), max(kv_seqlen))
+         elif num_all_args == 3:
+             out = flash_attn.flash_attn_varlen_func(q, k, v, cu_seqlens_q, cu_seqlens_kv, max(q_seqlen), max(kv_seqlen))
+     else:
+         raise ValueError(f"Unknown attention module: {ATTN}")
+
+     if s is not None:
+         return s.replace(out)
+     else:
+         return out.reshape(N, L, H, -1)
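Editor's note: a minimal usage sketch of the dispatcher above (not part of the commit). It assumes `sparse_scaled_dot_product_attention` is re-exported from `trellis.modules.sparse.attention`, that `SparseTensor` can be built from per-point features plus `[batch, x, y, z]` coordinates as `SparseTensor(feats=..., coords=...)`, and that fp16 CUDA tensors are used (the flash_attn backend typically requires that).

```python
import torch
import trellis.modules.sparse as sp  # assumed import path for SparseTensor
from trellis.modules.sparse.attention import sparse_scaled_dot_product_attention  # assumed re-export

T, H, C = 1024, 8, 64  # active voxels, heads, channels per head
coords = torch.cat([
    torch.zeros(T, 1, dtype=torch.int32),             # batch index (single batch item)
    torch.randint(0, 64, (T, 3), dtype=torch.int32),  # x, y, z voxel coordinates
], dim=1).cuda()

# 1-argument form: packed self-attention, Q/K/V stacked along dim 1 of a sparse tensor.
qkv = sp.SparseTensor(feats=torch.randn(T, 3, H, C, device='cuda', dtype=torch.float16), coords=coords)
out = sparse_scaled_dot_product_attention(qkv)    # SparseTensor with feats [T, H, C]

# 2-argument form: sparse queries cross-attending to a dense, packed K/V context.
q = sp.SparseTensor(feats=torch.randn(T, H, C, device='cuda', dtype=torch.float16), coords=coords)
kv = torch.randn(1, 77, 2, H, C, device='cuda', dtype=torch.float16)  # [N, L, 2, H, C]
out = sparse_scaled_dot_product_attention(q, kv)  # SparseTensor with feats [T, H, C]
```

Internally, both calls flatten the variable-length sequences and hand per-batch sequence lengths to xformers' block-diagonal mask or flash-attn's `cu_seqlens`, so no padding is ever materialized.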
trellis/modules/sparse/attention/modules.py ADDED
@@ -0,0 +1,139 @@
+ from typing import *
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from .. import SparseTensor
+ from .full_attn import sparse_scaled_dot_product_attention
+ from .serialized_attn import SerializeMode, sparse_serialized_scaled_dot_product_self_attention
+ from .windowed_attn import sparse_windowed_scaled_dot_product_self_attention
+ from ...attention import RotaryPositionEmbedder
+
+
+ class SparseMultiHeadRMSNorm(nn.Module):
+     def __init__(self, dim: int, heads: int):
+         super().__init__()
+         self.scale = dim ** 0.5
+         self.gamma = nn.Parameter(torch.ones(heads, dim))
+
+     def forward(self, x: Union[SparseTensor, torch.Tensor]) -> Union[SparseTensor, torch.Tensor]:
+         x_type = x.dtype
+         x = x.float()
+         if isinstance(x, SparseTensor):
+             x = x.replace(F.normalize(x.feats, dim=-1))
+         else:
+             x = F.normalize(x, dim=-1)
+         return (x * self.gamma * self.scale).to(x_type)
+
+
+ class SparseMultiHeadAttention(nn.Module):
+     def __init__(
+         self,
+         channels: int,
+         num_heads: int,
+         ctx_channels: Optional[int] = None,
+         type: Literal["self", "cross"] = "self",
+         attn_mode: Literal["full", "serialized", "windowed"] = "full",
+         window_size: Optional[int] = None,
+         shift_sequence: Optional[int] = None,
+         shift_window: Optional[Tuple[int, int, int]] = None,
+         serialize_mode: Optional[SerializeMode] = None,
+         qkv_bias: bool = True,
+         use_rope: bool = False,
+         qk_rms_norm: bool = False,
+     ):
+         super().__init__()
+         assert channels % num_heads == 0
+         assert type in ["self", "cross"], f"Invalid attention type: {type}"
+         assert attn_mode in ["full", "serialized", "windowed"], f"Invalid attention mode: {attn_mode}"
+         assert type == "self" or attn_mode == "full", "Cross-attention only supports full attention"
+         assert type == "self" or use_rope is False, "Rotary position embeddings only supported for self-attention"
+         self.channels = channels
+         self.ctx_channels = ctx_channels if ctx_channels is not None else channels
+         self.num_heads = num_heads
+         self._type = type
+         self.attn_mode = attn_mode
+         self.window_size = window_size
+         self.shift_sequence = shift_sequence
+         self.shift_window = shift_window
+         self.serialize_mode = serialize_mode
+         self.use_rope = use_rope
+         self.qk_rms_norm = qk_rms_norm
+
+         if self._type == "self":
+             self.to_qkv = nn.Linear(channels, channels * 3, bias=qkv_bias)
+         else:
+             self.to_q = nn.Linear(channels, channels, bias=qkv_bias)
+             self.to_kv = nn.Linear(self.ctx_channels, channels * 2, bias=qkv_bias)
+
+         if self.qk_rms_norm:
+             self.q_rms_norm = SparseMultiHeadRMSNorm(channels // num_heads, num_heads)
+             self.k_rms_norm = SparseMultiHeadRMSNorm(channels // num_heads, num_heads)
+
+         self.to_out = nn.Linear(channels, channels)
+
+         if use_rope:
+             self.rope = RotaryPositionEmbedder(channels)
+
+     @staticmethod
+     def _linear(module: nn.Linear, x: Union[SparseTensor, torch.Tensor]) -> Union[SparseTensor, torch.Tensor]:
+         if isinstance(x, SparseTensor):
+             return x.replace(module(x.feats))
+         else:
+             return module(x)
+
+     @staticmethod
+     def _reshape_chs(x: Union[SparseTensor, torch.Tensor], shape: Tuple[int, ...]) -> Union[SparseTensor, torch.Tensor]:
+         if isinstance(x, SparseTensor):
+             return x.reshape(*shape)
+         else:
+             return x.reshape(*x.shape[:2], *shape)
+
+     def _fused_pre(self, x: Union[SparseTensor, torch.Tensor], num_fused: int) -> Union[SparseTensor, torch.Tensor]:
+         if isinstance(x, SparseTensor):
+             x_feats = x.feats.unsqueeze(0)
+         else:
+             x_feats = x
+         x_feats = x_feats.reshape(*x_feats.shape[:2], num_fused, self.num_heads, -1)
+         return x.replace(x_feats.squeeze(0)) if isinstance(x, SparseTensor) else x_feats
+
+     def _rope(self, qkv: SparseTensor) -> SparseTensor:
+         q, k, v = qkv.feats.unbind(dim=1)   # [T, H, C]
+         q, k = self.rope(q, k, qkv.coords[:, 1:])
+         qkv = qkv.replace(torch.stack([q, k, v], dim=1))
+         return qkv
+
+     def forward(self, x: Union[SparseTensor, torch.Tensor], context: Optional[Union[SparseTensor, torch.Tensor]] = None) -> Union[SparseTensor, torch.Tensor]:
+         if self._type == "self":
+             qkv = self._linear(self.to_qkv, x)
+             qkv = self._fused_pre(qkv, num_fused=3)
+             if self.use_rope:
+                 qkv = self._rope(qkv)
+             if self.qk_rms_norm:
+                 q, k, v = qkv.unbind(dim=1)
+                 q = self.q_rms_norm(q)
+                 k = self.k_rms_norm(k)
+                 qkv = qkv.replace(torch.stack([q.feats, k.feats, v.feats], dim=1))
+             if self.attn_mode == "full":
+                 h = sparse_scaled_dot_product_attention(qkv)
+             elif self.attn_mode == "serialized":
+                 h = sparse_serialized_scaled_dot_product_self_attention(
+                     qkv, self.window_size, serialize_mode=self.serialize_mode, shift_sequence=self.shift_sequence, shift_window=self.shift_window
+                 )
+             elif self.attn_mode == "windowed":
+                 h = sparse_windowed_scaled_dot_product_self_attention(
+                     qkv, self.window_size, shift_window=self.shift_window
+                 )
+         else:
+             q = self._linear(self.to_q, x)
+             q = self._reshape_chs(q, (self.num_heads, -1))
+             kv = self._linear(self.to_kv, context)
+             kv = self._fused_pre(kv, num_fused=2)
+             if self.qk_rms_norm:
+                 q = self.q_rms_norm(q)
+                 k, v = kv.unbind(dim=1)
+                 k = self.k_rms_norm(k)
+                 kv = kv.replace(torch.stack([k.feats, v.feats], dim=1))
+             h = sparse_scaled_dot_product_attention(q, kv)
+         h = self._reshape_chs(h, (-1,))
+         h = self._linear(self.to_out, h)
+         return h
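Editor's note: an illustrative sketch of how `SparseMultiHeadAttention` might be wired up, mirroring the `__init__` and `forward` signatures added above. The `SparseTensor` construction, import paths, and fp16/CUDA placement are assumptions, and the serialized mode additionally relies on the `vox2seq` package used by `calc_serialization` in serialized_attn.py below.

```python
import torch
import trellis.modules.sparse as sp  # assumed import path
from trellis.modules.sparse.attention import SparseMultiHeadAttention  # assumed re-export
from trellis.modules.sparse.attention.serialized_attn import SerializeMode

T, channels = 1024, 512
x = sp.SparseTensor(
    feats=torch.randn(T, channels, device='cuda', dtype=torch.float16),  # [T, channels]
    coords=torch.cat([
        torch.zeros(T, 1, dtype=torch.int32),             # batch index
        torch.randint(0, 64, (T, 3), dtype=torch.int32),  # x, y, z
    ], dim=1).cuda(),
)

# Full self-attention over all active voxels, with QK RMS normalization.
self_attn = SparseMultiHeadAttention(channels, num_heads=8, qk_rms_norm=True).half().cuda()
h = self_attn(x)  # SparseTensor with feats [T, channels]

# Serialized self-attention: windows of 512 points along a Z-order curve.
ser_attn = SparseMultiHeadAttention(
    channels, num_heads=8, attn_mode="serialized",
    window_size=512, serialize_mode=SerializeMode.Z_ORDER,
    shift_sequence=0, shift_window=(0, 0, 0),
).half().cuda()
h = ser_attn(x)

# Cross-attention from sparse voxels to a dense conditioning sequence.
cross_attn = SparseMultiHeadAttention(channels, num_heads=8, ctx_channels=1024, type="cross").half().cuda()
context = torch.randn(1, 77, 1024, device='cuda', dtype=torch.float16)  # [N, L, ctx_channels]
h = cross_attn(x, context)
```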
trellis/modules/sparse/attention/serialized_attn.py ADDED
@@ -0,0 +1,193 @@
+ from typing import *
+ from enum import Enum
+ import torch
+ import math
+ from .. import SparseTensor
+ from .. import DEBUG, ATTN
+
+ if ATTN == 'xformers':
+     import xformers.ops as xops
+ elif ATTN == 'flash_attn':
+     import flash_attn
+ else:
+     raise ValueError(f"Unknown attention module: {ATTN}")
+
+
+ __all__ = [
+     'sparse_serialized_scaled_dot_product_self_attention',
+ ]
+
+
+ class SerializeMode(Enum):
+     Z_ORDER = 0
+     Z_ORDER_TRANSPOSED = 1
+     HILBERT = 2
+     HILBERT_TRANSPOSED = 3
+
+
+ SerializeModes = [
+     SerializeMode.Z_ORDER,
+     SerializeMode.Z_ORDER_TRANSPOSED,
+     SerializeMode.HILBERT,
+     SerializeMode.HILBERT_TRANSPOSED
+ ]
+
+
+ def calc_serialization(
+     tensor: SparseTensor,
+     window_size: int,
+     serialize_mode: SerializeMode = SerializeMode.Z_ORDER,
+     shift_sequence: int = 0,
+     shift_window: Tuple[int, int, int] = (0, 0, 0)
+ ) -> Tuple[torch.Tensor, torch.Tensor, List[int]]:
+     """
+     Calculate serialization and partitioning for a set of coordinates.
+
+     Args:
+         tensor (SparseTensor): The input tensor.
+         window_size (int): The window size to use.
+         serialize_mode (SerializeMode): The serialization mode to use.
+         shift_sequence (int): The shift of serialized sequence.
+         shift_window (Tuple[int, int, int]): The shift of serialized coordinates.
+
+     Returns:
+         (torch.Tensor, torch.Tensor): Forwards and backwards indices.
+     """
+     fwd_indices = []
+     bwd_indices = []
+     seq_lens = []
+     seq_batch_indices = []
+     offsets = [0]
+
+     if 'vox2seq' not in globals():
+         import vox2seq
+
+     # Serialize the input
+     serialize_coords = tensor.coords[:, 1:].clone()
+     serialize_coords += torch.tensor(shift_window, dtype=torch.int32, device=tensor.device).reshape(1, 3)
+     if serialize_mode == SerializeMode.Z_ORDER:
+         code = vox2seq.encode(serialize_coords, mode='z_order', permute=[0, 1, 2])
+     elif serialize_mode == SerializeMode.Z_ORDER_TRANSPOSED:
+         code = vox2seq.encode(serialize_coords, mode='z_order', permute=[1, 0, 2])
+     elif serialize_mode == SerializeMode.HILBERT:
+         code = vox2seq.encode(serialize_coords, mode='hilbert', permute=[0, 1, 2])
+     elif serialize_mode == SerializeMode.HILBERT_TRANSPOSED:
+         code = vox2seq.encode(serialize_coords, mode='hilbert', permute=[1, 0, 2])
+     else:
+         raise ValueError(f"Unknown serialize mode: {serialize_mode}")
+
+     for bi, s in enumerate(tensor.layout):
+         num_points = s.stop - s.start
+         num_windows = (num_points + window_size - 1) // window_size
+         valid_window_size = num_points / num_windows
+         to_ordered = torch.argsort(code[s.start:s.stop])
+         if num_windows == 1:
+             fwd_indices.append(to_ordered)
+             bwd_indices.append(torch.zeros_like(to_ordered).scatter_(0, to_ordered, torch.arange(num_points, device=tensor.device)))
+             fwd_indices[-1] += s.start
+             bwd_indices[-1] += offsets[-1]
+             seq_lens.append(num_points)
+             seq_batch_indices.append(bi)
+             offsets.append(offsets[-1] + seq_lens[-1])
+         else:
+             # Partition the input
+             offset = 0
+             mids = [(i + 0.5) * valid_window_size + shift_sequence for i in range(num_windows)]
+             split = [math.floor(i * valid_window_size + shift_sequence) for i in range(num_windows + 1)]
+             bwd_index = torch.zeros((num_points,), dtype=torch.int64, device=tensor.device)
+             for i in range(num_windows):
+                 mid = mids[i]
+                 valid_start = split[i]
+                 valid_end = split[i + 1]
+                 padded_start = math.floor(mid - 0.5 * window_size)
+                 padded_end = padded_start + window_size
+                 fwd_indices.append(to_ordered[torch.arange(padded_start, padded_end, device=tensor.device) % num_points])
+                 offset += valid_start - padded_start
+                 bwd_index.scatter_(0, fwd_indices[-1][valid_start-padded_start:valid_end-padded_start], torch.arange(offset, offset + valid_end - valid_start, device=tensor.device))
+                 offset += padded_end - valid_start
+                 fwd_indices[-1] += s.start
+             seq_lens.extend([window_size] * num_windows)
+             seq_batch_indices.extend([bi] * num_windows)
+             bwd_indices.append(bwd_index + offsets[-1])
+             offsets.append(offsets[-1] + num_windows * window_size)
+
+     fwd_indices = torch.cat(fwd_indices)
+     bwd_indices = torch.cat(bwd_indices)
+
+     return fwd_indices, bwd_indices, seq_lens, seq_batch_indices
+
+
+ def sparse_serialized_scaled_dot_product_self_attention(
+     qkv: SparseTensor,
+     window_size: int,
+     serialize_mode: SerializeMode = SerializeMode.Z_ORDER,
+     shift_sequence: int = 0,
+     shift_window: Tuple[int, int, int] = (0, 0, 0)
+ ) -> SparseTensor:
+     """
+     Apply serialized scaled dot product self attention to a sparse tensor.
+
+     Args:
+         qkv (SparseTensor): [N, *, 3, H, C] sparse tensor containing Qs, Ks, and Vs.
+         window_size (int): The window size to use.
+         serialize_mode (SerializeMode): The serialization mode to use.
+         shift_sequence (int): The shift of serialized sequence.
+         shift_window (Tuple[int, int, int]): The shift of serialized coordinates.
+         shift (int): The shift to use.
+     """
+     assert len(qkv.shape) == 4 and qkv.shape[1] == 3, f"Invalid shape for qkv, got {qkv.shape}, expected [N, *, 3, H, C]"
+
+     serialization_spatial_cache_name = f'serialization_{serialize_mode}_{window_size}_{shift_sequence}_{shift_window}'
+     serialization_spatial_cache = qkv.get_spatial_cache(serialization_spatial_cache_name)
+     if serialization_spatial_cache is None:
+         fwd_indices, bwd_indices, seq_lens, seq_batch_indices = calc_serialization(qkv, window_size, serialize_mode, shift_sequence, shift_window)
+         qkv.register_spatial_cache(serialization_spatial_cache_name, (fwd_indices, bwd_indices, seq_lens, seq_batch_indices))
+     else:
+         fwd_indices, bwd_indices, seq_lens, seq_batch_indices = serialization_spatial_cache
+
+     M = fwd_indices.shape[0]
+     T = qkv.feats.shape[0]
+     H = qkv.feats.shape[2]
+     C = qkv.feats.shape[3]
+
+     qkv_feats = qkv.feats[fwd_indices]   # [M, 3, H, C]
+
+     if DEBUG:
+         start = 0
+         qkv_coords = qkv.coords[fwd_indices]
+         for i in range(len(seq_lens)):
+             assert (qkv_coords[start:start+seq_lens[i], 0] == seq_batch_indices[i]).all(), f"SparseWindowedScaledDotProductSelfAttention: batch index mismatch"
+             start += seq_lens[i]
+
+     if all([seq_len == window_size for seq_len in seq_lens]):
+         B = len(seq_lens)
+         N = window_size
+         qkv_feats = qkv_feats.reshape(B, N, 3, H, C)
+         if ATTN == 'xformers':
+             q, k, v = qkv_feats.unbind(dim=2)   # [B, N, H, C]
+             out = xops.memory_efficient_attention(q, k, v)   # [B, N, H, C]
+         elif ATTN == 'flash_attn':
+             out = flash_attn.flash_attn_qkvpacked_func(qkv_feats)   # [B, N, H, C]
+         else:
+             raise ValueError(f"Unknown attention module: {ATTN}")
+         out = out.reshape(B * N, H, C)   # [M, H, C]
+     else:
+         if ATTN == 'xformers':
+             q, k, v = qkv_feats.unbind(dim=1)   # [M, H, C]
+             q = q.unsqueeze(0)   # [1, M, H, C]
+             k = k.unsqueeze(0)   # [1, M, H, C]
+             v = v.unsqueeze(0)   # [1, M, H, C]
+             mask = xops.fmha.BlockDiagonalMask.from_seqlens(seq_lens)
+             out = xops.memory_efficient_attention(q, k, v, mask)[0]   # [M, H, C]
+         elif ATTN == 'flash_attn':
+             cu_seqlens = torch.cat([torch.tensor([0]), torch.cumsum(torch.tensor(seq_lens), dim=0)], dim=0) \
+                 .to(qkv.device).int()
+             out = flash_attn.flash_attn_varlen_qkvpacked_func(qkv_feats, cu_seqlens, max(seq_lens))   # [M, H, C]
+
+     out = out[bwd_indices]   # [T, H, C]
+
+     if DEBUG:
+         qkv_coords = qkv_coords[bwd_indices]
+         assert torch.equal(qkv_coords, qkv.coords), "SparseWindowedScaledDotProductSelfAttention: coordinate mismatch"
+
+     return qkv.replace(out)
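Editor's note: to make the partitioning in `calc_serialization` concrete, here is its window arithmetic worked through in plain Python for a single batch item (no sparse tensors or `vox2seq` involved); the numbers follow directly from the code above.

```python
import math

num_points, window_size = 1000, 512
num_windows = (num_points + window_size - 1) // window_size   # 2 windows
valid_window_size = num_points / num_windows                  # 500.0 "owned" points per window
shift_sequence = 0

mids = [(i + 0.5) * valid_window_size + shift_sequence for i in range(num_windows)]
split = [math.floor(i * valid_window_size + shift_sequence) for i in range(num_windows + 1)]

for i in range(num_windows):
    padded_start = math.floor(mids[i] - 0.5 * window_size)
    padded_end = padded_start + window_size
    print(f"window {i}: owns serialized points [{split[i]}, {split[i + 1]}), "
          f"attends over [{padded_start}, {padded_end}) mod {num_points}")
# window 0: owns serialized points [0, 500), attends over [-6, 506) mod 1000
# window 1: owns serialized points [500, 1000), attends over [494, 1006) mod 1000
```

Each padded window contains exactly `window_size` points (indices wrap modulo `num_points`), which is why the attention call above can batch them as equal-length sequences whenever every `seq_len` equals `window_size`; the backward indices then scatter only each window's owned span back to the original point order.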