Commit db6a3b7 · Parent(s): ceccc26

Upload

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the rest.
- app.py +152 -0
- assets/example_image/T.png +0 -0
- assets/example_image/typical_building_building.png +0 -0
- assets/example_image/typical_building_castle.png +0 -0
- assets/example_image/typical_building_colorful_cottage.png +0 -0
- assets/example_image/typical_building_maya_pyramid.png +0 -0
- assets/example_image/typical_building_mushroom.png +0 -0
- assets/example_image/typical_building_space_station.png +0 -0
- assets/example_image/typical_creature_dragon.png +0 -0
- assets/example_image/typical_creature_elephant.png +0 -0
- assets/example_image/typical_creature_furry.png +0 -0
- assets/example_image/typical_creature_quadruped.png +0 -0
- assets/example_image/typical_creature_robot_crab.png +0 -0
- assets/example_image/typical_creature_robot_dinosour.png +0 -0
- assets/example_image/typical_creature_rock_monster.png +0 -0
- assets/example_image/typical_humanoid_block_robot.png +0 -0
- assets/example_image/typical_humanoid_dragonborn.png +0 -0
- assets/example_image/typical_humanoid_dwarf.png +0 -0
- assets/example_image/typical_humanoid_goblin.png +0 -0
- assets/example_image/typical_humanoid_mech.png +0 -0
- assets/example_image/typical_misc_crate.png +0 -0
- assets/example_image/typical_misc_fireplace.png +0 -0
- assets/example_image/typical_misc_gate.png +0 -0
- assets/example_image/typical_misc_lantern.png +0 -0
- assets/example_image/typical_misc_magicbook.png +0 -0
- assets/example_image/typical_misc_mailbox.png +0 -0
- assets/example_image/typical_misc_monster_chest.png +0 -0
- assets/example_image/typical_misc_paper_machine.png +0 -0
- assets/example_image/typical_misc_phonograph.png +0 -0
- assets/example_image/typical_misc_portal2.png +0 -0
- assets/example_image/typical_misc_storage_chest.png +0 -0
- assets/example_image/typical_misc_telephone.png +0 -0
- assets/example_image/typical_misc_television.png +0 -0
- assets/example_image/typical_misc_workbench.png +0 -0
- assets/example_image/typical_vehicle_biplane.png +0 -0
- assets/example_image/typical_vehicle_bulldozer.png +0 -0
- assets/example_image/typical_vehicle_cart.png +0 -0
- assets/example_image/typical_vehicle_excavator.png +0 -0
- assets/example_image/typical_vehicle_helicopter.png +0 -0
- assets/example_image/typical_vehicle_locomotive.png +0 -0
- assets/example_image/typical_vehicle_pirate_ship.png +0 -0
- assets/example_image/weatherworn_misc_paper_machine3.png +0 -0
- requirements.txt +28 -0
- trellis/__init__.py +6 -0
- trellis/models/__init__.py +70 -0
- trellis/models/sparse_structure_flow.py +200 -0
- trellis/models/sparse_structure_vae.py +306 -0
- trellis/models/structured_latent_flow.py +262 -0
- trellis/models/structured_latent_vae/__init__.py +4 -0
- trellis/models/structured_latent_vae/base.py +117 -0
    	
app.py
ADDED
@@ -0,0 +1,152 @@
+import gradio as gr
+# from gradio_litmodel3d import LitModel3D
+
+import os
+from typing import *
+import imageio
+import uuid
+from PIL import Image
+from trellis.pipelines import TrellisImageTo3DPipeline
+from trellis.utils import render_utils, postprocessing_utils
+
+
+def preprocess_image(image: Image.Image) -> Image.Image:
+    """
+    Preprocess the input image.
+
+    Args:
+        image (Image.Image): The input image.
+
+    Returns:
+        Image.Image: The preprocessed image.
+    """
+    return pipeline.preprocess_image(image)
+
+
+def image_to_3d(image: Image.Image) -> Tuple[dict, str]:
+    """
+    Convert an image to a 3D model.
+
+    Args:
+        image (Image.Image): The input image.
+
+    Returns:
+        dict: The information of the generated 3D model.
+        str: The path to the video of the 3D model.
+    """
+    outputs = pipeline(image, formats=["gaussian", "mesh"], preprocess_image=False)
+    video = render_utils.render_video(outputs['gaussian'][0])['color']
+    model_id = uuid.uuid4()
+    video_path = f"/tmp/Trellis-demo/{model_id}.mp4"
+    os.makedirs(os.path.dirname(video_path), exist_ok=True)
+    imageio.mimsave(video_path, video, fps=30)
+    model = {'gaussian': outputs['gaussian'][0], 'mesh': outputs['mesh'][0], 'model_id': model_id}
+    return model, video_path
+
+
+def extract_glb(model: dict, mesh_simplify: float, texture_size: int) -> Tuple[str, str]:
+    """
+    Extract a GLB file from the 3D model.
+
+    Args:
+        model (dict): The generated 3D model.
+        mesh_simplify (float): The mesh simplification factor.
+        texture_size (int): The texture resolution.
+
+    Returns:
+        str: The path to the extracted GLB file.
+    """
+    glb = postprocessing_utils.to_glb(model['gaussian'], model['mesh'], simplify=mesh_simplify, texture_size=texture_size)
+    glb_path = f"/tmp/Trellis-demo/{model['model_id']}.glb"
+    glb.export(glb_path)
+    return glb_path, glb_path
+
+
+def activate_button() -> gr.Button:
+    return gr.Button(interactive=True)
+
+
+def deactivate_button() -> gr.Button:
+    return gr.Button(interactive=False)
+
+
+with gr.Blocks() as demo:
+    with gr.Row():
+        with gr.Column():
+            image_prompt = gr.Image(label="Image Prompt", image_mode="RGBA", type="pil", height=300)
+            generate_btn = gr.Button("Generate", interactive=False)
+
+            mesh_simplify = gr.Slider(0.9, 0.98, label="Simplify", value=0.95, step=0.01)
+            texture_size = gr.Slider(512, 2048, label="Texture Size", value=1024, step=512)
+            extract_glb_btn = gr.Button("Extract GLB", interactive=False)
+
+        with gr.Column():
+            video_output = gr.Video(label="Generated 3D Asset", autoplay=True, loop=True, height=300)
+            model_output = gr.Model3D(label="Extracted GLB", height=300)
+            download_glb = gr.DownloadButton(label="Download GLB", interactive=False)
+
+    # Example images at the bottom of the page
+    with gr.Row():
+        examples = gr.Examples(
+            examples=[
+                f'assets/example_image/{image}'
+                for image in os.listdir("assets/example_image")
+            ],
+            inputs=[image_prompt],
+            fn=lambda image: (preprocess_image(image), gr.Button(interactive=True)),
+            outputs=[image_prompt, generate_btn],
+            run_on_click=True,
+            examples_per_page=64,
+        )
+
+    model = gr.State()
+
+    # Handlers
+    image_prompt.upload(
+        preprocess_image,
+        inputs=[image_prompt],
+        outputs=[image_prompt],
+    ).then(
+        activate_button,
+        outputs=[generate_btn],
+    )
+
+    image_prompt.clear(
+        deactivate_button,
+        outputs=[generate_btn],
+    )
+
+    generate_btn.click(
+        image_to_3d,
+        inputs=[image_prompt],
+        outputs=[model, video_output],
+    ).then(
+        activate_button,
+        outputs=[extract_glb_btn],
+    )
+
+    video_output.clear(
+        deactivate_button,
+        outputs=[extract_glb_btn],
+    )
+
+    extract_glb_btn.click(
+        extract_glb,
+        inputs=[model, mesh_simplify, texture_size],
+        outputs=[model_output, download_glb],
+    ).then(
+        activate_button,
+        outputs=[download_glb],
+    )
+
+    model_output.clear(
+        deactivate_button,
+        outputs=[download_glb],
+    )
+
+
+# Launch the Gradio app
+if __name__ == "__main__":
+    pipeline = TrellisImageTo3DPipeline.from_pretrained("JeffreyXiang/TRELLIS-image-large")
+    pipeline.cuda()
+    demo.launch()
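
The Blocks UI above chains three steps: preprocess on upload, generate a Gaussian/mesh pair, then bake and download a GLB. For reference, the same flow can be driven headlessly; this is a minimal sketch reusing only calls already present in the diff (the output filename is arbitrary, and the demo's default slider values are used for simplify and texture_size):

    # Headless version of the app.py flow, restricted to calls shown above.
    from PIL import Image
    from trellis.pipelines import TrellisImageTo3DPipeline
    from trellis.utils import postprocessing_utils

    pipeline = TrellisImageTo3DPipeline.from_pretrained("JeffreyXiang/TRELLIS-image-large")
    pipeline.cuda()

    image = pipeline.preprocess_image(Image.open("assets/example_image/T.png"))
    outputs = pipeline(image, formats=["gaussian", "mesh"], preprocess_image=False)
    glb = postprocessing_utils.to_glb(
        outputs['gaussian'][0], outputs['mesh'][0],
        simplify=0.95, texture_size=1024,  # the demo's default slider values
    )
    glb.export("T.glb")  # arbitrary output path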
    	
assets/example_image/*.png (41 binary image files, as listed above)
ADDED
requirements.txt
ADDED
@@ -0,0 +1,28 @@
+--extra-index-url https://download.pytorch.org/whl/cu118
+--find-links https://nvidia-kaolin.s3.us-east-2.amazonaws.com/torch-2.4.0_cu121.html
+
+
+torch==2.4.0
+torchvision==0.19.0
+pillow==10.4.0
+imageio==2.36.1
+imageio-ffmpeg==0.5.1
+tqdm==4.67.1
+easydict==1.13
+opencv-python-headless==4.10.0.84
+scipy==1.14.1
+rembg==2.0.60
+onnxruntime==1.20.1
+trimesh==4.5.3
+xatlas==0.0.9
+pyvista==0.44.2
+pymeshfix==0.17.0
+igraph==0.11.8
+git+https://github.com/EasternJournalist/utils3d.git@9a4eb15e4021b67b12c460c7057d642626897ec8
+xformers==0.0.27.post2+cu118
+flash-attn==2.7.0.post2
+kaolin==0.17.0
+spconv-cu118==2.3.6
+transformers==4.46.3
+wheels/nvdiffrast-0.3.3-py3-none-any.whl
+wheels/diff_gaussian_rasterization-0.0.0-cp310-cp310-linux_x86_64.whl
    	
trellis/__init__.py
ADDED
@@ -0,0 +1,6 @@
+from . import models
+from . import modules
+from . import pipelines
+from . import renderers
+from . import representations
+from . import utils
    	
trellis/models/__init__.py
ADDED
@@ -0,0 +1,70 @@
+import importlib
+
+__attributes = {
+    'SparseStructureEncoder': 'sparse_structure_vae',
+    'SparseStructureDecoder': 'sparse_structure_vae',
+    'SparseStructureFlowModel': 'sparse_structure_flow',
+    'SLatEncoder': 'structured_latent_vae',
+    'SLatGaussianDecoder': 'structured_latent_vae',
+    'SLatRadianceFieldDecoder': 'structured_latent_vae',
+    'SLatMeshDecoder': 'structured_latent_vae',
+    'SLatFlowModel': 'structured_latent_flow',
+}
+
+__submodules = []
+
+__all__ = list(__attributes.keys()) + __submodules
+
+def __getattr__(name):
+    if name not in globals():
+        if name in __attributes:
+            module_name = __attributes[name]
+            module = importlib.import_module(f".{module_name}", __name__)
+            globals()[name] = getattr(module, name)
+        elif name in __submodules:
+            module = importlib.import_module(f".{name}", __name__)
+            globals()[name] = module
+        else:
+            raise AttributeError(f"module {__name__} has no attribute {name}")
+    return globals()[name]
+
+
+def from_pretrained(path: str, **kwargs):
+    """
+    Load a model from a pretrained checkpoint.
+
+    Args:
+        path: The path to the checkpoint. Can be either local path or a Hugging Face model name.
+              NOTE: config file and model file should take the name f'{path}.json' and f'{path}.safetensors' respectively.
+        **kwargs: Additional arguments for the model constructor.
+    """
+    import os
+    import json
+    from safetensors.torch import load_file
+    is_local = os.path.exists(f"{path}.json") and os.path.exists(f"{path}.safetensors")
+
+    if is_local:
+        config_file = f"{path}.json"
+        model_file = f"{path}.safetensors"
+    else:
+        from huggingface_hub import hf_hub_download
+        path_parts = path.split('/')
+        repo_id = f'{path_parts[0]}/{path_parts[1]}'
+        model_name = '/'.join(path_parts[2:])
+        config_file = hf_hub_download(repo_id, f"{model_name}.json")
+        model_file = hf_hub_download(repo_id, f"{model_name}.safetensors")
+
+    with open(config_file, 'r') as f:
+        config = json.load(f)
+    model = __getattr__(config['name'])(**config['args'], **kwargs)
+    model.load_state_dict(load_file(model_file))
+
+    return model
+
+
+# For Pylance
+if __name__ == '__main__':
+    from .sparse_structure_vae import SparseStructureEncoder, SparseStructureDecoder
+    from .sparse_structure_flow import SparseStructureFlowModel
+    from .structured_latent_vae import SLatEncoder, SLatGaussianDecoder, SLatRadianceFieldDecoder, SLatMeshDecoder
+    from .structured_latent_flow import SLatFlowModel
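
The module above combines a PEP 562 module-level __getattr__ hook (so each model class is imported only on first access) with a from_pretrained helper that resolves either a local checkpoint prefix or a Hugging Face path of the form '<user>/<repo>/<subpath>'. A usage sketch; the checkpoint subpath below is hypothetical, the real names live inside the TRELLIS-image-large repo:

    import trellis.models as models

    # Lazy access: structured_latent_flow is imported only at this line.
    flow_cls = models.SLatFlowModel

    # Hugging Face form: downloads '<subpath>.json' and '<subpath>.safetensors'
    # from the repo. 'ckpts/slat_flow' is a placeholder subpath.
    model = models.from_pretrained("JeffreyXiang/TRELLIS-image-large/ckpts/slat_flow")

    # Local form: a prefix such that 'checkpoints/my_model.json' and
    # 'checkpoints/my_model.safetensors' both exist on disk.
    # model = models.from_pretrained("checkpoints/my_model")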
    	
trellis/models/sparse_structure_flow.py
ADDED
@@ -0,0 +1,200 @@
+from typing import *
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import numpy as np
+from ..modules.utils import convert_module_to_f16, convert_module_to_f32
+from ..modules.transformer import AbsolutePositionEmbedder, ModulatedTransformerCrossBlock
+from ..modules.spatial import patchify, unpatchify
+
+
+class TimestepEmbedder(nn.Module):
+    """
+    Embeds scalar timesteps into vector representations.
+    """
+    def __init__(self, hidden_size, frequency_embedding_size=256):
+        super().__init__()
+        self.mlp = nn.Sequential(
+            nn.Linear(frequency_embedding_size, hidden_size, bias=True),
+            nn.SiLU(),
+            nn.Linear(hidden_size, hidden_size, bias=True),
+        )
+        self.frequency_embedding_size = frequency_embedding_size
+
+    @staticmethod
+    def timestep_embedding(t, dim, max_period=10000):
+        """
+        Create sinusoidal timestep embeddings.
+
+        Args:
+            t: a 1-D Tensor of N indices, one per batch element.
+                These may be fractional.
+            dim: the dimension of the output.
+            max_period: controls the minimum frequency of the embeddings.
+
+        Returns:
+            an (N, D) Tensor of positional embeddings.
+        """
+        # https://github.com/openai/glide-text2im/blob/main/glide_text2im/nn.py
+        half = dim // 2
+        freqs = torch.exp(
+            -np.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half
+        ).to(device=t.device)
+        args = t[:, None].float() * freqs[None]
+        embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
+        if dim % 2:
+            embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
+        return embedding
+
+    def forward(self, t):
+        t_freq = self.timestep_embedding(t, self.frequency_embedding_size)
+        t_emb = self.mlp(t_freq)
+        return t_emb
+
+
+class SparseStructureFlowModel(nn.Module):
+    def __init__(
+        self,
+        resolution: int,
+        in_channels: int,
+        model_channels: int,
+        cond_channels: int,
+        out_channels: int,
+        num_blocks: int,
+        num_heads: Optional[int] = None,
+        num_head_channels: Optional[int] = 64,
+        mlp_ratio: float = 4,
+        patch_size: int = 2,
+        pe_mode: Literal["ape", "rope"] = "ape",
+        use_fp16: bool = False,
+        use_checkpoint: bool = False,
+        share_mod: bool = False,
+        qk_rms_norm: bool = False,
+        qk_rms_norm_cross: bool = False,
+    ):
+        super().__init__()
+        self.resolution = resolution
+        self.in_channels = in_channels
+        self.model_channels = model_channels
+        self.cond_channels = cond_channels
+        self.out_channels = out_channels
+        self.num_blocks = num_blocks
+        self.num_heads = num_heads or model_channels // num_head_channels
+        self.mlp_ratio = mlp_ratio
+        self.patch_size = patch_size
+        self.pe_mode = pe_mode
+        self.use_fp16 = use_fp16
+        self.use_checkpoint = use_checkpoint
+        self.share_mod = share_mod
+        self.qk_rms_norm = qk_rms_norm
+        self.qk_rms_norm_cross = qk_rms_norm_cross
+        self.dtype = torch.float16 if use_fp16 else torch.float32
+
+        self.t_embedder = TimestepEmbedder(model_channels)
+        if share_mod:
+            self.adaLN_modulation = nn.Sequential(
+                nn.SiLU(),
+                nn.Linear(model_channels, 6 * model_channels, bias=True)
+            )
+
+        if pe_mode == "ape":
+            pos_embedder = AbsolutePositionEmbedder(model_channels, 3)
+            coords = torch.meshgrid(*[torch.arange(res, device=self.device) for res in [resolution // patch_size] * 3], indexing='ij')
+            coords = torch.stack(coords, dim=-1).reshape(-1, 3)
+            pos_emb = pos_embedder(coords)
+            self.register_buffer("pos_emb", pos_emb)
+
+        self.input_layer = nn.Linear(in_channels * patch_size**3, model_channels)
+
+        self.blocks = nn.ModuleList([
+            ModulatedTransformerCrossBlock(
+                model_channels,
+                cond_channels,
+                num_heads=self.num_heads,
+                mlp_ratio=self.mlp_ratio,
+                attn_mode='full',
+                use_checkpoint=self.use_checkpoint,
+                use_rope=(pe_mode == "rope"),
+                share_mod=share_mod,
+                qk_rms_norm=self.qk_rms_norm,
+                qk_rms_norm_cross=self.qk_rms_norm_cross,
+            )
+            for _ in range(num_blocks)
+        ])
+
+        self.out_layer = nn.Linear(model_channels, out_channels * patch_size**3)
+
+        self.initialize_weights()
+        if use_fp16:
+            self.convert_to_fp16()
+
+    @property
+    def device(self) -> torch.device:
+        """
+        Return the device of the model.
+        """
+        return next(self.parameters()).device
+
+    def convert_to_fp16(self) -> None:
+        """
+        Convert the torso of the model to float16.
+        """
+        self.blocks.apply(convert_module_to_f16)
+
+    def convert_to_fp32(self) -> None:
+        """
+        Convert the torso of the model to float32.
+        """
+        self.blocks.apply(convert_module_to_f32)
+
+    def initialize_weights(self) -> None:
+        # Initialize transformer layers:
+        def _basic_init(module):
+            if isinstance(module, nn.Linear):
+                torch.nn.init.xavier_uniform_(module.weight)
+                if module.bias is not None:
+                    nn.init.constant_(module.bias, 0)
+        self.apply(_basic_init)
+
+        # Initialize timestep embedding MLP:
+        nn.init.normal_(self.t_embedder.mlp[0].weight, std=0.02)
+        nn.init.normal_(self.t_embedder.mlp[2].weight, std=0.02)
+
+        # Zero-out adaLN modulation layers in DiT blocks:
+        if self.share_mod:
+            nn.init.constant_(self.adaLN_modulation[-1].weight, 0)
+            nn.init.constant_(self.adaLN_modulation[-1].bias, 0)
+        else:
+            for block in self.blocks:
+                nn.init.constant_(block.adaLN_modulation[-1].weight, 0)
+                nn.init.constant_(block.adaLN_modulation[-1].bias, 0)
+
+        # Zero-out output layers:
+        nn.init.constant_(self.out_layer.weight, 0)
+        nn.init.constant_(self.out_layer.bias, 0)
+
+    def forward(self, x: torch.Tensor, t: torch.Tensor, cond: torch.Tensor) -> torch.Tensor:
+        assert [*x.shape] == [x.shape[0], self.in_channels, *[self.resolution] * 3], \
+                f"Input shape mismatch, got {x.shape}, expected {[x.shape[0], self.in_channels, *[self.resolution] * 3]}"
+
+        h = patchify(x, self.patch_size)
+        h = h.view(*h.shape[:2], -1).permute(0, 2, 1).contiguous()
+
+        h = self.input_layer(h)
+        h = h + self.pos_emb[None]
+        t_emb = self.t_embedder(t)
+        if self.share_mod:
+            t_emb = self.adaLN_modulation(t_emb)
+        t_emb = t_emb.type(self.dtype)
+        h = h.type(self.dtype)
+        cond = cond.type(self.dtype)
+        for block in self.blocks:
+            h = block(h, t_emb, cond)
+        h = h.type(x.dtype)
+        h = F.layer_norm(h, h.shape[-1:])
+        h = self.out_layer(h)
+
+        h = h.permute(0, 2, 1).view(h.shape[0], h.shape[2], *[self.resolution // self.patch_size] * 3)
+        h = unpatchify(h, self.patch_size).contiguous()
+
+        return h
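
TimestepEmbedder above follows the GLIDE recipe it links to: a fixed sinusoidal featurization of the scalar timestep followed by a two-layer MLP. A quick shape check, with an arbitrary hidden size chosen for illustration:

    import torch
    from trellis.models.sparse_structure_flow import TimestepEmbedder

    emb = TimestepEmbedder(hidden_size=1024)  # hidden size picked for this sketch
    t = torch.tensor([0.0, 0.5, 1.0])         # fractional timesteps are allowed
    feats = TimestepEmbedder.timestep_embedding(t, dim=256)  # (3, 256) sin/cos features
    out = emb(t)                                             # (3, 1024) after the MLP
    print(feats.shape, out.shape)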
    	
trellis/models/sparse_structure_vae.py
ADDED
@@ -0,0 +1,306 @@
+from typing import *
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from ..modules.norm import GroupNorm32, ChannelLayerNorm32
+from ..modules.spatial import pixel_shuffle_3d
+from ..modules.utils import zero_module, convert_module_to_f16, convert_module_to_f32
+
+
+def norm_layer(norm_type: str, *args, **kwargs) -> nn.Module:
+    """
+    Return a normalization layer.
+    """
+    if norm_type == "group":
+        return GroupNorm32(32, *args, **kwargs)
+    elif norm_type == "layer":
+        return ChannelLayerNorm32(*args, **kwargs)
+    else:
+        raise ValueError(f"Invalid norm type {norm_type}")
+
+
+class ResBlock3d(nn.Module):
+    def __init__(
+        self,
+        channels: int,
+        out_channels: Optional[int] = None,
+        norm_type: Literal["group", "layer"] = "layer",
+    ):
+        super().__init__()
+        self.channels = channels
+        self.out_channels = out_channels or channels
+
+        self.norm1 = norm_layer(norm_type, channels)
+        self.norm2 = norm_layer(norm_type, self.out_channels)
+        self.conv1 = nn.Conv3d(channels, self.out_channels, 3, padding=1)
+        self.conv2 = zero_module(nn.Conv3d(self.out_channels, self.out_channels, 3, padding=1))
+        self.skip_connection = nn.Conv3d(channels, self.out_channels, 1) if channels != self.out_channels else nn.Identity()
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        h = self.norm1(x)
+        h = F.silu(h)
+        h = self.conv1(h)
+        h = self.norm2(h)
+        h = F.silu(h)
+        h = self.conv2(h)
+        h = h + self.skip_connection(x)
+        return h
+
+
+class DownsampleBlock3d(nn.Module):
+    def __init__(
+        self,
+        in_channels: int,
+        out_channels: int,
+        mode: Literal["conv", "avgpool"] = "conv",
+    ):
+        assert mode in ["conv", "avgpool"], f"Invalid mode {mode}"
+
+        super().__init__()
+        self.in_channels = in_channels
+        self.out_channels = out_channels
+
+        if mode == "conv":
+            self.conv = nn.Conv3d(in_channels, out_channels, 2, stride=2)
+        elif mode == "avgpool":
+            assert in_channels == out_channels, "Pooling mode requires in_channels to be equal to out_channels"
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        if hasattr(self, "conv"):
+            return self.conv(x)
+        else:
+            return F.avg_pool3d(x, 2)
+
+
+class UpsampleBlock3d(nn.Module):
+    def __init__(
+        self,
+        in_channels: int,
+        out_channels: int,
+        mode: Literal["conv", "nearest"] = "conv",
+    ):
+        assert mode in ["conv", "nearest"], f"Invalid mode {mode}"
+
+        super().__init__()
+        self.in_channels = in_channels
+        self.out_channels = out_channels
+
+        if mode == "conv":
+            self.conv = nn.Conv3d(in_channels, out_channels*8, 3, padding=1)
+        elif mode == "nearest":
+            assert in_channels == out_channels, "Nearest mode requires in_channels to be equal to out_channels"
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        if hasattr(self, "conv"):
+            x = self.conv(x)
+            return pixel_shuffle_3d(x, 2)
+        else:
+            return F.interpolate(x, scale_factor=2, mode="nearest")
+
+
+class SparseStructureEncoder(nn.Module):
+    """
+    Encoder for Sparse Structure (\mathcal{E}_S in the paper Sec. 3.3).
+
+    Args:
+        in_channels (int): Channels of the input.
+        latent_channels (int): Channels of the latent representation.
+        num_res_blocks (int): Number of residual blocks at each resolution.
+        channels (List[int]): Channels of the encoder blocks.
+        num_res_blocks_middle (int): Number of residual blocks in the middle.
+        norm_type (Literal["group", "layer"]): Type of normalization layer.
+        use_fp16 (bool): Whether to use FP16.
+    """
+    def __init__(
+        self,
+        in_channels: int,
+        latent_channels: int,
+        num_res_blocks: int,
+        channels: List[int],
+        num_res_blocks_middle: int = 2,
+        norm_type: Literal["group", "layer"] = "layer",
+        use_fp16: bool = False,
+    ):
+        super().__init__()
+        self.in_channels = in_channels
+        self.latent_channels = latent_channels
+        self.num_res_blocks = num_res_blocks
+        self.channels = channels
+        self.num_res_blocks_middle = num_res_blocks_middle
+        self.norm_type = norm_type
+        self.use_fp16 = use_fp16
+        self.dtype = torch.float16 if use_fp16 else torch.float32
+
+        self.input_layer = nn.Conv3d(in_channels, channels[0], 3, padding=1)
+
+        self.blocks = nn.ModuleList([])
+        for i, ch in enumerate(channels):
+            self.blocks.extend([
+                ResBlock3d(ch, ch)
+                for _ in range(num_res_blocks)
+            ])
+            if i < len(channels) - 1:
+                self.blocks.append(
+                    DownsampleBlock3d(ch, channels[i+1])
+                )
+
+        self.middle_block = nn.Sequential(*[
+            ResBlock3d(channels[-1], channels[-1])
+            for _ in range(num_res_blocks_middle)
+        ])
+
+        self.out_layer = nn.Sequential(
+            norm_layer(norm_type, channels[-1]),
+            nn.SiLU(),
[diff truncated here; the full file adds 306 lines]
             | 
| 88 | 
            +
                    if mode == "conv":
         | 
| 89 | 
            +
                        self.conv = nn.Conv3d(in_channels, out_channels*8, 3, padding=1)
         | 
| 90 | 
            +
                    elif mode == "nearest":
         | 
| 91 | 
            +
                        assert in_channels == out_channels, "Nearest mode requires in_channels to be equal to out_channels"
         | 
| 92 | 
            +
             | 
| 93 | 
            +
                def forward(self, x: torch.Tensor) -> torch.Tensor:
         | 
| 94 | 
            +
                    if hasattr(self, "conv"):
         | 
| 95 | 
            +
                        x = self.conv(x)
         | 
| 96 | 
            +
                        return pixel_shuffle_3d(x, 2)
         | 
| 97 | 
            +
                    else:
         | 
| 98 | 
            +
                        return F.interpolate(x, scale_factor=2, mode="nearest")
         | 
| 99 | 
            +
                    
         | 
| 100 | 
            +
             | 
| 101 | 
            +


class SparseStructureEncoder(nn.Module):
    """
    Encoder for Sparse Structure (\mathcal{E}_S in the paper Sec. 3.3).

    Args:
        in_channels (int): Channels of the input.
        latent_channels (int): Channels of the latent representation.
        num_res_blocks (int): Number of residual blocks at each resolution.
        channels (List[int]): Channels of the encoder blocks.
        num_res_blocks_middle (int): Number of residual blocks in the middle.
        norm_type (Literal["group", "layer"]): Type of normalization layer.
        use_fp16 (bool): Whether to use FP16.
    """
    def __init__(
        self,
        in_channels: int,
        latent_channels: int,
        num_res_blocks: int,
        channels: List[int],
        num_res_blocks_middle: int = 2,
        norm_type: Literal["group", "layer"] = "layer",
        use_fp16: bool = False,
    ):
        super().__init__()
        self.in_channels = in_channels
        self.latent_channels = latent_channels
        self.num_res_blocks = num_res_blocks
        self.channels = channels
        self.num_res_blocks_middle = num_res_blocks_middle
        self.norm_type = norm_type
        self.use_fp16 = use_fp16
        self.dtype = torch.float16 if use_fp16 else torch.float32

        self.input_layer = nn.Conv3d(in_channels, channels[0], 3, padding=1)

        self.blocks = nn.ModuleList([])
        for i, ch in enumerate(channels):
            self.blocks.extend([
                ResBlock3d(ch, ch)
                for _ in range(num_res_blocks)
            ])
            if i < len(channels) - 1:
                self.blocks.append(
                    DownsampleBlock3d(ch, channels[i+1])
                )

        self.middle_block = nn.Sequential(*[
            ResBlock3d(channels[-1], channels[-1])
            for _ in range(num_res_blocks_middle)
        ])

        self.out_layer = nn.Sequential(
            norm_layer(norm_type, channels[-1]),
            nn.SiLU(),
            nn.Conv3d(channels[-1], latent_channels*2, 3, padding=1)
        )

        if use_fp16:
            self.convert_to_fp16()

    @property
    def device(self) -> torch.device:
        """
        Return the device of the model.
        """
        return next(self.parameters()).device

    def convert_to_fp16(self) -> None:
        """
        Convert the torso of the model to float16.
        """
        self.use_fp16 = True
        self.dtype = torch.float16
        self.blocks.apply(convert_module_to_f16)
        self.middle_block.apply(convert_module_to_f16)

    def convert_to_fp32(self) -> None:
        """
        Convert the torso of the model to float32.
        """
        self.use_fp16 = False
        self.dtype = torch.float32
        self.blocks.apply(convert_module_to_f32)
        self.middle_block.apply(convert_module_to_f32)

    def forward(self, x: torch.Tensor, sample_posterior: bool = False, return_raw: bool = False) -> torch.Tensor:
        h = self.input_layer(x)
        h = h.type(self.dtype)

        for block in self.blocks:
            h = block(h)
        h = self.middle_block(h)

        h = h.type(x.dtype)
        h = self.out_layer(h)

        mean, logvar = h.chunk(2, dim=1)

        if sample_posterior:
            std = torch.exp(0.5 * logvar)
            z = mean + std * torch.randn_like(std)
        else:
            z = mean

        if return_raw:
            return z, mean, logvar
        return z


class SparseStructureDecoder(nn.Module):
    """
    Decoder for Sparse Structure (\mathcal{D}_S in the paper Sec. 3.3).

    Args:
        out_channels (int): Channels of the output.
        latent_channels (int): Channels of the latent representation.
        num_res_blocks (int): Number of residual blocks at each resolution.
        channels (List[int]): Channels of the decoder blocks.
        num_res_blocks_middle (int): Number of residual blocks in the middle.
        norm_type (Literal["group", "layer"]): Type of normalization layer.
        use_fp16 (bool): Whether to use FP16.
    """
    def __init__(
        self,
        out_channels: int,
        latent_channels: int,
        num_res_blocks: int,
        channels: List[int],
        num_res_blocks_middle: int = 2,
        norm_type: Literal["group", "layer"] = "layer",
        use_fp16: bool = False,
    ):
        super().__init__()
        self.out_channels = out_channels
        self.latent_channels = latent_channels
        self.num_res_blocks = num_res_blocks
        self.channels = channels
        self.num_res_blocks_middle = num_res_blocks_middle
        self.norm_type = norm_type
        self.use_fp16 = use_fp16
        self.dtype = torch.float16 if use_fp16 else torch.float32

        self.input_layer = nn.Conv3d(latent_channels, channels[0], 3, padding=1)

        self.middle_block = nn.Sequential(*[
            ResBlock3d(channels[0], channels[0])
            for _ in range(num_res_blocks_middle)
        ])

        self.blocks = nn.ModuleList([])
        for i, ch in enumerate(channels):
            self.blocks.extend([
                ResBlock3d(ch, ch)
                for _ in range(num_res_blocks)
            ])
            if i < len(channels) - 1:
                self.blocks.append(
                    UpsampleBlock3d(ch, channels[i+1])
                )

        self.out_layer = nn.Sequential(
            norm_layer(norm_type, channels[-1]),
            nn.SiLU(),
            nn.Conv3d(channels[-1], out_channels, 3, padding=1)
        )

        if use_fp16:
            self.convert_to_fp16()

    @property
    def device(self) -> torch.device:
        """
        Return the device of the model.
        """
        return next(self.parameters()).device

    def convert_to_fp16(self) -> None:
        """
        Convert the torso of the model to float16.
        """
        self.use_fp16 = True
        self.dtype = torch.float16
        self.blocks.apply(convert_module_to_f16)
        self.middle_block.apply(convert_module_to_f16)

    def convert_to_fp32(self) -> None:
        """
        Convert the torso of the model to float32.
        """
        self.use_fp16 = False
        self.dtype = torch.float32
        self.blocks.apply(convert_module_to_f32)
        self.middle_block.apply(convert_module_to_f32)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        h = self.input_layer(x)
        h = h.type(self.dtype)

        h = self.middle_block(h)
        for block in self.blocks:
            h = block(h)

        h = h.type(x.dtype)
        h = self.out_layer(h)
        return h
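

def _roundtrip_smoke_test():
    # A minimal sketch exercising the encoder/decoder pair above; every
    # hyperparameter here (channel widths, latent size, the 64^3
    # single-channel occupancy grid) is assumed for illustration, not taken
    # from a shipped config.
    enc = SparseStructureEncoder(in_channels=1, latent_channels=8,
                                 num_res_blocks=2, channels=[32, 64, 128])
    dec = SparseStructureDecoder(out_channels=1, latent_channels=8,
                                 num_res_blocks=2, channels=[128, 64, 32])
    voxels = torch.randn(1, 1, 64, 64, 64)      # (B, C, D, H, W)
    z = enc(voxels, sample_posterior=True)      # two downsamples -> (1, 8, 16, 16, 16)
    logits = dec(z)                             # two upsamples   -> (1, 1, 64, 64, 64)
    assert logits.shape == voxels.shape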
    	
        trellis/models/structured_latent_flow.py
    ADDED
    
@@ -0,0 +1,262 @@
from typing import *
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from ..modules.utils import zero_module, convert_module_to_f16, convert_module_to_f32
from ..modules.transformer import AbsolutePositionEmbedder
from ..modules.norm import LayerNorm32
from ..modules import sparse as sp
from ..modules.sparse.transformer import ModulatedSparseTransformerCrossBlock
from .sparse_structure_flow import TimestepEmbedder


class SparseResBlock3d(nn.Module):
    def __init__(
        self,
        channels: int,
        emb_channels: int,
        out_channels: Optional[int] = None,
        downsample: bool = False,
        upsample: bool = False,
    ):
        super().__init__()
        self.channels = channels
        self.emb_channels = emb_channels
        self.out_channels = out_channels or channels
        self.downsample = downsample
        self.upsample = upsample

        assert not (downsample and upsample), "Cannot downsample and upsample at the same time"

        self.norm1 = LayerNorm32(channels, elementwise_affine=True, eps=1e-6)
        self.norm2 = LayerNorm32(self.out_channels, elementwise_affine=False, eps=1e-6)
        self.conv1 = sp.SparseConv3d(channels, self.out_channels, 3)
        self.conv2 = zero_module(sp.SparseConv3d(self.out_channels, self.out_channels, 3))
        self.emb_layers = nn.Sequential(
            nn.SiLU(),
            nn.Linear(emb_channels, 2 * self.out_channels, bias=True),
        )
        self.skip_connection = sp.SparseLinear(channels, self.out_channels) if channels != self.out_channels else nn.Identity()
        self.updown = None
        if self.downsample:
            self.updown = sp.SparseDownsample(2)
        elif self.upsample:
            self.updown = sp.SparseUpsample(2)

    def _updown(self, x: sp.SparseTensor) -> sp.SparseTensor:
        if self.updown is not None:
            x = self.updown(x)
        return x

    def forward(self, x: sp.SparseTensor, emb: torch.Tensor) -> sp.SparseTensor:
        emb_out = self.emb_layers(emb).type(x.dtype)
        scale, shift = torch.chunk(emb_out, 2, dim=1)

        x = self._updown(x)
        h = x.replace(self.norm1(x.feats))
        h = h.replace(F.silu(h.feats))
        h = self.conv1(h)
        h = h.replace(self.norm2(h.feats)) * (1 + scale) + shift
        h = h.replace(F.silu(h.feats))
        h = self.conv2(h)
        h = h + self.skip_connection(x)

        return h
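

def _film_modulation_demo():
    # Dense analogue of the FiLM-style timestep conditioning used in
    # SparseResBlock3d.forward above (shapes assumed for illustration; the
    # real block applies it to sparse feats, and conv2 is zero-initialized so
    # the residual branch starts as a no-op). The (1 + scale) parameterization
    # keeps the block near-identity when scale is small.
    emb_layers = nn.Sequential(nn.SiLU(), nn.Linear(768, 2 * 128))
    emb = torch.randn(4, 768)                        # (B, emb_channels)
    scale, shift = emb_layers(emb).chunk(2, dim=1)   # each (B, 128)
    feats = torch.randn(4, 128)
    return F.layer_norm(feats, (128,)) * (1 + scale) + shift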


class SLatFlowModel(nn.Module):
    def __init__(
        self,
        resolution: int,
        in_channels: int,
        model_channels: int,
        cond_channels: int,
        out_channels: int,
        num_blocks: int,
        num_heads: Optional[int] = None,
        num_head_channels: Optional[int] = 64,
        mlp_ratio: float = 4,
        patch_size: int = 2,
        num_io_res_blocks: int = 2,
        io_block_channels: List[int] = None,
        pe_mode: Literal["ape", "rope"] = "ape",
        use_fp16: bool = False,
        use_checkpoint: bool = False,
        use_skip_connection: bool = True,
        share_mod: bool = False,
        qk_rms_norm: bool = False,
        qk_rms_norm_cross: bool = False,
    ):
        super().__init__()
        self.resolution = resolution
        self.in_channels = in_channels
        self.model_channels = model_channels
        self.cond_channels = cond_channels
        self.out_channels = out_channels
        self.num_blocks = num_blocks
        self.num_heads = num_heads or model_channels // num_head_channels
        self.mlp_ratio = mlp_ratio
        self.patch_size = patch_size
        self.num_io_res_blocks = num_io_res_blocks
        self.io_block_channels = io_block_channels
        self.pe_mode = pe_mode
        self.use_fp16 = use_fp16
        self.use_checkpoint = use_checkpoint
        self.use_skip_connection = use_skip_connection
        self.share_mod = share_mod
        self.qk_rms_norm = qk_rms_norm
        self.qk_rms_norm_cross = qk_rms_norm_cross
        self.dtype = torch.float16 if use_fp16 else torch.float32

        assert int(np.log2(patch_size)) == np.log2(patch_size), "Patch size must be a power of 2"
        assert np.log2(patch_size) == len(io_block_channels), "Length of io_block_channels must match log2 of the patch size"

        self.t_embedder = TimestepEmbedder(model_channels)
        if share_mod:
            self.adaLN_modulation = nn.Sequential(
                nn.SiLU(),
                nn.Linear(model_channels, 6 * model_channels, bias=True)
            )

        if pe_mode == "ape":
            self.pos_embedder = AbsolutePositionEmbedder(model_channels)

        self.input_layer = sp.SparseLinear(in_channels, io_block_channels[0])
        self.input_blocks = nn.ModuleList([])
        for chs, next_chs in zip(io_block_channels, io_block_channels[1:] + [model_channels]):
            self.input_blocks.extend([
                SparseResBlock3d(
                    chs,
                    model_channels,
                    out_channels=chs,
                )
                for _ in range(num_io_res_blocks-1)
            ])
            self.input_blocks.append(
                SparseResBlock3d(
                    chs,
                    model_channels,
                    out_channels=next_chs,
                    downsample=True,
                )
            )

        self.blocks = nn.ModuleList([
            ModulatedSparseTransformerCrossBlock(
                model_channels,
                cond_channels,
                num_heads=self.num_heads,
                mlp_ratio=self.mlp_ratio,
                attn_mode='full',
                use_checkpoint=self.use_checkpoint,
                use_rope=(pe_mode == "rope"),
                share_mod=self.share_mod,
                qk_rms_norm=self.qk_rms_norm,
                qk_rms_norm_cross=self.qk_rms_norm_cross,
            )
            for _ in range(num_blocks)
        ])

        self.out_blocks = nn.ModuleList([])
        for chs, prev_chs in zip(reversed(io_block_channels), [model_channels] + list(reversed(io_block_channels[1:]))):
            self.out_blocks.append(
                SparseResBlock3d(
                    prev_chs * 2 if self.use_skip_connection else prev_chs,
                    model_channels,
                    out_channels=chs,
                    upsample=True,
                )
            )
            self.out_blocks.extend([
                SparseResBlock3d(
                    chs * 2 if self.use_skip_connection else chs,
                    model_channels,
                    out_channels=chs,
                )
                for _ in range(num_io_res_blocks-1)
            ])
        self.out_layer = sp.SparseLinear(io_block_channels[0], out_channels)

        self.initialize_weights()
        if use_fp16:
            self.convert_to_fp16()

    @property
    def device(self) -> torch.device:
        """
        Return the device of the model.
        """
        return next(self.parameters()).device

    def convert_to_fp16(self) -> None:
        """
        Convert the torso of the model to float16.
        """
        self.input_blocks.apply(convert_module_to_f16)
        self.blocks.apply(convert_module_to_f16)
        self.out_blocks.apply(convert_module_to_f16)

    def convert_to_fp32(self) -> None:
        """
        Convert the torso of the model to float32.
        """
        self.input_blocks.apply(convert_module_to_f32)
        self.blocks.apply(convert_module_to_f32)
        self.out_blocks.apply(convert_module_to_f32)

    def initialize_weights(self) -> None:
        # Initialize transformer layers:
        def _basic_init(module):
            if isinstance(module, nn.Linear):
                torch.nn.init.xavier_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
        self.apply(_basic_init)

        # Initialize timestep embedding MLP:
        nn.init.normal_(self.t_embedder.mlp[0].weight, std=0.02)
        nn.init.normal_(self.t_embedder.mlp[2].weight, std=0.02)

        # Zero-out adaLN modulation layers in DiT blocks:
        if self.share_mod:
            nn.init.constant_(self.adaLN_modulation[-1].weight, 0)
            nn.init.constant_(self.adaLN_modulation[-1].bias, 0)
        else:
            for block in self.blocks:
                nn.init.constant_(block.adaLN_modulation[-1].weight, 0)
                nn.init.constant_(block.adaLN_modulation[-1].bias, 0)

        # Zero-out output layers:
        nn.init.constant_(self.out_layer.weight, 0)
        nn.init.constant_(self.out_layer.bias, 0)

    def forward(self, x: sp.SparseTensor, t: torch.Tensor, cond: torch.Tensor) -> sp.SparseTensor:
        h = self.input_layer(x).type(self.dtype)
        t_emb = self.t_embedder(t)
        if self.share_mod:
            t_emb = self.adaLN_modulation(t_emb)
        t_emb = t_emb.type(self.dtype)
        cond = cond.type(self.dtype)

        skips = []
        # pack with input blocks
        for block in self.input_blocks:
            h = block(h, t_emb)
            skips.append(h.feats)

        if self.pe_mode == "ape":
            h = h + self.pos_embedder(h.coords[:, 1:]).type(self.dtype)
        for block in self.blocks:
            h = block(h, t_emb, cond)

        # unpack with output blocks
        for block, skip in zip(self.out_blocks, reversed(skips)):
            if self.use_skip_connection:
                h = block(h.replace(torch.cat([h.feats, skip], dim=1)), t_emb)
            else:
                h = block(h, t_emb)

        h = h.replace(F.layer_norm(h.feats, h.feats.shape[-1:]))
        h = self.out_layer(h.type(x.dtype))
        return h
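

def _build_demo_model():
    # Illustrative construction only; every hyperparameter here is assumed,
    # not a shipped config. patch_size = 2 implies a single down/up stage, so
    # io_block_channels must list exactly log2(2) = 1 width, and num_heads
    # defaults to model_channels // num_head_channels = 12.
    return SLatFlowModel(
        resolution=64,
        in_channels=8,
        model_channels=768,
        cond_channels=1024,
        out_channels=8,
        num_blocks=12,
        num_head_channels=64,
        patch_size=2,
        num_io_res_blocks=2,
        io_block_channels=[128],
        use_skip_connection=True,
    )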
    	
        trellis/models/structured_latent_vae/__init__.py
    ADDED
    
@@ -0,0 +1,4 @@
from .encoder import SLatEncoder
from .decoder_gs import SLatGaussianDecoder
from .decoder_rf import SLatRadianceFieldDecoder
from .decoder_mesh import SLatMeshDecoder
    	
        trellis/models/structured_latent_vae/base.py
    ADDED
    
@@ -0,0 +1,117 @@
from typing import *
import torch
import torch.nn as nn
from ...modules.utils import convert_module_to_f16, convert_module_to_f32
from ...modules import sparse as sp
from ...modules.transformer import AbsolutePositionEmbedder
from ...modules.sparse.transformer import SparseTransformerBlock


def block_attn_config(self):
    """
    Return the attention configuration of the model.
    """
    for i in range(self.num_blocks):
        if self.attn_mode == "shift_window":
            yield "serialized", self.window_size, 0, (16 * (i % 2),) * 3, sp.SerializeMode.Z_ORDER
        elif self.attn_mode == "shift_sequence":
            yield "serialized", self.window_size, self.window_size // 2 * (i % 2), (0, 0, 0), sp.SerializeMode.Z_ORDER
        elif self.attn_mode == "shift_order":
            yield "serialized", self.window_size, 0, (0, 0, 0), sp.SerializeModes[i % 4]
        elif self.attn_mode == "full":
            yield "full", None, None, None, None
        elif self.attn_mode == "swin":
            yield "windowed", self.window_size, None, self.window_size // 2 * (i % 2), None


class SparseTransformerBase(nn.Module):
    """
    Sparse Transformer without output layers.
    Serves as the base class for encoder and decoder.
    """
    def __init__(
        self,
        in_channels: int,
        model_channels: int,
        num_blocks: int,
        num_heads: Optional[int] = None,
        num_head_channels: Optional[int] = 64,
        mlp_ratio: float = 4.0,
        attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "full",
        window_size: Optional[int] = None,
        pe_mode: Literal["ape", "rope"] = "ape",
        use_fp16: bool = False,
        use_checkpoint: bool = False,
        qk_rms_norm: bool = False,
    ):
        super().__init__()
        self.in_channels = in_channels
        self.model_channels = model_channels
        self.num_blocks = num_blocks
        self.window_size = window_size
        self.num_heads = num_heads or model_channels // num_head_channels
        self.mlp_ratio = mlp_ratio
        self.attn_mode = attn_mode
        self.pe_mode = pe_mode
        self.use_fp16 = use_fp16
        self.use_checkpoint = use_checkpoint
        self.qk_rms_norm = qk_rms_norm
        self.dtype = torch.float16 if use_fp16 else torch.float32

        if pe_mode == "ape":
            self.pos_embedder = AbsolutePositionEmbedder(model_channels)

        self.input_layer = sp.SparseLinear(in_channels, model_channels)
        self.blocks = nn.ModuleList([
            SparseTransformerBlock(
                model_channels,
                num_heads=self.num_heads,
                mlp_ratio=self.mlp_ratio,
                attn_mode=attn_mode,
                window_size=window_size,
                shift_sequence=shift_sequence,
                shift_window=shift_window,
                serialize_mode=serialize_mode,
                use_checkpoint=self.use_checkpoint,
                use_rope=(pe_mode == "rope"),
                qk_rms_norm=self.qk_rms_norm,
            )
            for attn_mode, window_size, shift_sequence, shift_window, serialize_mode in block_attn_config(self)
        ])

    @property
    def device(self) -> torch.device:
        """
        Return the device of the model.
        """
        return next(self.parameters()).device

    def convert_to_fp16(self) -> None:
        """
        Convert the torso of the model to float16.
        """
        self.blocks.apply(convert_module_to_f16)

    def convert_to_fp32(self) -> None:
        """
        Convert the torso of the model to float32.
        """
        self.blocks.apply(convert_module_to_f32)

    def initialize_weights(self) -> None:
        # Initialize transformer layers:
        def _basic_init(module):
            if isinstance(module, nn.Linear):
                torch.nn.init.xavier_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
        self.apply(_basic_init)

    def forward(self, x: sp.SparseTensor) -> sp.SparseTensor:
        h = self.input_layer(x)
        if self.pe_mode == "ape":
            h = h + self.pos_embedder(x.coords[:, 1:])
        h = h.type(self.dtype)
        for block in self.blocks:
            h = block(h)
        return h
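

# Hypothetical subclass pattern (sketch only; the concrete SLat encoder and
# decoders in this package build on this base in a similar shape): attach a
# task head to the shared torso and initialize weights.
class _SparseHeadExample(SparseTransformerBase):
    def __init__(self, in_channels: int, model_channels: int, num_blocks: int, out_channels: int):
        super().__init__(in_channels, model_channels, num_blocks)
        self.out_layer = sp.SparseLinear(model_channels, out_channels)
        self.initialize_weights()

    def forward(self, x: sp.SparseTensor) -> sp.SparseTensor:
        h = super().forward(x)
        return self.out_layer(h.type(x.dtype))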