LTT committed on
Commit 8e9a1c4 · verified · 1 Parent(s): 775d9d5

Update app.py

Files changed (1): app.py (+10, -10)
app.py CHANGED
@@ -108,16 +108,16 @@ flux_pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_tha
 
 
 # lrm
-config = OmegaConf.load("./models/lrm/config/PRM_inference.yaml")
-model_config = config.model_config
-infer_config = config.infer_config
-model = instantiate_from_config(model_config)
-model_ckpt_path = hf_hub_download(repo_id="LTT/PRM", filename="final_ckpt.ckpt", repo_type="model")
-state_dict = torch.load(model_ckpt_path, map_location='cpu')['state_dict']
-state_dict = {k[14:]: v for k, v in state_dict.items() if k.startswith('lrm_generator.')}
-model.load_state_dict(state_dict, strict=True)
-model = model.to(device_1)
-torch.cuda.empty_cache()
+# config = OmegaConf.load("./models/lrm/config/PRM_inference.yaml")
+# model_config = config.model_config
+# infer_config = config.infer_config
+# model = instantiate_from_config(model_config)
+# model_ckpt_path = hf_hub_download(repo_id="LTT/PRM", filename="final_ckpt.ckpt", repo_type="model")
+# state_dict = torch.load(model_ckpt_path, map_location='cpu')['state_dict']
+# state_dict = {k[14:]: v for k, v in state_dict.items() if k.startswith('lrm_generator.')}
+# model.load_state_dict(state_dict, strict=True)
+# model = model.to(device_1)
+# torch.cuda.empty_cache()
 @spaces.GPU
 def lrm_reconstructions(image, input_cameras, save_path=None, name="temp", export_texmap=False, if_save_video=False):
     images = image.unsqueeze(0).to(device_1)
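This hunk comments out the module-level LRM setup (config parsing, checkpoint download, state-dict filtering, device placement) while leaving the @spaces.GPU-decorated lrm_reconstructions function in place. The commit message gives no rationale; one common reason on Hugging Face Spaces is to defer heavy model construction until it is actually needed. The sketch below assumes that intent and is not part of this commit: the load_lrm_model helper and its module-level cache are hypothetical, and simply repackage the removed lines as a lazy loader.

# Hypothetical sketch, assuming the goal is lazy loading rather than dropping the LRM path.
# Reuses the imports and globals already present in app.py (OmegaConf, instantiate_from_config,
# hf_hub_download, torch, device_1); the helper name and cache variable are invented here.

_lrm_model = None  # hypothetical module-level cache

def load_lrm_model():
    """Build the LRM generator from the PRM checkpoint on first use (hypothetical helper)."""
    global _lrm_model
    if _lrm_model is None:
        config = OmegaConf.load("./models/lrm/config/PRM_inference.yaml")
        model = instantiate_from_config(config.model_config)
        ckpt_path = hf_hub_download(repo_id="LTT/PRM", filename="final_ckpt.ckpt", repo_type="model")
        state_dict = torch.load(ckpt_path, map_location="cpu")["state_dict"]
        # Keep only the lrm_generator.* weights and strip the prefix, as in the removed lines.
        state_dict = {k[14:]: v for k, v in state_dict.items() if k.startswith("lrm_generator.")}
        model.load_state_dict(state_dict, strict=True)
        _lrm_model = model.to(device_1)
        torch.cuda.empty_cache()
    return _lrm_model

# Usage: inside the @spaces.GPU function, call model = load_lrm_model() before running inference,
# so the checkpoint is only downloaded and moved to device_1 on the first reconstruction request.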