wanghaofan committed on
Commit 48ab7a2
1 parent: 6726142

Update app.py

Files changed (1)
app.py +2 -2
app.py CHANGED
@@ -43,8 +43,7 @@ model = DepthAnythingV2(**model_configs[encoder])
 filepath = hf_hub_download(repo_id=f"depth-anything/Depth-Anything-V2-Large", filename=f"depth_anything_v2_vitl.pth", repo_type="model")
 state_dict = torch.load(filepath, map_location="cpu")
 model.load_state_dict(state_dict)
-# model = model.to(DEVICE).eval()
-model = model.eval()
+model = model.to(DEVICE).eval()
 
 import torch
 from diffusers.utils import load_image
@@ -67,6 +66,7 @@ open_pose = OpenposeDetector.from_pretrained("lllyasviel/Annotators")
 torch.backends.cuda.matmul.allow_tf32 = True
 pipe.vae.enable_tiling()
 pipe.vae.enable_slicing()
+pipe.enable_model_cpu_offload() # for saving memory
 
 def convert_from_image_to_cv2(img: Image) -> np.ndarray:
     return cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
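
For context, a minimal sketch of how the relevant part of app.py reads after this commit: the Depth Anything V2 model is moved onto the compute device before inference, and diffusers' model CPU offload is enabled on the pipeline to reduce GPU memory use. The DepthAnythingV2 import path, the model_configs entry, and the DEVICE definition are not visible in this diff and are assumptions based on the upstream Depth-Anything-V2 repository.

import torch
from huggingface_hub import hf_hub_download
# Assumed import path from the upstream Depth-Anything-V2 repository.
from depth_anything_v2.dpt import DepthAnythingV2

DEVICE = "cuda" if torch.cuda.is_available() else "cpu"  # assumed; not shown in this diff

# Assumed ViT-L config, matching the upstream repository.
model_configs = {
    "vitl": {"encoder": "vitl", "features": 256, "out_channels": [256, 512, 1024, 1024]},
}
encoder = "vitl"

model = DepthAnythingV2(**model_configs[encoder])
filepath = hf_hub_download(
    repo_id="depth-anything/Depth-Anything-V2-Large",
    filename="depth_anything_v2_vitl.pth",
    repo_type="model",
)
model.load_state_dict(torch.load(filepath, map_location="cpu"))
model = model.to(DEVICE).eval()  # first change: keep the depth model on the compute device

# ... later, after the diffusers pipeline `pipe` is built (not part of this hunk):
# pipe.vae.enable_tiling()
# pipe.vae.enable_slicing()
# pipe.enable_model_cpu_offload()  # second change: offload idle components to CPU to save memory

pipe.enable_model_cpu_offload() keeps each pipeline component on the CPU and moves it to the GPU only while it is executing, trading some latency for a lower peak GPU memory footprint.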