raphael-gl HF staff committed on
Commit
a839033
1 Parent(s): fa4e4a3

Gradio: cache examples in a lazy manner

Browse files

This should avoid runtime errors on startup, by lack of gpu quota
requirements update: lazy example caching requires gradio >= 4.25.0
See https://www.gradio.app/changelog

Signed-off-by: Raphael Glon <[email protected]>

Files changed (2) hide show
  1. app.py +1 -1
  2. requirements.txt +1 -1
app.py CHANGED
@@ -21,7 +21,7 @@ if not torch.cuda.is_available():
21
  DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU. </p>"
22
  IS_COLAB = utils.is_google_colab() or os.getenv("IS_COLAB") == "1"
23
  HF_TOKEN = os.getenv("HF_TOKEN")
24
- CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES") == "1"
25
  MIN_IMAGE_SIZE = int(os.getenv("MIN_IMAGE_SIZE", "512"))
26
  MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "2048"))
27
  USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE") == "1"
 
21
  DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU. </p>"
22
  IS_COLAB = utils.is_google_colab() or os.getenv("IS_COLAB") == "1"
23
  HF_TOKEN = os.getenv("HF_TOKEN")
24
+ CACHE_EXAMPLES = 'lazy' if torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES") == "1" else False
25
  MIN_IMAGE_SIZE = int(os.getenv("MIN_IMAGE_SIZE", "512"))
26
  MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "2048"))
27
  USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE") == "1"
requirements.txt CHANGED
@@ -1,6 +1,6 @@
1
  accelerate==0.27.2
2
  diffusers==0.26.3
3
- gradio==4.20.0
4
  invisible-watermark==0.2.0
5
  Pillow==10.2.0
6
  spaces==0.24.0
 
1
  accelerate==0.27.2
2
  diffusers==0.26.3
3
+ gradio>=4.25.0
4
  invisible-watermark==0.2.0
5
  Pillow==10.2.0
6
  spaces==0.24.0