Luigi committed on
Commit
774840a
·
1 Parent(s): 1daf416

Fix device mismatch: load models on same device as GPU computation

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -35,8 +35,8 @@ def load_models_and_components(model_name: str):
35
  """Load and cache models, tokenizer, vocoder, and feature extractor."""
36
  global _models_cache, _tokenizer_cache, _vocoder_cache, _feature_extractor_cache
37
 
38
- # Set device (CPU for Spaces)
39
- device = torch.device("cpu")
40
 
41
  if model_name not in _models_cache:
42
  print(f"Loading {model_name} model...")
 
35
  """Load and cache models, tokenizer, vocoder, and feature extractor."""
36
  global _models_cache, _tokenizer_cache, _vocoder_cache, _feature_extractor_cache
37
 
38
+ # Set device (GPU if available for Spaces GPU acceleration)
39
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
40
 
41
  if model_name not in _models_cache:
42
  print(f"Loading {model_name} model...")