AIvry committed on
Commit
424791f
·
verified ·
1 Parent(s): 236f287

Update config.py

Browse files
Files changed (1) hide show
  1. config.py +35 -33
config.py CHANGED
@@ -1,33 +1,35 @@
1
- import os
2
- import torch
3
-
4
- import warnings
5
- warnings.filterwarnings(
6
- "ignore",
7
- category=UserWarning,
8
- message=r"^expandable_segments not supported on this platform"
9
- )
10
-
11
- SR = 16_000
12
- RESULTS_ROOT = "results"
13
- BATCH_SIZE = 2
14
- ENERGY_WIN_MS = 20
15
- ENERGY_HOP_MS = 20
16
- SILENCE_RATIO = 0.1
17
- EPS = 1e-4
18
- COV_TOL = 1e-6
19
-
20
- DEFAULT_LAYER = 2
21
- DEFAULT_ADD_CI = True
22
- DEFAULT_DELTA_CI = 0.05
23
- DEFAULT_ALPHA = 1.0
24
-
25
- os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:128,expandable_segments:True,garbage_collection_threshold:0.6"
26
- os.environ["CUDA_LAUNCH_BLOCKING"] = "0"
27
-
28
- torch.backends.cudnn.benchmark = True
29
- torch.backends.cudnn.deterministic = False
30
- torch.backends.cudnn.enabled = True
31
-
32
- if torch.cuda.is_available():
33
- torch.cuda.set_per_process_memory_fraction(0.8)
 
 
 
1
"""Global configuration for the analysis pipeline.

Importing this module has side effects: it installs a warning filter,
sets CUDA allocator environment variables, configures cuDNN backend
flags, and (when a GPU is present and we are not on Hugging Face
Spaces) caps the per-process GPU memory fraction.
"""
import os
import torch

import warnings

# PyTorch emits a UserWarning when expandable_segments (requested below
# via PYTORCH_CUDA_ALLOC_CONF) is unsupported on the platform; that is
# expected and harmless, so silence exactly that message.
warnings.filterwarnings(
    "ignore",
    category=UserWarning,
    message=r"^expandable_segments not supported on this platform"
)

# --- Audio / analysis parameters --------------------------------------
SR = 16_000               # sample rate in Hz
RESULTS_ROOT = "results"  # root directory for result artifacts
BATCH_SIZE = 2            # small batch size to limit GPU memory use
ENERGY_WIN_MS = 20        # energy-analysis window length (ms)
ENERGY_HOP_MS = 20        # energy-analysis hop length (ms)
SILENCE_RATIO = 0.1       # energy ratio threshold used for silence detection
EPS = 1e-4                # generic numerical-stability epsilon
COV_TOL = 1e-6            # covariance regularization tolerance

# --- Model defaults ----------------------------------------------------
DEFAULT_LAYER = 2         # default feature-extraction layer index
DEFAULT_ADD_CI = True     # compute confidence intervals by default
DEFAULT_DELTA_CI = 0.05   # CI significance level (i.e. 95% intervals)
DEFAULT_ALPHA = 1.0       # default alpha weighting factor

# --- CUDA / cuDNN runtime configuration --------------------------------
# NOTE(review): PYTORCH_CUDA_ALLOC_CONF is read when the CUDA context is
# first initialized; setting it after `import torch` works only because
# no CUDA allocation has happened yet. Keep this assignment before any
# .cuda()/.to("cuda") call.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:128,expandable_segments:True,garbage_collection_threshold:0.6"
os.environ["CUDA_LAUNCH_BLOCKING"] = "0"  # keep kernel launches asynchronous

torch.backends.cudnn.benchmark = True       # autotune conv algorithms for speed
torch.backends.cudnn.deterministic = False  # favor speed over reproducibility
torch.backends.cudnn.enabled = True

# Cap this process's share of GPU memory, except on Hugging Face Spaces
# (identified by the SPACE_ID env var), where the shared GPU's memory is
# managed by the platform. The original code double-checked
# torch.cuda.is_available() in a nested `if`; this single condition is
# logically equivalent.
if torch.cuda.is_available() and not os.environ.get("SPACE_ID"):
    torch.cuda.set_per_process_memory_fraction(0.8)