Skip to content

Commit 602ba86

Browse files
authored
gate sparsity tests by presence of cusparselt (#1602)
Summary: I have a PyTorch build without `cuSparseLt`. This adds logic to properly skip tests that depend on this library being available. Test Plan: Local testing on an H100 without cuSparseLt: ``` pytest test/prototype/test_sparse_api.py -s ``` Reviewers: Subscribers: Tasks: Tags:
1 parent 166a357 commit 602ba86

File tree

1 file changed

+9
-2
lines changed

1 file changed

+9
-2
lines changed

test/dtypes/test_affine_quantized.py

+9-2
Original file line number | Diff line number | Diff line change
@@ -23,6 +23,10 @@
2323
is_sm_at_least_89,
2424
)
2525

26+
is_cusparselt_available = (
27+
hasattr(torch.backends, "cusparselt") and torch.backends.cusparselt.is_available()
28+
)
29+
2630

2731
def get_quantization_functions(
2832
do_sparse: bool, do_int4: bool, device: str = "cuda", int4_zp_int: bool = False
@@ -91,7 +95,8 @@ def test_tensor_core_layout_transpose(self):
9195

9296
@unittest.skipIf(not torch.cuda.is_available(), "Need CUDA available")
9397
@common_utils.parametrize(
94-
"apply_quant", get_quantization_functions(True, True, "cuda", True)
98+
"apply_quant",
99+
get_quantization_functions(is_cusparselt_available, True, "cuda", True),
95100
)
96101
def test_weights_only(self, apply_quant):
97102
linear = torch.nn.Linear(128, 256, dtype=torch.bfloat16, device="cuda")
@@ -168,7 +173,9 @@ def apply_uint6_weight_only_quant(linear):
168173

169174
deregister_aqt_quantized_linear_dispatch(dispatch_condition)
170175

171-
@common_utils.parametrize("apply_quant", get_quantization_functions(True, True))
176+
@common_utils.parametrize(
177+
"apply_quant", get_quantization_functions(is_cusparselt_available, True)
178+
)
172179
@unittest.skipIf(not torch.cuda.is_available(), "Need CUDA available")
173180
def test_print_quantized_module(self, apply_quant):
174181
linear = torch.nn.Linear(128, 256, dtype=torch.bfloat16, device="cuda")

0 commit comments

Comments (0)