- # Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
+ # Copyright (c) 2021-2025, NVIDIA CORPORATION. All rights reserved.
# See file LICENSE for terms.

from enum import Enum
@@ -62,12 +62,10 @@ class nvCompManager:
    # Default options exist for every option type for every class that inherits
    # from nvCompManager, which takes advantage of the below property-setting
    # code.
-     stream: cp.cuda.Stream = cp.cuda.Stream()
    chunk_size: int = 1 << 16
    data_type: _lib.pyNvcompType_t = _lib.pyNvcompType_t.pyNVCOMP_TYPE_UCHAR
    # Some classes have this defined as type, some as data_type.
    type: _lib.pyNvcompType_t = _lib.pyNvcompType_t.pyNVCOMP_TYPE_UCHAR
-     device_id: int = 0

    # Bitcomp Defaults
    bitcomp_algo: int = 0
@@ -84,12 +82,6 @@ def __init__(self, kwargs):

        Special case: Convert data_type to a _lib.pyNvcompType_t
        """
-         # Special case: Throw error if stream or device_id are specified
-         if kwargs.get("stream") is not None:
-             raise NotImplementedError(
-                 "stream argument not yet supported: " "Use the default argument"
-             )
-
        # data_type will be passed in as a python object. Convert it to
        # a C++ nvcompType_t here.
        if kwargs.get("data_type"):
@@ -221,13 +213,10 @@ def __init__(self, **kwargs):
        ----------
        chunk_size: int (optional)
            Defaults to 4096.
-         device_id: int (optional)
-             Specify which device_id on the node to use for allocation and compression.
-             Defaults to 0.
        """
        super().__init__(kwargs)

-         self._manager = _lib._ANSManager(self.chunk_size, self.stream, self.device_id)
+         self._manager = _lib._ANSManager(self.chunk_size)


class BitcompManager(nvCompManager):
@@ -241,18 +230,13 @@ def __init__(self, **kwargs):
        ----------
        chunk_size: int (optional)
            Defaults to 4096.
-         device_id: int (optional)
-             Specify which device_id on the node to use
-             Defaults to 0.
        """
        super().__init__(kwargs)

        self._manager = _lib._BitcompManager(
            self.chunk_size,
            self.data_type.value,
            self.bitcomp_algo,
-             self.stream,
-             self.device_id,
        )

@@ -278,9 +262,6 @@ def __init__(self, **kwargs):
        use_bp: bool (optional)
            Enable Bitpacking, see [algorithms overview.md](
            https://github.com/NVIDIA/nvcomp/blob/main/doc/algorithms_overview.md#bitpacking) # noqa: E501
-         device_id: int (optional)
-             Specify which device_id on the node to use
-             Defaults to 0.
        """
        super().__init__(kwargs)
        default_options = {
@@ -304,9 +285,7 @@ def __init__(self, **kwargs):
            "num_deltas": self.num_deltas,
            "use_bp": self.use_bp,
        }
-         self._manager = _lib._CascadedManager(
-             default_options, self.stream, self.device_id
-         )
+         self._manager = _lib._CascadedManager(default_options)


class GdeflateManager(nvCompManager):
@@ -322,18 +301,10 @@ def __init__(self, **kwargs):
        algo: int (optional)
            Integer in the range [0, 1, 2]. Only algorithm #0 is currently
            supported.
-         stream: cudaStream_t (optional)
-             Which CUDA stream to perform the operation on. Not currently
-             supported.
-         device_id: int (optional)
-             Specify which device_id on the node to use
-             Defaults to 0.
        """
        super().__init__(kwargs)

-         self._manager = _lib._GdeflateManager(
-             self.chunk_size, self.algo, self.stream, self.device_id
-         )
+         self._manager = _lib._GdeflateManager(self.chunk_size, self.algo)


class LZ4Manager(nvCompManager):
@@ -354,17 +325,9 @@ def __init__(self, **kwargs):
        data_type: pyNVCOMP_TYPE (optional)
            The data type returned for decompression.
            Defaults to pyNVCOMP_TYPE.UCHAR
-         stream: cudaStream_t (optional)
-             Which CUDA stream to perform the operation on. Not currently
-             supported.
-         device_id: int (optional)
-             Specify which device_id on the node to use
-             Defaults to 0.
        """
        super().__init__(kwargs)
-         self._manager = _lib._LZ4Manager(
-             self.chunk_size, self.data_type.value, self.stream, self.device_id
-         )
+         self._manager = _lib._LZ4Manager(self.chunk_size, self.data_type.value)


class SnappyManager(nvCompManager):
@@ -377,17 +340,9 @@ def __init__(self, **kwargs):
        Parameters
        ----------
        chunk_size: int (optional)
-         stream: cudaStream_t (optional)
-             Which CUDA stream to perform the operation on. Not currently
-             supported.
-         device_id: int (optional)
-             Specify which device_id on the node to use
-             Defaults to 0.
        """
        super().__init__(kwargs)
-         self._manager = _lib._SnappyManager(
-             self.chunk_size, self.stream, self.device_id
-         )
+         self._manager = _lib._SnappyManager(self.chunk_size)


class ManagedDecompressionManager(nvCompManager):
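For context on what a call site looks like once `stream` and `device_id` are gone from the constructors, here is a minimal, hedged sketch. It assumes the `kvikio.nvcomp` module path and the pre-existing `compress()`/`decompress()` methods on the managers, neither of which appears in this diff; device and stream selection is left to CuPy's current-device defaults.

```python
# Minimal usage sketch after this change, NOT taken from the diff itself:
# it assumes the kvikio.nvcomp module path and that the manager classes
# expose compress()/decompress() on CuPy uint8 arrays.
import cupy as cp

from kvikio import nvcomp

# Build a manager from compression options only; `stream` and `device_id`
# are no longer accepted by the constructors.
manager = nvcomp.LZ4Manager(chunk_size=1 << 16)

data = (cp.arange(1 << 20) % 256).astype(cp.uint8)  # sample payload
compressed = manager.compress(data)
decompressed = manager.decompress(compressed)

# Compare only the original extent in case the output buffer is padded.
assert cp.array_equal(decompressed[: data.size], data)
```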