@@ -34,7 +34,7 @@ def TV_ROF_CPU(inputData, regularisation_parameter, iterationsNumb,
infovector = np.zeros((2,), dtype='float32')
infovector_p = infovector.ctypes.data_as(ctypes.POINTER(ctypes.c_float))

- dims = list(inputData.shape)
+ dims = list(inputData.shape)[::-1]
if inputData.ndim == 2:
    dims.append(1)
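The same one-line change repeats in every wrapper below: NumPy lists a C-ordered array's shape slowest axis first, while the C prototypes quoted later in this diff take their extents as (dimX, dimY, dimZ) with dimX the fastest-varying axis, so the shape list is reversed before the 2D case is padded with a singleton dimension. A minimal sketch of that mapping, assuming the slowest NumPy axis corresponds to Z for 3D data (the helper name dims_for_c is made up for illustration):

import numpy as np

# Hypothetical helper mirroring the pattern added in each wrapper:
# reverse the C-ordered NumPy shape so the library receives (dimX, dimY, dimZ),
# padding 2D inputs with a trailing singleton dimZ.
def dims_for_c(inputData):
    dims = list(inputData.shape)[::-1]
    if inputData.ndim == 2:
        dims.append(1)
    return dims

print(dims_for_c(np.zeros((5, 7), dtype='float32')))     # [7, 5, 1] -> (dimX, dimY, dimZ)
print(dims_for_c(np.zeros((3, 5, 7), dtype='float32')))  # [7, 5, 3]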
@@ -79,7 +79,7 @@ def TV_FGP_CPU(inputData, lambdaPar, iterationsNumb, epsil, methodTV, nonneg, ou
infovector = np.zeros((2,), dtype='float32')
infovector_p = infovector.ctypes.data_as(ctypes.POINTER(ctypes.c_float))

- dims = list(inputData.shape)
+ dims = list(inputData.shape)[::-1]
if inputData.ndim == 2:
    dims.append(1)
@@ -122,7 +122,7 @@ def PDTV_CPU(inputData, lambdaPar, iterationsNumb, epsil, lipschitz_const, metho
infovector = np.zeros((2,), dtype='float32')
infovector_p = infovector.ctypes.data_as(ctypes.POINTER(ctypes.c_float))

- dims = list(inputData.shape)
+ dims = list(inputData.shape)[::-1]
if inputData.ndim == 2:
    dims.append(1)
@@ -161,7 +161,7 @@ def SB_TV_CPU(inputData, mu, iter, epsil, methodTV, out=None, infovector=None):
infovector = np.zeros((2,), dtype='float32')
infovector_p = infovector.ctypes.data_as(ctypes.POINTER(ctypes.c_float))

- dims = list(inputData.shape)
+ dims = list(inputData.shape)[::-1]
if inputData.ndim == 2:
    dims.append(1)
@@ -199,7 +199,7 @@ def LLT_ROF_CPU(inputData, lambdaROF, lambdaLLT, iterationsNumb, tau, epsil, out
infovector = np.zeros((2,), dtype='float32')
infovector_p = infovector.ctypes.data_as(ctypes.POINTER(ctypes.c_float))

- dims = list(inputData.shape)
+ dims = list(inputData.shape)[::-1]
if inputData.ndim == 2:
    dims.append(1)
@@ -239,7 +239,7 @@ def TGV_CPU(inputData, lambdaPar, alpha1, alpha0, iterationsNumb, L2, epsil, out
infovector = np.zeros((2,), dtype='float32')
infovector_p = infovector.ctypes.data_as(ctypes.POINTER(ctypes.c_float))

- dims = list(inputData.shape)
+ dims = list(inputData.shape)[::-1]
if inputData.ndim == 2:
    dims.append(1)
@@ -281,7 +281,7 @@ def dTV_FGP_CPU(inputData, inputRef, lambdaPar, iterationsNumb, epsil, eta, meth
infovector = np.zeros((2,), dtype='float32')
infovector_p = infovector.ctypes.data_as(ctypes.POINTER(ctypes.c_float))

- dims = list(inputData.shape)
+ dims = list(inputData.shape)[::-1]
if inputData.ndim == 2:
    dims.append(1)
@@ -312,7 +312,7 @@ def TNV(inputData, lambdaPar, maxIter, tol, out=None):
out = np.zeros_like(inputData)
out_p = out.ctypes.data_as(ctypes.POINTER(ctypes.c_float))

- dims = list(inputData.shape)
+ dims = list(inputData.shape)[::-1]
if inputData.ndim != 3:
    raise ValueError('Input data must be 2D + 1 channel')
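TNV applies the same reversal to a stack of 2D images ('2D + 1 channel'). Assuming the stack is stored channel-first, the reversal moves the channel count to the last position of dims; the shape values below are purely illustrative:

import numpy as np

# Hypothetical channel-first stack (nc, ny, nx); the channel-axis position is an assumption.
stack = np.zeros((4, 64, 128), dtype='float32')
dims = list(stack.shape)[::-1]
print(dims)  # [128, 64, 4]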
@@ -439,7 +439,7 @@ def TV_ENERGY(U, U0, lambdaPar, type, E_val=None):
    ctypes.c_int,  # dimY (int)
]
cilreg.TV_energy2D.restype = ctypes.c_float  # return value is float
- result = cilreg.TV_energy2D(u_p, u0_p, e_val_p, lambdaPar, type, dims[0], dims[1])
+ result = cilreg.TV_energy2D(u_p, u0_p, e_val_p, lambdaPar, type, dims[1], dims[0])
elif U.ndim == 3:
    # float TV_energy3D(float *U, float *U0, float *E_val, float lambdaPar, int type, int dimX, int dimY, int dimZ);
    cilreg.TV_energy3D.argtypes = [
@@ -455,7 +455,7 @@ def TV_ENERGY(U, U0, lambdaPar, type, E_val=None):
cilreg.TV_energy3D.restype = ctypes.c_float  # return value is float

# float TV_energy3D(float *U, float *U0, float *E_val, float lambdaPar, int type, int dimX, int dimY, int dimZ);
- result = cilreg.TV_energy3D(u_p, u0_p, e_val_p, lambdaPar, type, dims[0], dims[1], dims[2])
+ result = cilreg.TV_energy3D(u_p, u0_p, e_val_p, lambdaPar, type, dims[2], dims[1], dims[0])
else:
    raise ValueError(f"TV_ENERGY: Only 2D and 3D data are supported. Got {U.ndim}")
return E_val
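TV_ENERGY keeps dims in NumPy order, so the fix swaps the extents at the call site instead: assuming rows map to Y and columns to X, a 2D array of shape (rows, cols) yields dims[1], dims[0] = (dimX, dimY) for TV_energy2D, and a 3D array of shape (slices, rows, cols) yields dims[2], dims[1], dims[0] = (dimX, dimY, dimZ) for TV_energy3D, matching the prototype quoted in the comment above. A small illustration with made-up shapes (no library call, just the index arithmetic):

import numpy as np

U = np.zeros((480, 640), dtype='float32')      # hypothetical 2D image, shape (rows, cols)
dims = list(U.shape)                           # [480, 640] in NumPy order
print(dims[1], dims[0])                        # 640 480 -> (dimX, dimY)

V = np.zeros((32, 480, 640), dtype='float32')  # hypothetical 3D volume, shape (slices, rows, cols)
dims3 = list(V.shape)                          # [32, 480, 640]
print(dims3[2], dims3[1], dims3[0])            # 640 480 32 -> (dimX, dimY, dimZ)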
@@ -503,7 +503,7 @@ def TV_ROF_GPU(inputData, regularisation_parameter, iterationsNumb,
infovector = np.zeros((2,), dtype='float32')
infovector_p = infovector.ctypes.data_as(ctypes.POINTER(ctypes.c_float))

- dims = list(inputData.shape)
+ dims = list(inputData.shape)[::-1]
if inputData.ndim == 2:
    dims.append(1)
@@ -551,7 +551,7 @@ def TV_FGP_GPU(inputData, lambdaPar, iterationsNumb, epsil, methodTV, nonneg, gp
infovector = np.zeros((2,), dtype='float32')
infovector_p = infovector.ctypes.data_as(ctypes.POINTER(ctypes.c_float))

- dims = list(inputData.shape)
+ dims = list(inputData.shape)[::-1]
if inputData.ndim == 2:
    dims.append(1)
if gpu_device == 'gpu':
@@ -594,7 +594,7 @@ def PDTV_GPU(Input, lambdaPar, iter, epsil, lipschitz_const, methodTV, nonneg, g
infovector = np.zeros_like(Input)
infovector_p = infovector.ctypes.data_as(ctypes.POINTER(ctypes.c_float))

- dims = list(Input.shape)
+ dims = list(Input.shape)[::-1]
if Input.ndim == 2:
    dims.append(1)
# int TV_PD_GPU_main(float *Input, float *Output, float *infovector, float lambdaPar, int iter, float epsil,
@@ -635,7 +635,7 @@ def SB_TV_GPU(inputData, lambdaPar, iterationsNumb, epsil, methodTV,
infovector = np.zeros((2,), dtype='float32')
infovector_p = infovector.ctypes.data_as(ctypes.POINTER(ctypes.c_float))

- dims = list(inputData.shape)
+ dims = list(inputData.shape)[::-1]
if inputData.ndim == 2:
    dims.append(1)
@@ -676,7 +676,7 @@ def LLT_ROF_GPU(inputData, lambdaROF, lambdaLLT, iterationsNumb, tau, epsil, gpu
infovector = np.zeros((2,), dtype='float32')
infovector_p = infovector.ctypes.data_as(ctypes.POINTER(ctypes.c_float))

- dims = list(inputData.shape)
+ dims = list(inputData.shape)[::-1]
if inputData.ndim == 2:
    dims.append(1)
@@ -719,7 +719,7 @@ def TGV_GPU(inputData, lambdaPar, alpha1, alpha0, iterationsNumb, L2, epsil,
infovector = np.zeros((2,), dtype='float32')
infovector_p = infovector.ctypes.data_as(ctypes.POINTER(ctypes.c_float))

- dims = list(inputData.shape)
+ dims = list(inputData.shape)[::-1]
if inputData.ndim == 2:
    dims.append(1)
dims = dims[::-1]
@@ -765,7 +765,7 @@ def dTV_FGP_GPU(inputData, inputRef, lambdaPar, iterationsNumb, epsil, eta,
infovector = np.zeros((2,), dtype='float32')
infovector_p = infovector.ctypes.data_as(ctypes.POINTER(ctypes.c_float))

- dims = list(inputData.shape)
+ dims = list(inputData.shape)[::-1]
if inputData.ndim == 2:
    dims.append(1)