Skip to content

Commit 398ee7d

Browse files
committed
Fix bug in handling of row/column matrices in GEMV c_code
The bug was caused by reusing the adjusted strides in the logic that decides whether the call to GEMV should be transposed. In particular, the +1 in the strides variable caused the error branch (no double-strides) to be reached incorrectly. The +1 was presumably there for the case of a matrix with a length-0 dimension, but that case triggers a branch where the adjusted strides are never used. This bug was introduced in afe934b.
1 parent 8362f6a commit 398ee7d

File tree

2 files changed

+66
-83
lines changed

2 files changed

+66
-83
lines changed

pytensor/tensor/blas_c.py

Lines changed: 61 additions & 82 deletions
Original file line numberDiff line numberDiff line change
@@ -409,42 +409,46 @@ def gemv_c_code(y, A, x, z, alpha, beta, fail, force_init_beta=False, params=Non
409409
}
410410
}
411411
{
412-
char TRANS = 'T';
413-
char NOTRANS = 'N';
414412
int NA0 = PyArray_DIMS(%(A)s)[0];
415413
int NA1 = PyArray_DIMS(%(A)s)[1];
416-
/* This formula is needed in the case where A is actually a row or
417-
* column matrix, because BLAS sometimes insists that the strides:
418-
* - are not smaller than the number of elements in the array
419-
* - are not 0.
420-
*/
421-
int SA0 = (NA0 > 1) ? (PyArray_STRIDES(%(A)s)[0] / elemsize) : (NA1 + 1);
422-
int SA1 = (NA1 > 1) ? (PyArray_STRIDES(%(A)s)[1] / elemsize) : (NA0 + 1);
423-
int Sz = PyArray_STRIDES(%(z)s)[0] / elemsize;
424-
int Sx = PyArray_STRIDES(%(x)s)[0] / elemsize;
425-
426-
dtype_%(A)s* A_data = (dtype_%(A)s*) PyArray_DATA(%(A)s);
427-
dtype_%(x)s* x_data = (dtype_%(x)s*) PyArray_DATA(%(x)s);
428-
dtype_%(z)s* z_data = (dtype_%(z)s*) PyArray_DATA(%(z)s);
429-
// gemv expects pointers to the beginning of memory arrays,
430-
// but numpy provides a pointer to the first element,
431-
// so when the stride is negative, we need to get the last one.
432-
if (Sx < 0)
433-
x_data += (NA1 - 1) * Sx;
434-
if (Sz < 0)
435-
z_data += (NA0 - 1) * Sz;
436414
437415
if (NA0 * NA1)
438416
{
417+
// Non-empty A matrix
418+
419+
/* In the case where A is actually a row or column matrix,
420+
* the strides corresponding to the dummy dimension don't matter,
421+
* but BLAS requires these to be no smaller than the number of elements in the array.
422+
*/
423+
int SA0 = (NA0 > 1) ? (PyArray_STRIDES(%(A)s)[0] / elemsize) : NA1;
424+
int SA1 = (NA1 > 1) ? (PyArray_STRIDES(%(A)s)[1] / elemsize) : NA0;
425+
int Sz = PyArray_STRIDES(%(z)s)[0] / elemsize;
426+
int Sx = PyArray_STRIDES(%(x)s)[0] / elemsize;
427+
428+
dtype_%(A)s* A_data = (dtype_%(A)s*) PyArray_DATA(%(A)s);
429+
dtype_%(x)s* x_data = (dtype_%(x)s*) PyArray_DATA(%(x)s);
430+
dtype_%(z)s* z_data = (dtype_%(z)s*) PyArray_DATA(%(z)s);
431+
432+
// gemv expects pointers to the beginning of memory arrays,
433+
// but numpy provides a pointer to the first element,
434+
// so when the stride is negative, we need to get the last one.
435+
if (Sx < 0)
436+
x_data += (NA1 - 1) * Sx;
437+
if (Sz < 0)
438+
z_data += (NA0 - 1) * Sz;
439+
439440
if ( ((SA0 < 0) || (SA1 < 0)) && (abs(SA0) == 1 || (abs(SA1) == 1)) )
440441
{
441442
// We can treat the array A as C-or F-contiguous by changing the order of iteration
442-
if (SA0 < 0){
443+
// printf("GEMV: Iterating in reverse NA0=%%d, NA1=%%d, SA0=%%d, SA1=%%d\\n", NA0, NA1, SA0, SA1);
444+
if (SA0 < 0)
445+
{
443446
A_data += (NA0 -1) * SA0; // Jump to first row
444447
SA0 = -SA0; // Iterate over rows in reverse
445448
Sz = -Sz; // Iterate over y in reverse
446449
}
447-
if (SA1 < 0){
450+
if (SA1 < 0)
451+
{
448452
A_data += (NA1 -1) * SA1; // Jump to first column
449453
SA1 = -SA1; // Iterate over columns in reverse
450454
Sx = -Sx; // Iterate over x in reverse
@@ -454,24 +458,33 @@ def gemv_c_code(y, A, x, z, alpha, beta, fail, force_init_beta=False, params=Non
454458
// Array isn't contiguous, we have to make a copy
455459
// - if the copy is too long, maybe call vector/vector dot on
456460
// each row instead
457-
// printf("GEMV: Making a copy SA0=%%d, SA1=%%d\\n", SA0, SA1);
461+
// printf("GEMV: Making a copy NA0=%%d, NA1=%%d, SA0=%%d, SA1=%%d\\n", NA0, NA1, SA0, SA1);
458462
npy_intp dims[2];
459463
dims[0] = NA0;
460464
dims[1] = NA1;
461-
462-
PyArrayObject * A_copy = (PyArrayObject *) PyArray_Copy(
463-
%(A)s);
465+
PyArrayObject * A_copy = (PyArrayObject *) PyArray_Copy(%(A)s);
464466
if (!A_copy)
465467
%(fail)s
466468
Py_XDECREF(%(A)s);
467469
%(A)s = A_copy;
468-
SA0 = (NA0 > 1) ? (PyArray_STRIDES(%(A)s)[0] / elemsize) : (NA1 + 1);
469-
SA1 = (NA1 > 1) ? (PyArray_STRIDES(%(A)s)[1] / elemsize) : (NA0 + 1);
470+
SA0 = (NA0 > 1) ? (PyArray_STRIDES(%(A)s)[0] / elemsize) : NA1;
471+
SA1 = (NA1 > 1) ? (PyArray_STRIDES(%(A)s)[1] / elemsize) : NA0;
470472
A_data = (dtype_%(A)s*) PyArray_DATA(%(A)s);
471473
}
474+
//else {printf("GEMV: Using the original array NA0=%%d, NA1=%%d, SA0=%%d, SA1=%%d\\n", NA0, NA1, SA0, SA1);}
472475
473-
if (SA0 == 1)
476+
if (NA0 == 1)
474477
{
478+
// Vector-vector dot product, it seems faster to avoid GEMV
479+
dtype_%(alpha)s alpha = ((dtype_%(alpha)s*)PyArray_DATA(%(alpha)s))[0];
480+
z_data[0] *= dbeta;
481+
z_data[0] += alpha*ddot_(&NA1, (double*)(A_data), &SA1,
482+
(double*)x_data, &Sx);
483+
}
484+
else if (SA0 == 1)
485+
{
486+
// F-contiguous
487+
char NOTRANS = 'N';
475488
if (PyArray_DESCR(%(A)s)->type_num == NPY_FLOAT)
476489
{
477490
float alpha = ((dtype_%(alpha)s*)PyArray_DATA(%(alpha)s))[0];
@@ -501,61 +514,27 @@ def gemv_c_code(y, A, x, z, alpha, beta, fail, force_init_beta=False, params=Non
501514
}
502515
else if (SA1 == 1)
503516
{
517+
// C-contiguous
518+
char TRANS = 'T';
504519
if (PyArray_DESCR(%(A)s)->type_num == NPY_FLOAT)
505520
{
506521
float alpha = ((dtype_%(alpha)s*)PyArray_DATA(%(alpha)s))[0];
507-
508-
// Check for vector-vector dot (NA0 == 1). The code may work
509-
// for SA1 != 1 as well, but has not been tested for this case,
510-
// so SA1 == 1 is required for safety.
511-
if (NA0 == 1 && SA1 == 1)
512-
{
513-
if (fbeta != 0.f) {
514-
z_data[0] = fbeta*z_data[0];
515-
} else {
516-
z_data[0] = 0.f;
517-
}
518-
z_data[0] += alpha*sdot_(&NA1,
519-
(float*)(A_data), &SA1,
520-
(float*)x_data, &Sx);
521-
}
522-
else
523-
{
524-
sgemv_(&TRANS, &NA1, &NA0,
525-
&alpha,
526-
(float*)(A_data), &SA0,
527-
(float*)x_data, &Sx,
528-
&fbeta,
529-
(float*)z_data, &Sz);
530-
}
522+
sgemv_(&TRANS, &NA1, &NA0,
523+
&alpha,
524+
(float*)(A_data), &SA0,
525+
(float*)x_data, &Sx,
526+
&fbeta,
527+
(float*)z_data, &Sz);
531528
}
532529
else if (PyArray_DESCR(%(A)s)->type_num == NPY_DOUBLE)
533530
{
534531
double alpha = ((dtype_%(alpha)s*)PyArray_DATA(%(alpha)s))[0];
535-
536-
// Check for vector-vector dot (NA0 == 1). The code may work
537-
// for SA1 != 1 as well, but has not been tested for this case,
538-
// so SA1 == 1 is required for safety.
539-
if (NA0 == 1 && SA1 == 1)
540-
{
541-
if (dbeta != 0.) {
542-
z_data[0] = dbeta*z_data[0];
543-
} else {
544-
z_data[0] = 0.;
545-
}
546-
z_data[0] += alpha*ddot_(&NA1,
547-
(double*)(A_data), &SA1,
548-
(double*)x_data, &Sx);
549-
}
550-
else
551-
{
552-
dgemv_(&TRANS, &NA1, &NA0,
553-
&alpha,
554-
(double*)(A_data), &SA0,
555-
(double*)x_data, &Sx,
556-
&dbeta,
557-
(double*)z_data, &Sz);
558-
}
532+
dgemv_(&TRANS, &NA1, &NA0,
533+
&alpha,
534+
(double*)(A_data), &SA0,
535+
(double*)x_data, &Sx,
536+
&dbeta,
537+
(double*)z_data, &Sz);
559538
}
560539
else
561540
{
@@ -567,8 +546,7 @@ def gemv_c_code(y, A, x, z, alpha, beta, fail, force_init_beta=False, params=Non
567546
else
568547
{
569548
PyErr_SetString(PyExc_AssertionError,
570-
"xx is a double-strided matrix, and should have been "
571-
"copied into a memory-contiguous one.");
549+
"A is neither C nor F-contiguous, it should have been copied into a memory-contiguous array;");
572550
%(fail)s
573551
}
574552
}
@@ -577,6 +555,7 @@ def gemv_c_code(y, A, x, z, alpha, beta, fail, force_init_beta=False, params=Non
577555
// the matrix has at least one dim of length 0
578556
// so we do this loop, which either iterates over 0 elements
579557
// or else it does the right thing for length-0 A.
558+
int Sz = PyArray_STRIDES(%(z)s)[0] / elemsize;
580559
dtype_%(z)s * zptr = (dtype_%(z)s*)(PyArray_DATA(%(z)s));
581560
for (int i = 0; i < NA0; ++i)
582561
{
@@ -613,7 +592,7 @@ def c_code(self, node, name, inp, out, sub):
613592
return code
614593

615594
def c_code_cache_version(self):
616-
return (15, blas_header_version(), check_force_gemv_init())
595+
return (16, blas_header_version(), check_force_gemv_init())
617596

618597

619598
cgemv_inplace = CGemv(inplace=True)

tests/tensor/test_blas.py

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2226,8 +2226,10 @@ def cmp_gemv(self, a_shp, b_shp, c_shp, rng):
22262226

22272227
a.set_value(a_dev.copy()[::a_step], borrow=True)
22282228
b.set_value(b_dev.copy()[::b_step1, ::b_step2], borrow=True)
2229+
# Copy as C so that it becomes F after the transpose in the graph
22292230
b_t.set_value(
2230-
np.transpose(b_dev.copy())[::b_step2, ::b_step1], borrow=True
2231+
np.transpose(b_dev).copy(order="C")[::b_step2, ::b_step1],
2232+
borrow=True,
22312233
)
22322234
c.set_value(c_dev.copy()[::c_step], borrow=True)
22332235

@@ -2244,6 +2246,7 @@ def test_gemv(self):
22442246
self.cmp_gemv(3, (3, 5), 5, rng)
22452247
self.cmp_gemv(1, (1, 5), 5, rng)
22462248
self.cmp_gemv(3, (3, 1), 1, rng)
2249+
self.cmp_gemv(1, (1, 1), 1, rng)
22472250
self.cmp_gemv(0, (0, 5), 5, rng)
22482251
self.cmp_gemv(3, (3, 0), 0, rng)
22492252
self.cmp_gemv(0, (0, 1), 1, rng)
@@ -2301,6 +2304,7 @@ def test_ger_strides(self):
23012304
self.cmp_ger((3, 5), 3, 5, rng)
23022305
self.cmp_ger((1, 5), 1, 5, rng)
23032306
self.cmp_ger((3, 1), 3, 1, rng)
2307+
self.cmp_ger((1, 1), 1, 1, rng)
23042308
self.cmp_ger((0, 5), 0, 5, rng)
23052309
self.cmp_ger((3, 0), 3, 0, rng)
23062310
self.cmp_ger((0, 1), 0, 1, rng)

0 commit comments

Comments
 (0)