@@ -409,36 +409,38 @@ def gemv_c_code(y, A, x, z, alpha, beta, fail, force_init_beta=False, params=Non
}
}
{
- char TRANS = 'T';
- char NOTRANS = 'N';
int NA0 = PyArray_DIMS(%(A)s)[0];
int NA1 = PyArray_DIMS(%(A)s)[1];
- /* This formula is needed in the case where A is actually a row or
- * column matrix, because BLAS sometimes insists that the strides:
- * - are not smaller than the number of elements in the array
- * - are not 0.
- */
- int SA0 = (NA0 > 1) ? (PyArray_STRIDES(%(A)s)[0] / elemsize) : (NA1 + 1);
- int SA1 = (NA1 > 1) ? (PyArray_STRIDES(%(A)s)[1] / elemsize) : (NA0 + 1);
- int Sz = PyArray_STRIDES(%(z)s)[0] / elemsize;
- int Sx = PyArray_STRIDES(%(x)s)[0] / elemsize;
-
- dtype_%(A)s* A_data = (dtype_%(A)s*) PyArray_DATA(%(A)s);
- dtype_%(x)s* x_data = (dtype_%(x)s*) PyArray_DATA(%(x)s);
- dtype_%(z)s* z_data = (dtype_%(z)s*) PyArray_DATA(%(z)s);
- // gemv expects pointers to the beginning of memory arrays,
- // but numpy provides a pointer to the first element,
- // so when the stride is negative, we need to get the last one.
- if (Sx < 0)
- x_data += (NA1 - 1) * Sx;
- if (Sz < 0)
- z_data += (NA0 - 1) * Sz;

if (NA0 * NA1)
{
+ // Non-empty A matrix
+
+ /* In the case where A is actually a row or column matrix,
+ * the strides corresponding to the dummy dimension don't matter,
+ * but BLAS requires these to be no smaller than the number of elements in the array.
+ */
+ int SA0 = (NA0 > 1) ? (PyArray_STRIDES(%(A)s)[0] / elemsize) : NA1;
+ int SA1 = (NA1 > 1) ? (PyArray_STRIDES(%(A)s)[1] / elemsize) : NA0;
+ int Sz = PyArray_STRIDES(%(z)s)[0] / elemsize;
+ int Sx = PyArray_STRIDES(%(x)s)[0] / elemsize;
+
+ dtype_%(A)s* A_data = (dtype_%(A)s*) PyArray_DATA(%(A)s);
+ dtype_%(x)s* x_data = (dtype_%(x)s*) PyArray_DATA(%(x)s);
+ dtype_%(z)s* z_data = (dtype_%(z)s*) PyArray_DATA(%(z)s);
+
+ // gemv expects pointers to the beginning of memory arrays,
+ // but numpy provides a pointer to the first element,
+ // so when the stride is negative, we need to get the last one.
+ if (Sx < 0)
+ x_data += (NA1 - 1) * Sx;
+ if (Sz < 0)
+ z_data += (NA0 - 1) * Sz;
+
if ( ((SA0 < 0) || (SA1 < 0)) && (abs(SA0) == 1 || (abs(SA1) == 1)) )
{
// We can treat the array A as C-or F-contiguous by changing the order of iteration
+ // printf("GEMV: Iterating in reverse NA0=%%d, NA1=%%d, SA0=%%d, SA1=%%d\\n", NA0, NA1, SA0, SA1);
if (SA0 < 0){
A_data += (NA0 -1) * SA0; // Jump to first row
SA0 = -SA0; // Iterate over rows in reverse
@@ -454,24 +456,33 @@ def gemv_c_code(y, A, x, z, alpha, beta, fail, force_init_beta=False, params=Non
// Array isn't contiguous, we have to make a copy
// - if the copy is too long, maybe call vector/vector dot on
// each row instead
- // printf("GEMV: Making a copy SA0=%%d, SA1=%%d\\n", SA0, SA1);
+ // printf("GEMV: Making a copy NA0=%%d, NA1=%%d, SA0=%%d, SA1=%%d\\n", NA0, NA1, SA0, SA1);
npy_intp dims[2];
dims[0] = NA0;
dims[1] = NA1;
-
- PyArrayObject * A_copy = (PyArrayObject *) PyArray_Copy(
- %(A)s);
+ PyArrayObject * A_copy = (PyArrayObject *) PyArray_Copy(%(A)s);
if (!A_copy)
%(fail)s
Py_XDECREF(%(A)s);
%(A)s = A_copy;
- SA0 = (NA0 > 1) ? (PyArray_STRIDES(%(A)s)[0] / elemsize) : (NA1 + 1);
- SA1 = (NA1 > 1) ? (PyArray_STRIDES(%(A)s)[1] / elemsize) : (NA0 + 1);
+ SA0 = (NA0 > 1) ? (PyArray_STRIDES(%(A)s)[0] / elemsize) : NA1;
+ SA1 = (NA1 > 1) ? (PyArray_STRIDES(%(A)s)[1] / elemsize) : NA0;
A_data = (dtype_%(A)s*) PyArray_DATA(%(A)s);
}
+ //else {printf("GEMV: Using the original array NA0=%%d, NA1=%%d, SA0=%%d, SA1=%%d\\n", NA0, NA1, SA0, SA1);}

- if (SA0 == 1)
+ if (NA0 == 1)
+ {
+ // Vector-vector dot product, it seems faster to avoid GEMV
+ dtype_%(alpha)s alpha = ((dtype_%(alpha)s*)PyArray_DATA(%(alpha)s))[0];
+ z_data[0] *= dbeta;
+ z_data[0] += alpha*ddot_(&NA1, (double*)(A_data), &SA1,
+ (double*)x_data, &Sx);
+ }
+ else if (SA0 == 1)
{
+ // F-contiguous
+ char NOTRANS = 'N';
if (PyArray_DESCR(%(A)s)->type_num == NPY_FLOAT)
{
float alpha = ((dtype_%(alpha)s*)PyArray_DATA(%(alpha)s))[0];
@@ -501,61 +512,27 @@ def gemv_c_code(y, A, x, z, alpha, beta, fail, force_init_beta=False, params=Non
}
else if (SA1 == 1)
{
+ // C-contiguous
+ char TRANS = 'T';
if (PyArray_DESCR(%(A)s)->type_num == NPY_FLOAT)
{
float alpha = ((dtype_%(alpha)s*)PyArray_DATA(%(alpha)s))[0];
-
- // Check for vector-vector dot (NA0 == 1). The code may work
- // for SA1 != 1 as well, but has not been tested for this case,
- // so SA1 == 1 is required for safety.
- if (NA0 == 1 && SA1 == 1)
- {
- if (fbeta != 0.f) {
- z_data[0] = fbeta*z_data[0];
- } else {
- z_data[0] = 0.f;
- }
- z_data[0] += alpha*sdot_(&NA1,
- (float*)(A_data), &SA1,
- (float*)x_data, &Sx);
- }
- else
- {
- sgemv_(&TRANS, &NA1, &NA0,
- &alpha,
- (float*)(A_data), &SA0,
- (float*)x_data, &Sx,
- &fbeta,
- (float*)z_data, &Sz);
- }
+ sgemv_(&TRANS, &NA1, &NA0,
+ &alpha,
+ (float*)(A_data), &SA0,
+ (float*)x_data, &Sx,
+ &fbeta,
+ (float*)z_data, &Sz);
}
else if (PyArray_DESCR(%(A)s)->type_num == NPY_DOUBLE)
{
double alpha = ((dtype_%(alpha)s*)PyArray_DATA(%(alpha)s))[0];
-
- // Check for vector-vector dot (NA0 == 1). The code may work
- // for SA1 != 1 as well, but has not been tested for this case,
- // so SA1 == 1 is required for safety.
- if (NA0 == 1 && SA1 == 1)
- {
- if (dbeta != 0.) {
- z_data[0] = dbeta*z_data[0];
- } else {
- z_data[0] = 0.;
- }
- z_data[0] += alpha*ddot_(&NA1,
- (double*)(A_data), &SA1,
- (double*)x_data, &Sx);
- }
- else
- {
- dgemv_(&TRANS, &NA1, &NA0,
- &alpha,
- (double*)(A_data), &SA0,
- (double*)x_data, &Sx,
- &dbeta,
- (double*)z_data, &Sz);
- }
+ dgemv_(&TRANS, &NA1, &NA0,
+ &alpha,
+ (double*)(A_data), &SA0,
+ (double*)x_data, &Sx,
+ &dbeta,
+ (double*)z_data, &Sz);
}
else
{
@@ -567,8 +544,7 @@ def gemv_c_code(y, A, x, z, alpha, beta, fail, force_init_beta=False, params=Non
else
{
PyErr_SetString(PyExc_AssertionError,
- "xx is a double-strided matrix, and should have been "
- "copied into a memory-contiguous one.");
+ "A is neither C nor F-contiguous, it should have been copied into a memory-contiguous array;");
%(fail)s
}
}
@@ -577,6 +553,7 @@ def gemv_c_code(y, A, x, z, alpha, beta, fail, force_init_beta=False, params=Non
// the matrix has at least one dim of length 0
// so we do this loop, which either iterates over 0 elements
// or else it does the right thing for length-0 A.
+ int Sz = PyArray_STRIDES(%(z)s)[0] / elemsize;
dtype_%(z)s * zptr = (dtype_%(z)s*)(PyArray_DATA(%(z)s));
for (int i = 0; i < NA0; ++i)
{
@@ -613,7 +590,7 @@ def c_code(self, node, name, inp, out, sub):
return code

def c_code_cache_version(self):
- return (15, blas_header_version(), check_force_gemv_init())
+ return (16, blas_header_version(), check_force_gemv_init())

cgemv_inplace = CGemv(inplace=True)
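For reference, the C-contiguous branch above relies on the usual transpose trick: a row-major NA0 x NA1 array, read in column-major order, is an NA1 x NA0 matrix equal to A^T with leading dimension equal to the row stride SA0, so calling gemv with TRANS = 'T' and swapped dimensions computes z = alpha*A*x + beta*z. The following is a minimal standalone sketch of that call, not part of the commit; it assumes a Fortran BLAS (e.g. OpenBLAS, linked with `-lopenblas`) exporting the `dgemv_` symbol.

/* Sketch only: cc demo.c -lopenblas (any Fortran BLAS providing dgemv_ works). */
#include <stdio.h>

/* Fortran BLAS: y = alpha*op(A)*x + beta*y, with A an m-by-n column-major matrix. */
extern void dgemv_(const char *trans, const int *m, const int *n,
                   const double *alpha, const double *a, const int *lda,
                   const double *x, const int *incx,
                   const double *beta, double *y, const int *incy);

int main(void)
{
    /* Row-major (C-contiguous) 2x3 matrix: element strides SA0 = 3, SA1 = 1. */
    double A[2 * 3] = {1, 2, 3,
                       4, 5, 6};
    double x[3] = {1, 1, 1};
    double z[2] = {0, 0};

    int NA0 = 2, NA1 = 3, SA0 = 3, Sx = 1, Sz = 1;
    double alpha = 1.0, beta = 0.0;
    char TRANS = 'T';

    /* Seen column-major, this memory is the NA1 x NA0 matrix A^T with lda = SA0,
     * so op(A^T) = A and the call computes z = alpha*A*x + beta*z. */
    dgemv_(&TRANS, &NA1, &NA0, &alpha, A, &SA0, x, &Sx, &beta, z, &Sz);

    printf("z = [%g, %g]\n", z[0], z[1]);   /* expected: [6, 15] */
    return 0;
}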