@@ -469,16 +469,17 @@ define <8 x half>@test_int_x86_avx512_mask3_vfmadd_sh(<8 x half> %x0, <8 x half>
 ; X86-LABEL: test_int_x86_avx512_mask3_vfmadd_sh:
 ; X86: # %bb.0:
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
-; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x08]
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx # encoding: [0x0f,0xb6,0x4c,0x24,0x08]
+; X86-NEXT: kmovd %ecx, %k1 # encoding: [0xc5,0xfb,0x92,0xc9]
 ; X86-NEXT: vfmadd231sh (%eax), %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf6,0x7d,0x09,0xb9,0x08]
-; X86-NEXT: vmovaps %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1]
+; X86-NEXT: vmovaps %xmm1, %xmm0 # encoding: [0xc5,0xf8,0x28,0xc1]
 ; X86-NEXT: retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask3_vfmadd_sh:
 ; X64: # %bb.0:
 ; X64-NEXT: kmovd %esi, %k1 # encoding: [0xc5,0xfb,0x92,0xce]
 ; X64-NEXT: vfmadd231sh (%rdi), %xmm0, %xmm1 {%k1} # encoding: [0x62,0xf6,0x7d,0x09,0xb9,0x0f]
-; X64-NEXT: vmovaps %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc1]
+; X64-NEXT: vmovaps %xmm1, %xmm0 # encoding: [0xc5,0xf8,0x28,0xc1]
 ; X64-NEXT: retq # encoding: [0xc3]
   %q = load half, ptr %ptr_b
   %vecinit.i = insertelement <8 x half> undef, half %q, i32 0
@@ -496,7 +497,8 @@ define <8 x half>@test_int_x86_avx512_mask3_vfmadd_sh(<8 x half> %x0, <8 x half>
 define <8 x half>@test_int_x86_avx512_maskz_vfmadd_sh(<8 x half> %x0, <8 x half> %x1, <8 x half> %x2, i8 %x3, i32 %x4){
 ; X86-LABEL: test_int_x86_avx512_maskz_vfmadd_sh:
 ; X86: # %bb.0:
-; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
+; X86-NEXT: kmovd %eax, %k1 # encoding: [0xc5,0xfb,0x92,0xc8]
 ; X86-NEXT: vfmadd213sh %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf6,0x75,0x89,0xa9,0xc2]
 ; X86-NEXT: retl # encoding: [0xc3]
 ;
@@ -528,16 +530,17 @@ define <8 x half>@test_int_x86_avx512_maskz_vfmadd_sh(<8 x half> %x0, <8 x half>
 define void @fmadd_sh_mask_memfold(ptr %a, ptr %b, i8 %c) {
 ; X86-LABEL: fmadd_sh_mask_memfold:
 ; X86: # %bb.0:
-; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x0c]
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08]
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx # encoding: [0x8b,0x4c,0x24,0x04]
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x0c]
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx # encoding: [0x8b,0x4c,0x24,0x08]
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx # encoding: [0x8b,0x54,0x24,0x04]
 ; X86-NEXT: vmovsh {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero
-; X86-NEXT: # encoding: [0x62,0xf5,0x7e,0x08,0x10,0x01]
+; X86-NEXT: # encoding: [0x62,0xf5,0x7e,0x08,0x10,0x02]
 ; X86-NEXT: vmovsh {{.*#+}} xmm1 = mem[0],zero,zero,zero,zero,zero,zero,zero
-; X86-NEXT: # encoding: [0x62,0xf5,0x7e,0x08,0x10,0x08]
+; X86-NEXT: # encoding: [0x62,0xf5,0x7e,0x08,0x10,0x09]
 ; X86-NEXT: vfmadd213sh %xmm0, %xmm0, %xmm1 # encoding: [0x62,0xf6,0x7d,0x08,0xa9,0xc8]
+; X86-NEXT: kmovd %eax, %k1 # encoding: [0xc5,0xfb,0x92,0xc8]
 ; X86-NEXT: vmovsh %xmm1, %xmm0, %xmm0 {%k1} # encoding: [0x62,0xf5,0x7e,0x09,0x10,0xc1]
-; X86-NEXT: vmovsh %xmm0, (%ecx) # encoding: [0x62,0xf5,0x7e,0x08,0x11,0x01]
+; X86-NEXT: vmovsh %xmm0, (%edx) # encoding: [0x62,0xf5,0x7e,0x08,0x11,0x02]
 ; X86-NEXT: retl # encoding: [0xc3]
 ;
 ; X64-LABEL: fmadd_sh_mask_memfold: