@@ -75,6 +75,19 @@ extern "C" {
     #[link_name = "llvm.ppc.altivec.lvxl"]
     fn lvxl(p: *const i8) -> vector_unsigned_int;
 
+    #[link_name = "llvm.ppc.altivec.stvx"]
+    fn stvx(a: vector_signed_int, p: *const i8);
+
+    #[link_name = "llvm.ppc.altivec.stvebx"]
+    fn stvebx(a: vector_signed_char, p: *const i8);
+    #[link_name = "llvm.ppc.altivec.stvehx"]
+    fn stvehx(a: vector_signed_short, p: *const i8);
+    #[link_name = "llvm.ppc.altivec.stvewx"]
+    fn stvewx(a: vector_signed_int, p: *const i8);
+
+    #[link_name = "llvm.ppc.altivec.stvxl"]
+    fn stvxl(a: vector_signed_int, p: *const i8);
+
     #[link_name = "llvm.ppc.altivec.vperm"]
     fn vperm(
         a: vector_signed_int,
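For orientation: all of the store intrinsics declared above take a byte pointer, and the full-vector stores (stvx/stvxl) silently clear the four low-order bits of the effective address, per the AltiVec specification. A minimal sketch of that address computation — illustrative only, not part of this commit:

```rust
/// Sketch: the effective address targeted by stvx/stvxl, assuming the
/// documented AltiVec behavior of masking the four low-order address bits.
fn stvx_effective_address(p: *const i8, off: isize) -> usize {
    (p as usize).wrapping_add(off as usize) & !0xf
}
```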
@@ -606,6 +619,98 @@ mod sealed {
 
     impl_vec_lde! { vec_lde_f32 lvewx f32 }
 
+    #[unstable(feature = "stdarch_powerpc", issue = "111145")]
+    pub trait VectorSt {
+        type Target;
+        unsafe fn vec_st(self, off: isize, p: Self::Target);
+        unsafe fn vec_stl(self, off: isize, p: Self::Target);
+    }
+
+    macro_rules! impl_vec_st {
+        ($fun:ident $fun_lru:ident $ty:ident) => {
+            #[inline]
+            #[target_feature(enable = "altivec")]
+            #[cfg_attr(test, assert_instr(stvx))]
+            pub unsafe fn $fun(a: t_t_l!($ty), off: isize, p: *const $ty) {
+                let addr = (p as *const i8).offset(off);
+                stvx(transmute(a), addr)
+            }
+
+            #[inline]
+            #[target_feature(enable = "altivec")]
+            #[cfg_attr(test, assert_instr(stvxl))]
+            pub unsafe fn $fun_lru(a: t_t_l!($ty), off: isize, p: *const $ty) {
+                let addr = (p as *const i8).offset(off);
+                stvxl(transmute(a), addr)
+            }
+
+            #[unstable(feature = "stdarch_powerpc", issue = "111145")]
+            impl VectorSt for t_t_l!($ty) {
+                type Target = *const $ty;
+                #[inline]
+                #[target_feature(enable = "altivec")]
+                unsafe fn vec_st(self, off: isize, p: Self::Target) {
+                    $fun(self, off, p)
+                }
+                #[inline]
+                #[target_feature(enable = "altivec")]
+                unsafe fn vec_stl(self, off: isize, p: Self::Target) {
+                    $fun_lru(self, off, p)
+                }
+            }
+        };
+    }
+
+    impl_vec_st! { vec_st_u8 vec_stl_u8 u8 }
+    impl_vec_st! { vec_st_i8 vec_stl_i8 i8 }
+
+    impl_vec_st! { vec_st_u16 vec_stl_u16 u16 }
+    impl_vec_st! { vec_st_i16 vec_stl_i16 i16 }
+
+    impl_vec_st! { vec_st_u32 vec_stl_u32 u32 }
+    impl_vec_st! { vec_st_i32 vec_stl_i32 i32 }
+
+    impl_vec_st! { vec_st_f32 vec_stl_f32 f32 }
+
+    #[unstable(feature = "stdarch_powerpc", issue = "111145")]
+    pub trait VectorSte {
+        type Target;
+        unsafe fn vec_ste(self, off: isize, p: Self::Target);
+    }
+
+    macro_rules! impl_vec_ste {
+        ($fun:ident $instr:ident $ty:ident) => {
+            #[inline]
+            #[target_feature(enable = "altivec")]
+            #[cfg_attr(test, assert_instr($instr))]
+            pub unsafe fn $fun(a: t_t_l!($ty), off: isize, p: *const $ty) {
+                let addr = (p as *const i8).offset(off);
+                $instr(transmute(a), addr)
+            }
+
+            #[unstable(feature = "stdarch_powerpc", issue = "111145")]
+            impl VectorSte for t_t_l!($ty) {
+                type Target = *const $ty;
+                #[inline]
+                #[target_feature(enable = "altivec")]
+                unsafe fn vec_ste(self, off: isize, p: Self::Target) {
+                    $fun(self, off, p)
+                }
+            }
+        };
+    }
+
+    impl_vec_ste! { vec_ste_u8 stvebx u8 }
+    impl_vec_ste! { vec_ste_i8 stvebx i8 }
+
+    impl_vec_ste! { vec_ste_u16 stvehx u16 }
+    impl_vec_ste! { vec_ste_i16 stvehx i16 }
+
+    impl_vec_ste! { vec_ste_u32 stvewx u32 }
+    impl_vec_ste! { vec_ste_i32 stvewx i32 }
+
+    impl_vec_ste! { vec_ste_f32 stvewx f32 }
+
     #[unstable(feature = "stdarch_powerpc", issue = "111145")]
     pub trait VectorXl {
         type Result;
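To make the macro above concrete, here is roughly what `impl_vec_st! { vec_st_u32 vec_stl_u32 u32 }` expands to, assuming `t_t_l!(u32)` resolves to `vector_unsigned_int`. This is an illustrative hand-expansion with attributes elided, not code from the commit:

```rust
// Illustrative expansion of impl_vec_st! for the u32 case (simplified).
pub unsafe fn vec_st_u32(a: vector_unsigned_int, off: isize, p: *const u32) {
    // The offset is applied in bytes; the stvx instruction then masks the
    // four low-order bits of the resulting address.
    let addr = (p as *const i8).offset(off);
    stvx(transmute(a), addr)
}

pub unsafe fn vec_stl_u32(a: vector_unsigned_int, off: isize, p: *const u32) {
    // Same addressing, but stvxl also marks the cache line as LRU.
    let addr = (p as *const i8).offset(off);
    stvxl(transmute(a), addr)
}
```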
@@ -3270,6 +3375,76 @@ where
     p.vec_lde(off)
 }
 
+/// Vector Store Indexed
+///
+/// ## Purpose
+/// Stores a 16-byte vector into memory at the address specified by a displacement and a
+/// pointer, ignoring the four low-order bits of the calculated address.
+///
+/// ## Operation
+/// A memory address is obtained by adding `off` and `c`, and masking off the four low-order
+/// bits of the result. The 16-byte vector in `a` is stored to the resultant memory address.
+#[inline]
+#[target_feature(enable = "altivec")]
+#[unstable(feature = "stdarch_powerpc", issue = "111145")]
+pub unsafe fn vec_st<T>(a: T, off: isize, c: <T as sealed::VectorSt>::Target)
+where
+    T: sealed::VectorSt,
+{
+    a.vec_st(off, c)
+}
+
+/// Vector Store Indexed Least Recently Used
+///
+/// ## Purpose
+/// Stores a 16-byte vector into memory at the address specified by a displacement and
+/// a pointer, ignoring the four low-order bits of the calculated address, and marking the cache
+/// line containing the address as least recently used.
+///
+/// ## Operation
+/// A memory address is obtained by adding `off` and `c`, and masking off the four
+/// low-order bits of the result. The 16-byte vector in `a` is stored to the resultant memory
+/// address, and the containing cache line is marked as least recently used.
+///
+/// ## Notes
+/// This intrinsic can be used to indicate the last access to a portion of memory, as a hint to the
+/// data cache controller that the associated cache line can be replaced without performance loss.
+#[inline]
+#[target_feature(enable = "altivec")]
+#[unstable(feature = "stdarch_powerpc", issue = "111145")]
+pub unsafe fn vec_stl<T>(a: T, off: isize, c: <T as sealed::VectorSt>::Target)
+where
+    T: sealed::VectorSt,
+{
+    a.vec_stl(off, c)
+}
+
+/// Vector Store Element Indexed
+///
+/// ## Purpose
+/// Stores a single element from a 16-byte vector into memory at the address specified by
+/// a displacement and a pointer, aligned to the element size.
+///
+/// ## Operation
+/// The integer value `off` is added to the pointer value `c`. The resulting address is
+/// rounded down to the nearest multiple of es, the element size in bytes: 1 for u8/i8
+/// elements, 2 for u16/i16, and 4 for u32/i32/f32. An element offset eo is calculated by
+/// taking the resultant address modulo 16. The vector element of `a` at offset eo is stored
+/// to the resultant address.
+///
+/// ## Notes
+/// Note that the address (off + c) is aligned to an element boundary; do not attempt to
+/// store unaligned data with this intrinsic.
+#[inline]
+#[target_feature(enable = "altivec")]
+#[unstable(feature = "stdarch_powerpc", issue = "111145")]
+pub unsafe fn vec_ste<T>(a: T, off: isize, c: <T as sealed::VectorSte>::Target)
+where
+    T: sealed::VectorSte,
+{
+    a.vec_ste(off, c)
+}
+
 /// VSX Unaligned Load
 #[inline]
 #[target_feature(enable = "altivec")]
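A hypothetical caller of the new public API might look like the sketch below. It is not part of this commit, and it assumes a powerpc target with AltiVec available, the unstable `stdarch_powerpc` feature, and that `vec_splats(1u32)` yields a `vector_unsigned_int`:

```rust
#![feature(stdarch_powerpc)]
use core::arch::powerpc::*;

// Force 16-byte alignment so the masked store address is the buffer itself.
#[repr(align(16))]
struct Aligned([u32; 4]);

#[target_feature(enable = "altivec")]
unsafe fn store_examples() {
    let mut buf = Aligned([0u32; 4]);
    let v = vec_splats(1u32);
    // Full 16-byte store (stvx); the low four address bits are masked off.
    vec_st(v, 0, buf.0.as_mut_ptr() as *const u32);
    // Store only the element at byte offset 8 (stvewx); the address is
    // rounded down to the 4-byte element size.
    vec_ste(v, 8, buf.0.as_mut_ptr() as *const u32);
}
```

Note that the API stores through a `*const` pointer, since `Target` is defined as `*const $ty` in the sealed traits; the casts above only exist to make the sketch compile against that signature.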