@@ -569,44 +569,6 @@ namespace xsimd
             return _mm256_floor_pd(self);
         }
 
-        // ge
-        template <class A>
-        inline batch_bool<float, A> ge(batch<float, A> const& self, batch<float, A> const& other, requires_arch<avx>)
-        {
-            return _mm256_cmp_ps(self, other, _CMP_GE_OQ);
-        }
-        template <class A>
-        inline batch_bool<double, A> ge(batch<double, A> const& self, batch<double, A> const& other, requires_arch<avx>)
-        {
-            return _mm256_cmp_pd(self, other, _CMP_GE_OQ);
-        }
-        template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
-        inline batch_bool<T, A> ge(batch<T, A> const& self, batch<T, A> const& other, requires_arch<avx>)
-        {
-            return detail::fwd_to_sse([](__m128i s, __m128i o)
-                                      { return ge(batch<T, sse4_2>(s), batch<T, sse4_2>(o)); },
-                                      self, other);
-        }
-
-        // gt
-        template <class A>
-        inline batch_bool<float, A> gt(batch<float, A> const& self, batch<float, A> const& other, requires_arch<avx>)
-        {
-            return _mm256_cmp_ps(self, other, _CMP_GT_OQ);
-        }
-        template <class A>
-        inline batch_bool<double, A> gt(batch<double, A> const& self, batch<double, A> const& other, requires_arch<avx>)
-        {
-            return _mm256_cmp_pd(self, other, _CMP_GT_OQ);
-        }
-        template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
-        inline batch_bool<T, A> gt(batch<T, A> const& self, batch<T, A> const& other, requires_arch<avx>)
-        {
-            return detail::fwd_to_sse([](__m128i s, __m128i o)
-                                      { return gt(batch<T, sse4_2>(s), batch<T, sse4_2>(o)); },
-                                      self, other);
-        }
-
         // hadd
         template <class A>
         inline float hadd(batch<float, A> const& rhs, requires_arch<avx>)
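
The integer overloads removed above delegate to detail::fwd_to_sse, which splits each 256-bit AVX register into two 128-bit halves, applies an SSE-level comparison to each half, and recombines the results (AVX prior to AVX2 has no 256-bit integer comparison instructions). The following is a minimal standalone sketch of that splitting pattern, not the xsimd implementation itself: the names fwd_to_sse_sketch and gt_epi32 are illustrative, the real helper works on batch types rather than raw __m256i, and only the signed 32-bit "greater than" case is shown.

#include <immintrin.h>

// Sketch of the split/apply/recombine pattern used by the removed integer
// overloads (illustrative only; the real detail::fwd_to_sse differs).
template <class F>
inline __m256i fwd_to_sse_sketch(F&& op, __m256i self, __m256i other)
{
    // Lower and upper 128-bit lanes of each operand.
    __m128i self_lo = _mm256_castsi256_si128(self);
    __m128i self_hi = _mm256_extractf128_si256(self, 1);
    __m128i other_lo = _mm256_castsi256_si128(other);
    __m128i other_hi = _mm256_extractf128_si256(other, 1);

    // Apply the SSE operation to each half independently.
    __m128i res_lo = op(self_lo, other_lo);
    __m128i res_hi = op(self_hi, other_hi);

    // Stitch the two halves back into one 256-bit result.
    return _mm256_insertf128_si256(_mm256_castsi128_si256(res_lo), res_hi, 1);
}

// Example use: a 32-bit signed "greater than", built from the SSE2 intrinsic,
// in the spirit of what the deleted gt overload forwards to for integral types.
inline __m256i gt_epi32(__m256i a, __m256i b)
{
    return fwd_to_sse_sketch([](__m128i s, __m128i o)
                             { return _mm_cmpgt_epi32(s, o); },
                             a, b);
}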