From a77c91c4978c8e0b470cd15cfb86c5debf534bf7 Mon Sep 17 00:00:00 2001
From: bokutou
Date: Wed, 22 Dec 2021 01:28:35 +0900
Subject: [PATCH 1/7] add shrink_to_fit to OwnedRepr

---
 src/data_repr.rs        | 55 +++++++++++++-----------
 src/impl_owned_array.rs | 95 +++++++++++++++++++++++++++++++----------
 2 files changed, 103 insertions(+), 47 deletions(-)

diff --git a/src/data_repr.rs b/src/data_repr.rs
index 6630f9ddf..7ab401177 100644
--- a/src/data_repr.rs
+++ b/src/data_repr.rs
@@ -1,10 +1,10 @@
+use crate::extension::nonnull;
+use alloc::borrow::ToOwned;
+use alloc::slice;
+use alloc::vec::Vec;
 use std::mem;
 use std::mem::ManuallyDrop;
 use std::ptr::NonNull;
-use alloc::slice;
-use alloc::borrow::ToOwned;
-use alloc::vec::Vec;
-use crate::extension::nonnull;
 
 use rawpointer::PointerExt;
 
@@ -30,11 +30,7 @@ impl<A> OwnedRepr<A> {
         let len = v.len();
         let capacity = v.capacity();
         let ptr = nonnull::nonnull_from_vec_data(&mut v);
-        Self {
-            ptr,
-            len,
-            capacity,
-        }
+        Self { ptr, len, capacity }
     }
 
     pub(crate) fn into_vec(self) -> Vec<A> {
@@ -42,12 +38,12 @@ impl<A> OwnedRepr<A> {
     }
 
     pub(crate) fn as_slice(&self) -> &[A] {
-        unsafe {
-            slice::from_raw_parts(self.ptr.as_ptr(), self.len)
-        }
+        unsafe { slice::from_raw_parts(self.ptr.as_ptr(), self.len) }
     }
 
-    pub(crate) fn len(&self) -> usize { self.len }
+    pub(crate) fn len(&self) -> usize {
+        self.len
+    }
 
     pub(crate) fn as_ptr(&self) -> *const A {
         self.ptr.as_ptr()
@@ -63,13 +59,11 @@ impl<A> OwnedRepr<A> {
 
     /// Return end pointer
     pub(crate) fn as_end_nonnull(&self) -> NonNull<A> {
-        unsafe {
-            self.ptr.add(self.len)
-        }
+        unsafe { self.ptr.add(self.len) }
     }
 
     /// Reserve `additional` elements; return the new pointer
-    /// 
+    ///
     /// ## Safety
     ///
     /// Note that existing pointers into the data are invalidated
@@ -82,6 +76,21 @@ impl<A> OwnedRepr<A> {
         self.as_nonnull_mut()
     }
 
+    /// Shrink the valid length and the capacity of the buffer to exactly `len`
+    /// elements; this is a no-op if `len` is not less than the current length.
+    /// Elements past the new length are not dropped, so the caller must ensure
+    /// they have already been moved out or do not need `Drop`.
+    pub(crate) fn shrink_to_fit(&mut self, len: usize) {
+        if len < self.len {
+            self.len = len;
+            self.modify_as_vec(|mut v| {
+                v.shrink_to_fit();
+                v
+            });
+            self.capacity = len;
+        }
+    }
+
     /// Set the valid length of the data
     ///
     /// ## Safety
@@ -126,14 +135,13 @@ impl<A> OwnedRepr<A> {
         let len = self.len;
         self.len = 0;
         self.capacity = 0;
-        unsafe {
-            Vec::from_raw_parts(self.ptr.as_ptr(), len, capacity)
-        }
+        unsafe { Vec::from_raw_parts(self.ptr.as_ptr(), len, capacity) }
     }
 }
 
 impl<A> Clone for OwnedRepr<A>
-    where A: Clone
+where
+    A: Clone,
 {
     fn clone(&self) -> Self {
         Self::from(self.as_slice().to_owned())
@@ -174,6 +182,5 @@ impl<A> Drop for OwnedRepr<A> {
     }
 }
 
-unsafe impl<A> Sync for OwnedRepr<A> where A: Sync { }
-unsafe impl<A> Send for OwnedRepr<A> where A: Send { }
-
+unsafe impl<A> Sync for OwnedRepr<A> where A: Sync {}
+unsafe impl<A> Send for OwnedRepr<A> where A: Send {}
diff --git a/src/impl_owned_array.rs b/src/impl_owned_array.rs
index 8cfb82b55..1757fe746 100644
--- a/src/impl_owned_array.rs
+++ b/src/impl_owned_array.rs
@@ -1,4 +1,3 @@
-
 use alloc::vec::Vec;
 use std::mem;
 use std::mem::MaybeUninit;
@@ -7,10 +6,12 @@ use rawpointer::PointerExt;
 
 use crate::imp_prelude::*;
 
+use crate::azip;
 use crate::dimension;
 use crate::error::{ErrorKind, ShapeError};
 use crate::iterators::Baseiter;
 use crate::low_level_util::AbortIfPanic;
+use crate::ArrayViewMut;
 use crate::OwnedRepr;
 use crate::Zip;
 
@@ -166,7 +167,8 @@ impl<A> Array<A, Ix2> {
 }
 
 impl<A, D> Array<A, D>
-    where D: Dimension
+where
+    D: Dimension,
 {
     /// Move all elements from self into `new_array`, which must be of the same shape but
     /// can have a different memory layout. The destination is overwritten completely.
@@ -198,9 +200,7 @@ impl<A, D> Array<A, D>
         } else {
             // If `A` doesn't need drop, we can overwrite the destination.
             // Safe because: move_into_uninit only writes initialized values
-            unsafe {
-                self.move_into_uninit(new_array.into_maybe_uninit())
-            }
+            unsafe { self.move_into_uninit(new_array.into_maybe_uninit()) }
         }
     }
 
@@ -209,7 +209,8 @@ impl<A, D> Array<A, D>
         // Afterwards, `self` drops full of initialized values and dropping works as usual.
         // This avoids moving out of owned values in `self` while at the same time managing
         // the dropping of the values being overwritten in `new_array`.
-        Zip::from(&mut self).and(new_array)
+        Zip::from(&mut self)
+            .and(new_array)
             .for_each(|src, dst| mem::swap(src, dst));
     }
 
@@ -401,16 +402,51 @@ impl<A, D> Array<A, D>
     /// [0., 0., 0., 0.],
     /// [1., 1., 1., 1.]]);
     /// ```
-    pub fn push(&mut self, axis: Axis, array: ArrayView<A, D::Smaller>)
-        -> Result<(), ShapeError>
+    pub fn push(&mut self, axis: Axis, array: ArrayView<A, D::Smaller>) -> Result<(), ShapeError>
     where
         A: Clone,
         D: RemoveAxis,
     {
         // same-dimensionality conversion
-        self.append(axis, array.insert_axis(axis).into_dimensionality::<D>().unwrap())
+        self.append(
+            axis,
+            array.insert_axis(axis).into_dimensionality::<D>().unwrap(),
+        )
     }
 
+    /// Return the offset between the pointer to the beginning of the heap
+    /// allocation owned by the array (`self.data.ptr`) and the logical pointer `self.ptr`.
+    unsafe fn offset_from_data(&self) -> isize {
+        if std::mem::size_of::<A>() != 0 {
+            self.as_ptr().offset_from(self.data.as_ptr())
+        } else {
+            0
+        }
+    }
+
+    /// Shrinks the capacity of the array as much as possible.
+    ///
+    /// ```
+    /// use ndarray::array;
+    /// use ndarray::s;
+    ///
+    /// let a = array![[0, 1, 2], [3, 4, 5], [6, 7, 8]];
+    /// let mut a = a.slice_move(s![.., 0..2]);
+    /// let b = a.clone();
+    /// a.shrink_to_fit();
+    /// assert_eq!(a, b);
+    /// ```
+    pub fn shrink_to_fit(&mut self) {
+        // Create a second (aliasing) view of the elements so they can be reordered in place.
+        let view_mut =
+            unsafe { ArrayViewMut::new(self.ptr, self.dim.clone(), self.strides.clone()) };
+        unsafe { self.ptr.as_ptr().offset(self.offset_from_data()) };
+        self.strides = self.dim.default_strides();
+        azip!((self_elm in &mut *self, view_elm in view_mut) {
+            mem::swap(self_elm, view_elm);
+        });
+        self.data.shrink_to_fit(self.len());
+    }
 
     /// Append an array to the array along an axis.
     ///
@@ -462,8 +498,7 @@ impl<A, D> Array<A, D>
     /// [1., 1., 1., 1.],
     /// [1., 1., 1., 1.]]);
     /// ```
-    pub fn append(&mut self, axis: Axis, mut array: ArrayView<A, D>)
-        -> Result<(), ShapeError>
+    pub fn append(&mut self, axis: Axis, mut array: ArrayView<A, D>) -> Result<(), ShapeError>
     where
         A: Clone,
         D: RemoveAxis,
@@ -556,7 +591,11 @@ impl<A, D> Array<A, D>
                 acc
             } else {
                 let this_ax = ax.len as isize * ax.stride.abs();
-                if this_ax > acc { this_ax } else { acc }
+                if this_ax > acc {
+                    this_ax
+                } else {
+                    acc
+                }
             }
         });
         let mut strides = self.strides.clone();
@@ -574,7 +613,10 @@ impl<A, D> Array<A, D>
             0
         };
         debug_assert!(data_to_array_offset >= 0);
-        self.ptr = self.data.reserve(len_to_append).offset(data_to_array_offset);
+        self.ptr = self
+            .data
+            .reserve(len_to_append)
+            .offset(data_to_array_offset);
 
         // clone elements from view to the array now
         //
@@ -608,10 +650,13 @@ impl<A, D> Array<A, D>
 
         if tail_view.ndim() > 1 {
             sort_axes_in_default_order_tandem(&mut tail_view, &mut array);
-            debug_assert!(tail_view.is_standard_layout(),
-                          "not std layout dim: {:?}, strides: {:?}",
-                          tail_view.shape(), tail_view.strides());
-        }
+            debug_assert!(
+                tail_view.is_standard_layout(),
+                "not std layout dim: {:?}, strides: {:?}",
+                tail_view.shape(),
+                tail_view.strides()
+            );
+        }
 
         // Keep track of currently filled length of `self.data` and update it
         // on scope exit (panic or loop finish). This "indirect" way to
@@ -635,7 +680,6 @@ impl<A, D> Array<A, D>
             data: &mut self.data,
         };
 
-
         // Safety: tail_view is constructed to have the same shape as array
         Zip::from(tail_view)
             .and_unchecked(array)
@@ -665,8 +709,11 @@ impl<A, D> Array<A, D>
 
 /// This is an internal function for use by move_into and IntoIter only, safety invariants may need
 /// to be upheld across the calls from those implementations.
-pub(crate) unsafe fn drop_unreachable_raw<A, D>(mut self_: RawArrayViewMut<A, D>, data_ptr: *mut A, data_len: usize)
-where
+pub(crate) unsafe fn drop_unreachable_raw<A, D>(
+    mut self_: RawArrayViewMut<A, D>,
+    data_ptr: *mut A,
+    data_len: usize,
+) where
     D: Dimension,
 {
     let self_len = self_.len();
@@ -731,8 +778,11 @@ where
         dropped_elements += 1;
     }
 
-    assert_eq!(data_len, dropped_elements + self_len,
-               "Internal error: inconsistency in move_into");
+    assert_eq!(
+        data_len,
+        dropped_elements + self_len,
+        "Internal error: inconsistency in move_into"
+    );
 }
 
 /// Sort axes to standard order, i.e Axis(0) has biggest stride and Axis(n - 1) least stride
@@ -774,7 +824,6 @@ where
     }
 }
 
-
 /// Sort axes to standard order, i.e Axis(0) has biggest stride and Axis(n - 1) least stride
 ///
 /// Axes in a and b are sorted by the strides of `a`, and `a`'s axes should have stride >= 0 before
From 4fd2744093b6df77f696039afe7d5d13f71b169e Mon Sep 17 00:00:00 2001
From: bokutou
Date: Wed, 22 Dec 2021 19:04:13 +0900
Subject: [PATCH 2/7] add to_default_stride

---
 src/impl_owned_array.rs | 54 ++++++++++++++++++++++++++++++++-------
 1 file changed, 45 insertions(+), 9 deletions(-)

diff --git a/src/impl_owned_array.rs b/src/impl_owned_array.rs
index 1757fe746..b291c5d90 100644
--- a/src/impl_owned_array.rs
+++ b/src/impl_owned_array.rs
@@ -1,4 +1,5 @@
 use alloc::vec::Vec;
+use std::collections::HashSet;
 use std::mem;
 use std::mem::MaybeUninit;
@@ -6,7 +7,6 @@ use rawpointer::PointerExt;
 
 use crate::imp_prelude::*;
 
-use crate::azip;
 use crate::dimension;
 use crate::error::{ErrorKind, ShapeError};
 use crate::iterators::Baseiter;
@@ -424,6 +424,49 @@ where
         }
     }
 
+    pub fn to_default_stride(&mut self) {
+        let view_mut =
+            unsafe { ArrayViewMut::new(self.ptr, self.dim.clone(), self.strides.clone()) };
+        let offset = unsafe { self.offset_from_data() };
+        unsafe { self.ptr.offset(offset) };
+        self.strides = self.dim.default_strides();
+        let mut index_ = match self.dim.first_index() {
+            Some(x) => x,
+            None => unreachable!(),
+        };
+        let mut swap_idx: Vec<(isize, isize)> = Vec::new();
+        loop {
+            let self_index = self
+                .dim
+                .stride_offset_checked(&self.strides, &index_)
+                .unwrap();
+            let view_mut_index = view_mut
+                .dim
+                .stride_offset_checked(&view_mut.strides, &index_)
+                .unwrap()
+                + offset;
+            swap_idx.push((
+                std::cmp::min(self_index, view_mut_index),
+                std::cmp::max(self_index, view_mut_index),
+            ));
+
+            index_ = match self.dim.next_for(index_) {
+                Some(x) => x,
+                None => {
+                    break;
+                }
+            };
+        }
+        let swap_idx = swap_idx
+            .into_iter()
+            .collect::<HashSet<(isize, isize)>>()
+            .into_iter()
+            .collect::<Vec<(isize, isize)>>();
+        for (x, y) in swap_idx.iter() {
+            unsafe { mem::swap(self.ptr.offset(*x).as_mut(), self.ptr.offset(*y).as_mut()) };
+        }
+    }
+
     /// Shrinks the capacity of the array as much as possible.
     ///
     /// ```
@@ -437,14 +480,7 @@ where
     /// assert_eq!(a, b);
     /// ```
     pub fn shrink_to_fit(&mut self) {
-        // Create a second (aliasing) view of the elements so they can be reordered in place.
-        let view_mut =
-            unsafe { ArrayViewMut::new(self.ptr, self.dim.clone(), self.strides.clone()) };
-        unsafe { self.ptr.as_ptr().offset(self.offset_from_data()) };
-        self.strides = self.dim.default_strides();
-        azip!((self_elm in &mut *self, view_elm in view_mut) {
-            mem::swap(self_elm, view_elm);
-        });
+        self.to_default_stride();
         self.data.shrink_to_fit(self.len());
     }
 
From 36db501bcdb4f885dc2dc2d4c896d6fa4e5029ee Mon Sep 17 00:00:00 2001
From: bokutou
Date: Wed, 22 Dec 2021 19:08:53 +0900
Subject: [PATCH 3/7] add doc

---
 src/impl_owned_array.rs | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/impl_owned_array.rs b/src/impl_owned_array.rs
index b291c5d90..e647f9ed2 100644
--- a/src/impl_owned_array.rs
+++ b/src/impl_owned_array.rs
@@ -424,6 +424,7 @@ where
         }
     }
 
+    /// Convert from any stride to the default stride
     pub fn to_default_stride(&mut self) {
         let view_mut =
             unsafe { ArrayViewMut::new(self.ptr, self.dim.clone(), self.strides.clone()) };
From bbb8106550df97cd1fea851c375694bbad5e228b Mon Sep 17 00:00:00 2001
From: bokutou
Date: Thu, 23 Dec 2021 22:37:05 +0900
Subject: [PATCH 4/7] rename

---
 src/impl_owned_array.rs | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/src/impl_owned_array.rs b/src/impl_owned_array.rs
index e647f9ed2..805332f24 100644
--- a/src/impl_owned_array.rs
+++ b/src/impl_owned_array.rs
@@ -414,9 +414,8 @@ where
         )
     }
 
-    /// Return the offset between the pointer to the beginning of the heap
-    /// allocation owned by the array (`self.data.ptr`) and the logical pointer `self.ptr`.
-    unsafe fn offset_from_data(&self) -> isize {
+    /// Calculates the distance from `self.data.ptr` to `self.ptr`.
+    unsafe fn offset_from_data_ptr_to_logical_ptr(&self) -> isize {
         if std::mem::size_of::<A>() != 0 {
             self.as_ptr().offset_from(self.data.as_ptr())
         } else {
@@ -428,7 +427,7 @@ where
     pub fn to_default_stride(&mut self) {
         let view_mut =
             unsafe { ArrayViewMut::new(self.ptr, self.dim.clone(), self.strides.clone()) };
-        let offset = unsafe { self.offset_from_data() };
+        let offset = unsafe { self.offset_from_data_ptr_to_logical_ptr() };
         unsafe { self.ptr.offset(offset) };
         self.strides = self.dim.default_strides();
         let mut index_ = match self.dim.first_index() {
From 20920fd3e0c37eff7fc8a6938270a5d4b9f8cd0d Mon Sep 17 00:00:00 2001
From: bokutou
Date: Sat, 25 Dec 2021 20:50:39 +0900
Subject: [PATCH 5/7] fix

---
 src/impl_owned_array.rs | 93 ++++++++++++++++++++---------------------
 tests/shrink_to_fit.rs  | 42 +++++++++++++++++++
 2 files changed, 88 insertions(+), 47 deletions(-)
 create mode 100644 tests/shrink_to_fit.rs

diff --git a/src/impl_owned_array.rs b/src/impl_owned_array.rs
index 805332f24..4c8f36f83 100644
--- a/src/impl_owned_array.rs
+++ b/src/impl_owned_array.rs
@@ -1,5 +1,4 @@
 use alloc::vec::Vec;
-use std::collections::HashSet;
 use std::mem;
 use std::mem::MaybeUninit;
 
@@ -423,50 +422,6 @@ where
         }
     }
 
-    /// Convert from any stride to the default stride
-    pub fn to_default_stride(&mut self) {
-        let view_mut =
-            unsafe { ArrayViewMut::new(self.ptr, self.dim.clone(), self.strides.clone()) };
-        let offset = unsafe { self.offset_from_data_ptr_to_logical_ptr() };
-        unsafe { self.ptr.offset(offset) };
-        self.strides = self.dim.default_strides();
-        let mut index_ = match self.dim.first_index() {
-            Some(x) => x,
-            None => unreachable!(),
-        };
-        let mut swap_idx: Vec<(isize, isize)> = Vec::new();
-        loop {
-            let self_index = self
-                .dim
-                .stride_offset_checked(&self.strides, &index_)
-                .unwrap();
-            let view_mut_index = view_mut
-                .dim
-                .stride_offset_checked(&view_mut.strides, &index_)
-                .unwrap()
-                + offset;
-            swap_idx.push((
-                std::cmp::min(self_index, view_mut_index),
-                std::cmp::max(self_index, view_mut_index),
-            ));
-
-            index_ = match self.dim.next_for(index_) {
-                Some(x) => x,
-                None => {
-                    break;
-                }
-            };
-        }
-        let swap_idx = swap_idx
-            .into_iter()
-            .collect::<HashSet<(isize, isize)>>()
-            .into_iter()
-            .collect::<Vec<(isize, isize)>>();
-        for (x, y) in swap_idx.iter() {
-            unsafe { mem::swap(self.ptr.offset(*x).as_mut(), self.ptr.offset(*y).as_mut()) };
-        }
-    }
-
     /// Shrinks the capacity of the array as much as possible.
     ///
     /// ```
@@ -479,8 +434,52 @@ where
     /// a.shrink_to_fit();
     /// assert_eq!(a, b);
     /// ```
-    pub fn shrink_to_fit(&mut self) {
-        self.to_default_stride();
+    pub fn shrink_to_fit(&mut self)
+    where
+        A: Copy,
+    {
+        let dim = self.dim.clone();
+        let strides = self.strides.clone();
+        let mut shrinked_stride = D::zeros(dim.ndim());
+
+        // Calculate the new strides after shrinking. The axes keep their
+        // relative stride order. For example, if dim is [3, 2, 3] and the
+        // strides are [1, 9, 3], the default strides would be [6, 3, 1],
+        // but shrinking preserves the original stride order and instead
+        // yields [1, 9, 3]: axis 0 stays innermost, then axis 2, then axis 1.
+        let mut stride_order = (0..dim.ndim()).collect::<Vec<_>>();
+        stride_order.sort_unstable_by(|&i, &j| strides[i].cmp(&strides[j]));
+        let mut stride_ = 1;
+        for i in stride_order.iter() {
+            shrinked_stride[*i] = stride_;
+            stride_ = stride_ * dim[*i];
+        }
+
+        // Recover the multi-dimensional index corresponding to a flat offset under the shrunken strides.
+        let mut stride_order_order = (0..dim.ndim()).collect::<Vec<_>>();
+        stride_order_order.sort_unstable_by(|&i, &j| stride_order[j].cmp(&stride_order[i]));
+        let offset_stride = |offset: usize| {
+            let mut offset = offset;
+            let mut index = D::zeros(dim.ndim());
+            for i in stride_order_order.iter() {
+                index[*i] = offset / shrinked_stride[*i];
+                offset = offset % shrinked_stride[*i];
+            }
+            index
+        };
+
+        // Move each element to the offset it has under the shrunken strides.
+        let ptr_offset = unsafe { self.offset_from_data_ptr_to_logical_ptr() };
+        self.ptr = unsafe { self.ptr.offset(ptr_offset * -1) };
+        for offset in 0..self.len() {
+            let index = offset_stride(offset);
+            let old_offset = dim.stride_offset_checked(&strides, &index).unwrap();
+            unsafe {
+                *self.ptr.as_ptr().offset(offset as isize) =
+                    *self.ptr.as_ptr().offset(old_offset + ptr_offset);
+            }
+        }
+        self.strides = shrinked_stride;
         self.data.shrink_to_fit(self.len());
     }
 
diff --git a/tests/shrink_to_fit.rs b/tests/shrink_to_fit.rs
new file mode 100644
index 000000000..eb875189f
--- /dev/null
+++ b/tests/shrink_to_fit.rs
@@ -0,0 +1,42 @@
+use ndarray::{s, Array};
+
+#[test]
+fn array3d_0_dim() {
+    let mut raw_vec = Vec::new();
+    for i in 0..4 * 5 * 6 {
+        raw_vec.push(i);
+    }
+    let a = Array::from_shape_vec((4, 5, 6), raw_vec).unwrap();
+    let mut a_slice = a.slice_move(s![0..2, .., ..]);
+    let a_slice_clone = a_slice.view().to_owned();
+    a_slice.shrink_to_fit();
+    assert_eq!(a_slice, a_slice_clone);
+}
+
+#[test]
+fn array3d_swap_axis_0_dim() {
+    let mut raw_vec = Vec::new();
+    for i in 0..4 * 5 * 6 {
+        raw_vec.push(i);
+    }
+    let mut a = Array::from_shape_vec((4, 5, 6), raw_vec).unwrap();
+    a.swap_axes(0, 1);
+    let mut a_slice = a.slice_move(s![2..3, .., ..]);
+    let a_slice_clone = a_slice.view().to_owned();
+    a_slice.shrink_to_fit();
+    assert_eq!(a_slice, a_slice_clone);
+}
+
+#[test]
+fn array3d_swap_axis_dim() {
+    let mut raw_vec = Vec::new();
+    for i in 0..4 * 5 * 6 {
+        raw_vec.push(i);
+    }
+    let mut a = Array::from_shape_vec((4, 5, 6), raw_vec).unwrap();
+    a.swap_axes(2, 1);
+    let mut a_slice = a.slice_move(s![2..3, 0..3, 0..;2]);
+    let a_slice_clone = a_slice.view().to_owned();
+    a_slice.shrink_to_fit();
+    assert_eq!(a_slice, a_slice_clone);
+}
From f444a2e05ec0555c61542351b3a62611565a4b81 Mon Sep 17 00:00:00 2001
From: bokutou
Date: Sat, 25 Dec 2021 21:06:16 +0900
Subject: [PATCH 6/7] add test negative strides

---
 tests/shrink_to_fit.rs | 19 ++++++++++++++++---
 1 file changed, 16 insertions(+), 3 deletions(-)

diff --git a/tests/shrink_to_fit.rs b/tests/shrink_to_fit.rs
index eb875189f..124731b2f 100644
--- a/tests/shrink_to_fit.rs
+++ b/tests/shrink_to_fit.rs
@@ -1,7 +1,7 @@
 use ndarray::{s, Array};
 
 #[test]
-fn array3d_0_dim() {
+fn dim_0() {
     let mut raw_vec = Vec::new();
     for i in 0..4 * 5 * 6 {
         raw_vec.push(i);
@@ -14,7 +14,7 @@
 }
 
 #[test]
-fn array3d_swap_axis_0_dim() {
+fn swap_axis_dim_0() {
     let mut raw_vec = Vec::new();
     for i in 0..4 * 5 * 6 {
         raw_vec.push(i);
@@ -28,7 +28,7 @@
 }
 
 #[test]
-fn array3d_swap_axis_dim() {
+fn swap_axis_dim() {
     let mut raw_vec = Vec::new();
     for i in 0..4 * 5 * 6 {
         raw_vec.push(i);
@@ -40,3 +40,16 @@
     a_slice.shrink_to_fit();
     assert_eq!(a_slice, a_slice_clone);
 }
+
+#[test]
+fn stride_negative() {
+    let mut raw_vec = Vec::new();
+    for i in 0..4 * 5 * 6 {
+        raw_vec.push(i);
+    }
+    let mut a = Array::from_shape_vec((4, 5, 6), raw_vec).unwrap();
+    let mut a_slice = a.slice_move(s![2..3, 0..3, 0..;-1]);
+    let a_slice_clone = a_slice.view().to_owned();
+    a_slice.shrink_to_fit();
+    assert_eq!(a_slice, a_slice_clone);
+}
From 8e4f4ba9a2053d184d184b56776a8ddccb427b02 Mon Sep 17 00:00:00 2001
From: bokutou
Date: Sun, 26 Dec 2021 13:10:36 +0900
Subject: [PATCH 7/7] fix clippy

---
 src/impl_owned_array.rs | 10 +++++-----
 tests/shrink_to_fit.rs  |  2 +-
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/src/impl_owned_array.rs b/src/impl_owned_array.rs
index 4c8f36f83..b18a74142 100644
--- a/src/impl_owned_array.rs
+++ b/src/impl_owned_array.rs
@@ -452,7 +452,7 @@ where
         let mut stride_ = 1;
         for i in stride_order.iter() {
             shrinked_stride[*i] = stride_;
-            stride_ = stride_ * dim[*i];
+            stride_ *= dim[*i];
         }
 
         // Recover the multi-dimensional index corresponding to a flat offset under the shrunken strides.
@@ -463,20 +463,20 @@ where
             let mut index = D::zeros(dim.ndim());
             for i in stride_order_order.iter() {
                 index[*i] = offset / shrinked_stride[*i];
-                offset = offset % shrinked_stride[*i];
+                offset %= shrinked_stride[*i];
             }
             index
         };
 
         // Move each element to the offset it has under the shrunken strides.
         let ptr_offset = unsafe { self.offset_from_data_ptr_to_logical_ptr() };
-        self.ptr = unsafe { self.ptr.offset(ptr_offset * -1) };
+        self.ptr = unsafe { self.ptr.sub(ptr_offset as usize) };
         for offset in 0..self.len() {
             let index = offset_stride(offset);
             let old_offset = dim.stride_offset_checked(&strides, &index).unwrap();
             unsafe {
-                *self.ptr.as_ptr().offset(offset as isize) =
-                    *self.ptr.as_ptr().offset(old_offset + ptr_offset);
+                *self.ptr.as_ptr().add(offset as usize) =
+                    *self.ptr.as_ptr().add((old_offset + ptr_offset) as usize);
             }
         }
         self.strides = shrinked_stride;
diff --git a/tests/shrink_to_fit.rs b/tests/shrink_to_fit.rs
index 124731b2f..fd5768202 100644
--- a/tests/shrink_to_fit.rs
+++ b/tests/shrink_to_fit.rs
@@ -47,7 +47,7 @@ fn stride_negative() {
     for i in 0..4 * 5 * 6 {
         raw_vec.push(i);
     }
-    let mut a = Array::from_shape_vec((4, 5, 6), raw_vec).unwrap();
+    let a = Array::from_shape_vec((4, 5, 6), raw_vec).unwrap();
     let mut a_slice = a.slice_move(s![2..3, 0..3, 0..;-1]);
     let a_slice_clone = a_slice.view().to_owned();
     a_slice.shrink_to_fit();
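
A usage sketch for orientation (not part of the patches themselves; it assumes
the series above is applied, and, per patch 5, a `Copy` element type):

    use ndarray::{array, s};

    fn main() {
        // Slice a 3x3 array down to its first two columns. After
        // `slice_move`, the backing buffer still holds all 9 elements.
        let a = array![[0, 1, 2], [3, 4, 5], [6, 7, 8]];
        let mut a = a.slice_move(s![.., 0..2]);
        assert_eq!(a.len(), 6);

        // `shrink_to_fit` packs the 6 visible elements to the front of the
        // buffer and frees the excess capacity; the logical contents and the
        // relative stride order are unchanged.
        a.shrink_to_fit();
        assert_eq!(a, array![[0, 1], [3, 4], [6, 7]]);
    }

Unlike `Vec::shrink_to_fit`, which only trims unused capacity, the method added
here may also move elements, since the retained elements of a sliced array are
generally not contiguous at the start of the allocation.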