Skip to content

Commit d4a0fae

Browse files
committed
style: fix clippy warnings
1 parent e4d91e1 commit d4a0fae

File tree

3 files changed

+19
-19
lines changed

3 files changed

+19
-19
lines changed

src/matrix.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -231,7 +231,7 @@ impl<T: Num + Copy + PartialOrd> Index<Coordinate> for DynamicMatrix<T> {
231231
type Output = T;
232232

233233
fn index(&self, index: Coordinate) -> &Self::Output {
234-
&self.tensor.get(&index).unwrap()
234+
self.tensor.get(&index).unwrap()
235235
}
236236
}
237237

src/tensor.rs

Lines changed: 17 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -81,7 +81,7 @@ impl<T: Num + PartialOrd + Copy> Tensor<T> {
8181
let removing_dims = axes.iter().map(|&i| self.shape[i]).collect::<Vec<_>>();
8282

8383
// We resolve to a scalar value
84-
if axes.is_empty() | (remaining_dims.len() == 0) {
84+
if axes.is_empty() | remaining_dims.is_empty() {
8585
let sum: T = self.data.iter().fold(T::zero(), |acc, x| acc + *x);
8686
return Tensor::new(&shape![1].unwrap(), &[sum]).unwrap();
8787
}
@@ -100,7 +100,7 @@ impl<T: Num + PartialOrd + Copy> Tensor<T> {
100100
}
101101

102102
let value = *t.get(&target).unwrap() + *self.get(&indices).unwrap();
103-
let _ = t.set(&target, value).unwrap();
103+
t.set(&target, value).unwrap();
104104
}
105105
}
106106

@@ -119,7 +119,7 @@ impl<T: Num + PartialOrd + Copy> Tensor<T> {
119119
result
120120
})
121121
.collect();
122-
let n = if removing_dims_t.len() != 0 {
122+
let n = if !removing_dims_t.is_empty() {
123123
removing_dims_t.iter().fold(T::one(), |acc, x| acc * *x)
124124
} else {
125125
let mut sum = T::zero();
@@ -144,7 +144,7 @@ impl<T: Num + PartialOrd + Copy> Tensor<T> {
144144
})
145145
.collect();
146146

147-
let n = if removing_dims_t.len() != 0 {
147+
let n = if !removing_dims_t.is_empty() {
148148
removing_dims_t.iter().fold(T::one(), |acc, x| acc * *x)
149149
} else {
150150
let mut sum = T::zero();
@@ -167,7 +167,7 @@ impl<T: Num + PartialOrd + Copy> Tensor<T> {
167167
let removing_dims = axes.iter().map(|&i| self.shape[i]).collect::<Vec<_>>();
168168

169169
// We resolve to a scalar value
170-
if axes.is_empty() | (remaining_dims.len() == 0) {
170+
if axes.is_empty() | remaining_dims.is_empty() {
171171
let avg: T = self.data.iter().fold(T::zero(), |acc, x| acc + *x) / n;
172172
let var: T = self
173173
.data
@@ -195,7 +195,7 @@ impl<T: Num + PartialOrd + Copy> Tensor<T> {
195195

196196
let centered = *self.get(&indices).unwrap() - *mean.get(&target).unwrap();
197197
let value = *t.get(&target).unwrap() + centered * centered;
198-
let _ = t.set(&target, value).unwrap();
198+
t.set(&target, value).unwrap();
199199
}
200200
}
201201

@@ -216,7 +216,7 @@ impl<T: Num + PartialOrd + Copy> Tensor<T> {
216216
let removing_dims = axes.iter().map(|&i| self.shape[i]).collect::<Vec<_>>();
217217

218218
// We resolve to a scalar value
219-
if axes.is_empty() | (remaining_dims.len() == 0) {
219+
if axes.is_empty() | remaining_dims.is_empty() {
220220
let min: T = self
221221
.data
222222
.iter()
@@ -268,7 +268,7 @@ impl<T: Num + PartialOrd + Copy> Tensor<T> {
268268
let removing_dims = axes.iter().map(|&i| self.shape[i]).collect::<Vec<_>>();
269269

270270
// We resolve to a scalar value
271-
if axes.is_empty() | (remaining_dims.len() == 0) {
271+
if axes.is_empty() | remaining_dims.is_empty() {
272272
let max: T = self
273273
.data
274274
.iter()
@@ -326,7 +326,7 @@ impl<T: Float + PartialOrd + Copy> Tensor<T> {
326326
pub fn pow(&self, power: T) -> Tensor<T> {
327327
let mut result = Tensor::zeros(&self.shape);
328328
for i in 0..self.size() {
329-
result.data[i] = self.data[i].clone().powf(power);
329+
result.data[i] = self.data[i].powf(power);
330330
}
331331
result
332332
}
@@ -339,7 +339,7 @@ impl<T: Num + PartialOrd + Copy> Mul<T> for Tensor<T> {
339339
fn mul(self, rhs: T) -> Tensor<T> {
340340
let mut result = Tensor::zeros(&self.shape);
341341
for i in 0..self.size() {
342-
result.data[i] = self.data[i].clone() * rhs;
342+
result.data[i] = self.data[i] * rhs;
343343
}
344344
result
345345
}
@@ -352,7 +352,7 @@ impl<T: Num + PartialOrd + Copy> Add<T> for Tensor<T> {
352352
fn add(self, rhs: T) -> Tensor<T> {
353353
let mut result = Tensor::zeros(&self.shape);
354354
for i in 0..self.size() {
355-
result.data[i] = self.data[i].clone() + rhs;
355+
result.data[i] = self.data[i] + rhs;
356356
}
357357
result
358358
}
@@ -366,7 +366,7 @@ impl<T: Num + PartialOrd + Copy> Add<Tensor<T>> for Tensor<T> {
366366
assert!(self.shape == rhs.shape);
367367
let mut result = Tensor::zeros(&self.shape);
368368
for i in 0..self.size() {
369-
result.data[i] = self.data[i].clone() + rhs.data[i].clone();
369+
result.data[i] = self.data[i] + rhs.data[i];
370370
}
371371
result
372372
}
@@ -395,7 +395,7 @@ impl<T: Num + PartialOrd + Copy> Sub<T> for Tensor<T> {
395395
fn sub(self, rhs: T) -> Tensor<T> {
396396
let mut result = Tensor::zeros(&self.shape);
397397
for i in 0..self.size() {
398-
result.data[i] = self.data[i].clone() - rhs;
398+
result.data[i] = self.data[i] - rhs;
399399
}
400400
result
401401
}
@@ -409,7 +409,7 @@ impl<T: Num + PartialOrd + Copy> Sub<Tensor<T>> for Tensor<T> {
409409
assert!(self.shape == rhs.shape);
410410
let mut result = Tensor::zeros(&self.shape);
411411
for i in 0..self.size() {
412-
result.data[i] = self.data[i].clone() - rhs.data[i].clone();
412+
result.data[i] = self.data[i] - rhs.data[i];
413413
}
414414
result
415415
}
@@ -439,7 +439,7 @@ impl<T: Num + PartialOrd + Copy> Mul<Tensor<T>> for Tensor<T> {
439439
assert!(self.shape == rhs.shape);
440440
let mut result = Tensor::zeros(&self.shape);
441441
for i in 0..self.size() {
442-
result.data[i] = self.data[i].clone() * rhs.data[i].clone();
442+
result.data[i] = self.data[i] * rhs.data[i];
443443
}
444444
result
445445
}
@@ -468,7 +468,7 @@ impl<T: Num + PartialOrd + Copy> Div<T> for Tensor<T> {
468468
fn div(self, rhs: T) -> Tensor<T> {
469469
let mut result = Tensor::zeros(&self.shape);
470470
for i in 0..self.size() {
471-
result.data[i] = self.data[i].clone() / rhs;
471+
result.data[i] = self.data[i] / rhs;
472472
}
473473
result
474474
}
@@ -482,7 +482,7 @@ impl<T: Num + PartialOrd + Copy> Div<Tensor<T>> for Tensor<T> {
482482
assert!(self.shape == rhs.shape);
483483
let mut result = Tensor::zeros(&self.shape);
484484
for i in 0..self.size() {
485-
result.data[i] = self.data[i].clone() / rhs.data[i].clone();
485+
result.data[i] = self.data[i] / rhs.data[i];
486486
}
487487
result
488488
}

src/vector.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -219,7 +219,7 @@ impl<T: Num + Copy + PartialOrd> Index<usize> for DynamicVector<T> {
219219
type Output = T;
220220

221221
fn index(&self, index: usize) -> &Self::Output {
222-
&self.tensor.get(&coord![index].unwrap()).unwrap()
222+
self.tensor.get(&coord![index].unwrap()).unwrap()
223223
}
224224
}
225225

0 commit comments

Comments (0)