Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add a padding operator. #95

Merged
merged 11 commits into from
Jul 17, 2021
Merged
Show file tree
Hide file tree
Changes from 7 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions make/Makefile.common
Original file line number Diff line number Diff line change
Expand Up @@ -82,6 +82,7 @@ TESTS = smaug/core/tensor_test.cpp \
smaug/operators/split_op_test.cpp \
smaug/operators/reshape_op_test.cpp \
smaug/operators/repeat_op_test.cpp \
smaug/operators/padding_op_test.cpp \
smaug/operators/control_flow_ops_test.cpp \
smaug/operators/smv/smv_convolution_tiling_test.cpp \
smaug/operators/smv/smv_convolution_op_test.cpp \
Expand Down
35 changes: 19 additions & 16 deletions smaug/core/backend.cpp
Original file line number Diff line number Diff line change
@@ -1,38 +1,39 @@
#include "smaug/core/backend.h"
#include "smaug/operators/batch_norm_op.h"
#include "smaug/operators/concat_op.h"
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Code rearrangement generally is confusing :) Moving forward, please move this to a separate PR.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I definitely appreciate you sorting all the includes in alphabetical order! It's certainly cleaner than before. But yes, refactoring and code cleanups should be done separately if it's not related to the main PR purpose.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'll fix this

#include "smaug/operators/control_flow_ops.h"
#include "smaug/operators/convolution_op.h"
#include "smaug/operators/data_op.h"
#include "smaug/operators/depthwise_convolution_op.h"
#include "smaug/operators/eltwise_add_op.h"
#include "smaug/operators/eltwise_mul_op.h"
#include "smaug/operators/less_op.h"
#include "smaug/operators/greater_op.h"
#include "smaug/operators/control_flow_ops.h"
#include "smaug/operators/elu_op.h"
#include "smaug/operators/greater_op.h"
#include "smaug/operators/inner_product_op.h"
#include "smaug/operators/less_op.h"
#include "smaug/operators/padding_op.h"
#include "smaug/operators/pooling_op.h"
#include "smaug/operators/relu_op.h"
#include "smaug/operators/reorder_op.h"
#include "smaug/operators/concat_op.h"
#include "smaug/operators/split_op.h"
#include "smaug/operators/reshape_op.h"
#include "smaug/operators/repeat_op.h"
#include "smaug/operators/reshape_op.h"
#include "smaug/operators/sigmoid_op.h"
#include "smaug/operators/softmax_op.h"
#include "smaug/operators/tanh_op.h"
#include "smaug/operators/smv/smv_batch_norm_op.h"
#include "smaug/operators/smv/smv_convolution_op.h"
#include "smaug/operators/smv/smv_eltwise_add_op.h"
#include "smaug/operators/smv/smv_eltwise_mul_op.h"
#include "smaug/operators/smv/smv_elu_op.h"
#include "smaug/operators/smv/smv_greater_op.h"
#include "smaug/operators/smv/smv_inner_product_op.h"
#include "smaug/operators/smv/smv_less_op.h"
#include "smaug/operators/smv/smv_pooling_op.h"
#include "smaug/operators/smv/smv_batch_norm_op.h"
#include "smaug/operators/smv/smv_relu_op.h"
#include "smaug/operators/smv/smv_elu_op.h"
#include "smaug/operators/smv/smv_tanh_op.h"
#include "smaug/operators/smv/smv_sigmoid_op.h"
#include "smaug/operators/smv/smv_softmax_op.h"
#include "smaug/operators/smv/smv_eltwise_add_op.h"
#include "smaug/operators/smv/smv_eltwise_mul_op.h"
#include "smaug/operators/smv/smv_less_op.h"
#include "smaug/operators/smv/smv_greater_op.h"
#include "smaug/operators/smv/smv_tanh_op.h"
#include "smaug/operators/softmax_op.h"
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

(Also, if this was meant to sort the header includes, these three should not be at the bottom :] )

#include "smaug/operators/split_op.h"
#include "smaug/operators/tanh_op.h"

namespace smaug {

Expand Down Expand Up @@ -79,6 +80,7 @@ DEF_CREATE_OP(EluOp, ReferenceBackend)
DEF_CREATE_OP(SeluOp, ReferenceBackend)
DEF_CREATE_OP(TanhOp, ReferenceBackend)
DEF_CREATE_OP(HardTanhOp, ReferenceBackend)
DEF_CREATE_OP(PaddingOp, ReferenceBackend)

DEF_CREATE_SMV_OP(ConvolutionOp)
DEF_CREATE_SMV_OP(InnerProductOp)
Expand Down Expand Up @@ -108,7 +110,9 @@ DEF_CREATE_OP(RepeatOp, SmvBackend)
DEF_CREATE_OP(FlattenOp, SmvBackend)
DEF_CREATE_OP(SwitchOp, SmvBackend)
DEF_CREATE_OP(MergeOp, SmvBackend)
DEF_CREATE_OP(PaddingOp, SmvBackend)

// for simple tracing.
namespace ref {
const unsigned kConvolutionHw = 0x0001;
const unsigned kInnerProductHw = 0x0002;
Expand Down Expand Up @@ -140,5 +144,4 @@ float* spad1;
float* spad2;
} // namespace smv


} // namespace smaug
6 changes: 4 additions & 2 deletions smaug/core/backend.h
Original file line number Diff line number Diff line change
Expand Up @@ -58,6 +58,8 @@ template <typename Backend> class EluOp;
template <typename Backend> class SeluOp;
template <typename Backend> class TanhOp;
template <typename Backend> class HardTanhOp;
template <typename Backend> class PaddingOp;

#endif

/**
Expand Down Expand Up @@ -123,9 +125,9 @@ class ReferenceBackend {
DECL_CREATE_OP(SeluOp);
DECL_CREATE_OP(TanhOp);
DECL_CREATE_OP(HardTanhOp);
DECL_CREATE_OP(PaddingOp);

#undef DECL_CREATE_OP

};

/**
Expand Down Expand Up @@ -238,10 +240,10 @@ class SmvBackend {
DECL_CREATE_OP(FlattenOp);
DECL_CREATE_OP(SwitchOp);
DECL_CREATE_OP(MergeOp);
DECL_CREATE_OP(PaddingOp);

#undef DECL_SMV_OP
#undef DECL_CREATE_OP

};

} // namespace smaug
Expand Down
51 changes: 28 additions & 23 deletions smaug/core/network_builder.cpp
Original file line number Diff line number Diff line change
@@ -1,56 +1,57 @@
#include <iostream>
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Again, it'd better to put the code rearrangement into a separate PR.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'll fix this.

#include <fstream>
#include <fcntl.h>
#include <fstream>
#include <iostream>

#include <google/protobuf/text_format.h>
#include <google/protobuf/io/zero_copy_stream_impl.h>
#include <google/protobuf/text_format.h>

#include "smaug/core/backend.h"
#include "smaug/core/tensor.h"
#include "smaug/core/graph.pb.h"
#include "smaug/core/network.h"
#include "smaug/core/network_builder.h"
#include "smaug/core/workspace.h"
#include "smaug/core/graph.pb.h"
#include "smaug/core/node.pb.h"
#include "smaug/core/tensor.h"
#include "smaug/core/tensor.pb.h"
#include "smaug/core/types.pb.h"
#include "smaug/operators/common.h"
#include "smaug/core/workspace.h"
#include "smaug/operators/batch_norm_op.h"
#include "smaug/operators/common.h"
#include "smaug/operators/concat_op.h"
#include "smaug/operators/control_flow_ops.h"
#include "smaug/operators/convolution_op.h"
#include "smaug/operators/data_op.h"
#include "smaug/operators/depthwise_convolution_op.h"
#include "smaug/operators/eltwise_add_op.h"
#include "smaug/operators/eltwise_mul_op.h"
#include "smaug/operators/less_op.h"
#include "smaug/operators/greater_op.h"
#include "smaug/operators/control_flow_ops.h"
#include "smaug/operators/elu_op.h"
#include "smaug/operators/greater_op.h"
#include "smaug/operators/inner_product_op.h"
#include "smaug/operators/less_op.h"
#include "smaug/operators/padding_op.h"
#include "smaug/operators/pooling_op.h"
#include "smaug/operators/relu_op.h"
#include "smaug/operators/reorder_op.h"
#include "smaug/operators/concat_op.h"
#include "smaug/operators/split_op.h"
#include "smaug/operators/reshape_op.h"
#include "smaug/operators/repeat_op.h"
#include "smaug/operators/reshape_op.h"
#include "smaug/operators/sigmoid_op.h"
#include "smaug/operators/softmax_op.h"
#include "smaug/operators/tanh_op.h"
#include "smaug/operators/smv/smv_batch_norm_op.h"
#include "smaug/operators/smv/smv_convolution_op.h"
#include "smaug/operators/smv/smv_eltwise_add_op.h"
#include "smaug/operators/smv/smv_eltwise_mul_op.h"
#include "smaug/operators/smv/smv_elu_op.h"
#include "smaug/operators/smv/smv_greater_op.h"
#include "smaug/operators/smv/smv_inner_product_op.h"
#include "smaug/operators/smv/smv_less_op.h"
#include "smaug/operators/smv/smv_pooling_op.h"
#include "smaug/operators/smv/smv_batch_norm_op.h"
#include "smaug/operators/smv/smv_relu_op.h"
#include "smaug/operators/smv/smv_elu_op.h"
#include "smaug/operators/smv/smv_tanh_op.h"
#include "smaug/operators/smv/smv_sigmoid_op.h"
#include "smaug/operators/smv/smv_softmax_op.h"
#include "smaug/operators/smv/smv_eltwise_add_op.h"
#include "smaug/operators/smv/smv_eltwise_mul_op.h"
#include "smaug/operators/smv/smv_less_op.h"
#include "smaug/operators/smv/smv_greater_op.h"
#include "smaug/utility/utils.h"
#include "smaug/operators/smv/smv_tanh_op.h"
#include "smaug/operators/softmax_op.h"
#include "smaug/operators/split_op.h"
#include "smaug/operators/tanh_op.h"
#include "smaug/utility/debug_stream.h"
#include "smaug/utility/utils.h"

using namespace smaug;
using namespace std;
Expand Down Expand Up @@ -263,6 +264,10 @@ static void createAndAddOperator(const NodeProto& node,
} else if (type == OpType::Tanh) {
auto op = Backend::createTanhOp(name, workspace);
network->addOperator(op);
} else if (type == OpType::Padding) {
auto op = Backend::createPaddingOp(name, workspace);
op->setPaddingSize(node.params().padding_params().padding_size());
network->addOperator(op);
} else if (type == OpType::HardTanh) {
auto op = Backend::createHardTanhOp(name, workspace);
network->addOperator(op);
Expand Down
5 changes: 5 additions & 0 deletions smaug/core/node.proto
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,10 @@ message PoolParams {
repeated int32 pool_size = 2;
}

// Parameters for the Padding operator. padding_size holds one (begin, end)
// pair of pad amounts per tensor dimension, flattened as:
// [dim0_begin, dim0_end, dim1_begin, dim1_end, ...].
message PaddingParams {
repeated int32 padding_size = 1;
}

message ConcatParams {
int32 concat_axis = 1;
}
Expand Down Expand Up @@ -52,6 +56,7 @@ message Params {
PoolParams pool_params = 2;
ConcatParams concat_params = 4;
SplitParams split_params = 5;
PaddingParams padding_params = 6;
}
ActivationParams act_params = 3;
}
Expand Down
3 changes: 2 additions & 1 deletion smaug/core/types.proto
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ enum DataLayout {
NCT = 16;
NTC = 32;
N = 64;
X = 127; // Elementwise
X = 127; // Elementwise
EndDataLayout = 64;
}

Expand Down Expand Up @@ -64,6 +64,7 @@ enum OpType {
GreaterEqual = 26;
Switch = 27;
Merge = 28;
Padding = 29;
}

enum PaddingType {
Expand Down
98 changes: 98 additions & 0 deletions smaug/operators/padding_op.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,98 @@
#ifndef _OPERATORS_PADDING_OP_H_
#define _OPERATORS_PADDING_OP_H_

#include "smaug/core/backend.h"
#include "smaug/core/operator.h"
#include "smaug/core/tensor.h"
#include "smaug/core/workspace.h"
#include <google/protobuf/repeated_field.h>
using namespace google::protobuf;

namespace smaug {

/** \ingroup Operators
* \brief Pad a given tensor in any number of dimensions with arbitrary size.
*
* This has a software-based implementation.
*
* @tparam Backend The Backend that sets Alignment.
*/
template <typename Backend>
class PaddingOp : public Operator {
public:
PaddingOp(const std::string& name, Workspace* workspace)
: Operator(name, OpType::Padding, workspace) {
inputs.resize(kNumInputs, nullptr);
outputs.resize(kNumOutputs, nullptr);
}

/**
* Set the paddingSize of the Tensor along each dimension.
* The paddingSize is orgainized as <dim1_forward, dim1_backward, ...
* ,dimk_backward>
*/
void setPaddingSize(RepeatedField<google::protobuf::int32> const& val) {
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This should be const RepeatedField&, not RepeatedField const&.

Also, what does "forward" and "backward" mean?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'll update this.

std::vector<double> paddingSize(val.begin(), val.end());
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Why converting to double here?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I change it to int.

}

void setPaddingSize(std::vector<int> const& val) { paddingSize = val; }

std::vector<int> getPaddingSize() { return paddingSize; }
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This should be a const member function.


void run() override {
Tensor* input = getInput(0);
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Use the enums defined below (kInput and kOutput) to access all the input/output tensors, here and everywhere else in this file.

Tensor* output = getOutput(0);
int ndims = input->ndims();
const std::vector<int> inputDims = input->getShape().dims();
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

both inputDims and outputDims should be const-ref.

const std::vector<int> outputDims = output->getShape().dims();
int total_dim = 1;
for (int i : outputDims) {
total_dim *= i;
}
std::vector<float> vf(total_dim, 0);
output->fillData(vf.data(), vf.size());
std::vector<int> destOrigin, paddingBegin, srcOrigin;
for (int i = 0; i < ndims; i++) {
paddingBegin.push_back(paddingSize[2 * i]);
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

use paddingSize.at(2*i) instead; this will throw an exception if you go out of bounds, whereas paddingSize[] will attempt to add an element at that position.

srcOrigin.push_back(0);
}
destOrigin = std::vector<int>(paddingBegin);
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

There's no need to declare destOrigin earlier and then re-initialize it here - that causes the vector to be constructed twice. Just declare it here directly: std::vector<int> destOrigin = ....

std::vector<int> regionSize = inputDims;
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Same here - no need to make a copy of inputDims, just use it directly. I think you're trying to make it more clear what each vector is representing in the copyTensorRegion call but there's no need since the API documents it very clearly already.

copyTensorRegion(output, input, destOrigin, srcOrigin, regionSize);
}

// Optional override for testing purposes.
void createAllTensors() override {
Tensor* input = getInput(0);
int ndims = input->ndims();
std::vector<int> dims = input->getShape().dims();
for (int i = 0; i < ndims; i++) {
dims[i] += (paddingSize[2 * i] + paddingSize[2 * i + 1]);
}
TensorShape shape(
dims, input->getShape().getLayout(), Backend::Alignment);
Tensor* output = new Tensor(name, shape);
workspace->addTensor(output);
outputs.at(0) = output;
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Same here - use enums instead of hardcoded constants.

}

// Optional but recommended function to verify operator parameters.
bool validate() override {
Tensor* input = getInput(0);
int ndims = input->ndims();
if (paddingSize.size() != 2 * ndims) {
return false;
}
return Operator::validate();
}

enum { kInputs, kNumInputs };
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

nit: kInput instead of kInputs (you only have one), and likewise for outputs.

enum { kOutputs, kNumOutputs };

private:
std::vector<int> paddingSize = {};
};

} // namespace smaug

#endif
Loading