[onert] Remove UNUSED_RELEASE macro #14336

Merged: 1 commit, Nov 22, 2024
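For context, UNUSED_RELEASE is a cast-to-void helper defined in util/Utils.h to silence unused-variable warnings for values that only feed assert(), which compiles away in release (NDEBUG) builds. This PR removes the macro in favour of the standard C++17 [[maybe_unused]] attribute, or drops the unused value altogether. A minimal sketch of the before/after pattern — the function and variable names are illustrative, not taken from the diff:

#include <cassert>

#define UNUSED_RELEASE(a) (void)(a) // old helper from util/Utils.h, removed by this PR

void before(int rank)
{
  const int expected = 4;
  UNUSED_RELEASE(expected); // silences -Wunused-variable when NDEBUG strips the assert
  assert(rank == expected);
}

void after(int rank)
{
  [[maybe_unused]] const int expected = 4; // standard C++17 attribute, no project macro needed
  assert(rank == expected);
}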
18 changes: 5 additions & 13 deletions runtime/onert/backend/acl_cl/KernelGenerator.cc
@@ -30,7 +30,6 @@
#include "exec/NopFunction.h"
#include "exec/FunctionSequence.h"
#include "util/logging.h"
#include "util/Utils.h"
#include "AclKernelGen.h"

namespace onert
@@ -419,14 +418,11 @@ void KernelGenerator::visit(const ir::operation::Slice &node)
assert(_ctx.at(sizes_index).data());
auto beginData_base = _ctx.at(begins_index).data()->base();
auto sizeData_base = _ctx.at(sizes_index).data()->base();
-const int beginData_size = _ctx.at(begins_index).shape().num_elements();
-const int sizeData_size = _ctx.at(sizes_index).shape().num_elements();
+[[maybe_unused]] const int beginData_size = _ctx.at(begins_index).shape().num_elements();
+[[maybe_unused]] const int sizeData_size = _ctx.at(sizes_index).shape().num_elements();

using ir::DataType;

-UNUSED_RELEASE(beginData_size);
-UNUSED_RELEASE(sizeData_size);
-
assert(_ctx.at(begins_index).typeInfo().type() == DataType::INT32);
assert(_ctx.at(sizes_index).typeInfo().type() == DataType::INT32);
assert(beginData_size == input_rank);
@@ -486,16 +482,12 @@ void KernelGenerator::visit(const ir::operation::StridedSlice &node)
auto startData_base = _ctx.at(starts_index).data()->base();
auto endData_base = _ctx.at(ends_index).data()->base();
auto stridesData_base = _ctx.at(strides_index).data()->base();
-const int startData_size = _ctx.at(starts_index).shape().num_elements();
-const int endData_size = _ctx.at(ends_index).shape().num_elements();
-const int stridesData_size = _ctx.at(strides_index).shape().num_elements();
+[[maybe_unused]] const int startData_size = _ctx.at(starts_index).shape().num_elements();
+[[maybe_unused]] const int endData_size = _ctx.at(ends_index).shape().num_elements();
+[[maybe_unused]] const int stridesData_size = _ctx.at(strides_index).shape().num_elements();

using ir::DataType;

-UNUSED_RELEASE(startData_size);
-UNUSED_RELEASE(endData_size);
-UNUSED_RELEASE(stridesData_size);
-
assert(_ctx.at(starts_index).typeInfo().type() == DataType::INT32);
assert(_ctx.at(ends_index).typeInfo().type() == DataType::INT32);
assert(_ctx.at(strides_index).typeInfo().type() == DataType::INT32);
6 changes: 2 additions & 4 deletions runtime/onert/backend/acl_common/AclKernelGen.h
@@ -238,9 +238,8 @@ kernelGenFullyConnected(const ir::operation::FullyConnected &node, const ir::Ope

const auto input_rank = operands.at(input_index).shape().rank();

-const auto output_size =
+[[maybe_unused]] const auto output_size =
operands.at(output_index).shape().dim(operands.at(output_index).shape().rank() - 1);
-UNUSED_RELEASE(output_size);
assert(bias_index.undefined() || operands.at(bias_index).shape().dim(0) == output_size);
assert(operands.at(weight_index).shape().dim(0) == output_size);
const auto batch_size =
@@ -254,13 +253,12 @@ kernelGenFullyConnected(const ir::operation::FullyConnected &node, const ir::Ope
if (input_rank == 3 || input_rank == 4)
{
const auto &ifm_shape = operands.at(input_index).shape();
-auto feature_size = 1;
+[[maybe_unused]] auto feature_size = 1;
for (int i = 0; i < ifm_shape.rank(); ++i)
{
feature_size *= ifm_shape.dim(i);
}

-UNUSED_RELEASE(feature_size);
assert(feature_size == batch_size * input_size);

// for reshaping
21 changes: 6 additions & 15 deletions runtime/onert/backend/acl_neon/KernelGenerator.cc
@@ -29,7 +29,6 @@
#include "ir/InternalType.h"
#include "exec/NopFunction.h"
#include "util/logging.h"
#include "util/Utils.h"
#include "AclKernelGen.h"

namespace onert
@@ -751,8 +750,7 @@ void KernelGenerator::visit(const ir::operation::Pad &node)
padding_list[axis] = ::arm_compute::PaddingInfo{from[0], from[1]};
}

-const auto input_type = _ctx.at(input_index).typeInfo();
-UNUSED_RELEASE(input_type);
+[[maybe_unused]] const auto input_type = _ctx.at(input_index).typeInfo();
assert(input->info()->data_type() == acl_common::asDataType(input_type.type()));
assert(input->info()->quantization_info() ==
::arm_compute::QuantizationInfo(input_type.scale(), input_type.zero_point()));
@@ -1041,14 +1039,11 @@ void KernelGenerator::visit(const ir::operation::Slice &node)
{
auto beginData_base = _ctx.at(begins_index).data()->base();
auto sizeData_base = _ctx.at(sizes_index).data()->base();
-const int beginData_size = _ctx.at(begins_index).shape().num_elements();
-const int sizeData_size = _ctx.at(sizes_index).shape().num_elements();
+[[maybe_unused]] const int beginData_size = _ctx.at(begins_index).shape().num_elements();
+[[maybe_unused]] const int sizeData_size = _ctx.at(sizes_index).shape().num_elements();

using ir::DataType;

-UNUSED_RELEASE(beginData_size);
-UNUSED_RELEASE(sizeData_size);
-
assert(_ctx.at(begins_index).typeInfo().type() == DataType::INT32);
assert(_ctx.at(sizes_index).typeInfo().type() == DataType::INT32);
assert(beginData_size == input_rank);
@@ -1105,16 +1100,12 @@ void KernelGenerator::visit(const ir::operation::StridedSlice &node)
auto startData_base = _ctx.at(starts_index).data()->base();
auto endData_base = _ctx.at(ends_index).data()->base();
auto stridesData_base = _ctx.at(strides_index).data()->base();
-const int startData_size = _ctx.at(starts_index).shape().num_elements();
-const int endData_size = _ctx.at(ends_index).shape().num_elements();
-const int stridesData_size = _ctx.at(strides_index).shape().num_elements();
+[[maybe_unused]] const int startData_size = _ctx.at(starts_index).shape().num_elements();
+[[maybe_unused]] const int endData_size = _ctx.at(ends_index).shape().num_elements();
+[[maybe_unused]] const int stridesData_size = _ctx.at(strides_index).shape().num_elements();

using ir::DataType;

-UNUSED_RELEASE(startData_size);
-UNUSED_RELEASE(endData_size);
-UNUSED_RELEASE(stridesData_size);
-
assert(_ctx.at(starts_index).typeInfo().type() == DataType::INT32);
assert(_ctx.at(ends_index).typeInfo().type() == DataType::INT32);
assert(_ctx.at(strides_index).typeInfo().type() == DataType::INT32);
2 changes: 1 addition & 1 deletion runtime/onert/backend/cpu/KernelGenerator.cc
@@ -251,7 +251,7 @@ std::unique_ptr<exec::FunctionSequence> KernelGenerator::generate(ir::OperationI
auto dyn_ctx = std::make_shared<exec::FunctionSequence::DynamicTensorCtx>();
{
dyn_ctx->op = &_operations_ctx.at(ind);
-dyn_ctx->dynamic_shape_inferer = std::make_shared<exec::DynamicShapeInferer>(_ctx, _tensor_reg);
+dyn_ctx->dynamic_shape_inferer = std::make_shared<exec::DynamicShapeInferer>(_tensor_reg);
}
ret->dynamic_tensor_ctx(dyn_ctx);

4 changes: 2 additions & 2 deletions runtime/onert/backend/cpu/ops/OperationUtils.cc
@@ -85,10 +85,10 @@ void GetQuantizedConvolutionMultiplier(const IPortableTensor *input, const IPort
double *multiplier)
{
const double input_product_scale = input->data_scale() * filter->data_scale();
-const double bias_scale = (bias != nullptr) ? bias->data_scale() : input_product_scale;
+[[maybe_unused]] const double bias_scale =
+  (bias != nullptr) ? bias->data_scale() : input_product_scale;
const double output_scale = output->data_scale();
// The following conditions must be guaranteed by the training pipeline.
-UNUSED_RELEASE(bias_scale);
assert(std::abs(input_product_scale - bias_scale) <=
1e-6 * std::min(input_product_scale, bias_scale));
assert(input_product_scale >= 0);
6 changes: 4 additions & 2 deletions runtime/onert/backend/ruy/KernelGenerator.cc
@@ -46,7 +46,7 @@ std::unique_ptr<exec::FunctionSequence> KernelGenerator::generate(ir::OperationI
auto dyn_ctx = std::make_shared<exec::FunctionSequence::DynamicTensorCtx>();
{
dyn_ctx->op = &_operations_ctx.at(ind);
-dyn_ctx->dynamic_shape_inferer = std::make_shared<exec::DynamicShapeInferer>(_ctx, _tensor_reg);
+dyn_ctx->dynamic_shape_inferer = std::make_shared<exec::DynamicShapeInferer>(_tensor_reg);
}
ret->dynamic_tensor_ctx(dyn_ctx);

@@ -137,6 +137,8 @@ void KernelGenerator::visit(const ir::operation::FullyConnected &node)
const auto bias_index{node.getInputs().at(FullyConnected::Input::BIAS)};
const auto activation = node.param().activation;
const auto weights_format = node.param().weights_format;
+if (weights_format != ir::FullyConnectedWeightsFormat::Default)
+  throw std::runtime_error("Unsupported FullyConnected Weights Format");
Review comment (Contributor Author) on lines +140 to +141: weights_format param is not used on the ruy backend; the ruy backend supports the default format only.


auto output_tensor = _tensor_reg->getPortableTensor(output_index);
auto input_tensor = _tensor_reg->getPortableTensor(input_index);
@@ -145,7 +147,7 @@ void KernelGenerator::visit(const ir::operation::FullyConnected &node)

auto fn = std::make_unique<ops::FullyConnectedLayer>();

-fn->configure(input_tensor, weight_tensor, bias_tensor, activation, weights_format, output_tensor,
+fn->configure(input_tensor, weight_tensor, bias_tensor, activation, output_tensor,
_external_context);

_return_fn = std::move(fn);
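The review comment above explains the intent: since the ruy backend handles only the default weights format, the check moves into the kernel generator and the layer's configure() no longer takes the parameter. A small self-contained sketch of that design choice — the enum, function, and error message below are stand-ins, not onert's real declarations:

#include <stdexcept>

// Stand-in for ir::FullyConnectedWeightsFormat; the real enum lives in onert's IR headers.
enum class WeightsFormat { Default, NonDefault };

void generateFullyConnected(WeightsFormat weights_format)
{
  // Reject unsupported formats up front, so the layer never needs the parameter.
  if (weights_format != WeightsFormat::Default)
    throw std::runtime_error("Unsupported FullyConnected Weights Format");
  // ... fn->configure(input, weights, bias, activation, output, external_context) ...
}

A model that requests a non-default format now fails at kernel-generation time with a clear message, instead of the format being accepted and silently ignored.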
2 changes: 0 additions & 2 deletions runtime/onert/backend/ruy/ops/FullyConnectedLayer.cc
@@ -60,11 +60,9 @@ void FullyConnectedLayer::fullyConnectedFloat32()

void FullyConnectedLayer::configure(const IPortableTensor *input, const IPortableTensor *weights,
const IPortableTensor *bias, ir::Activation activation,
-ir::FullyConnectedWeightsFormat weights_format,
IPortableTensor *output,
const std::shared_ptr<ExternalContext> &external_context)
{
-UNUSED_RELEASE(weights_format);
_input = input;
_weights = weights;
_bias = bias;
3 changes: 1 addition & 2 deletions runtime/onert/backend/ruy/ops/FullyConnectedLayer.h
@@ -42,8 +42,7 @@ class FullyConnectedLayer : public ::onert::exec::IFunction
void fullyConnectedFloat32();

void configure(const IPortableTensor *input, const IPortableTensor *weights,
-const IPortableTensor *bias, ir::Activation activation,
-ir::FullyConnectedWeightsFormat weights_format, IPortableTensor *output,
+const IPortableTensor *bias, ir::Activation activation, IPortableTensor *output,
const std::shared_ptr<ExternalContext> &external_context);

void run() override;
5 changes: 2 additions & 3 deletions runtime/onert/backend/train/KernelGenerator.cc
@@ -34,7 +34,6 @@
#include <backend/Backend.h>
#include <backend/IConfig.h>
#include <memory>
-#include <util/Utils.h>
#include <util/logging.h>
#include <exec/DynamicShapeInferer.h>

@@ -622,8 +621,8 @@ IPortableTensor *KernelGenerator::getBackPropIn(const ir::IOperation &node,
_tensor_reg->getDisposableBackPropTensor(DisposableTensorIndex{op_index, operand_index});
if (disposable_tensor != nullptr)
{
-const auto &training_usedefs = _tgraph.trainingUseDefs().at(backwarding_operand_index);
-UNUSED_RELEASE(training_usedefs);
+[[maybe_unused]] const auto &training_usedefs =
+  _tgraph.trainingUseDefs().at(backwarding_operand_index);
assert(std::count_if(training_usedefs.getTrainingDefs().begin(),
training_usedefs.getTrainingDefs().end(),
[&](const ir::train::TrainingOperationIndex &op_index) {
3 changes: 0 additions & 3 deletions runtime/onert/backend/train/TensorPlanner.cc
@@ -30,9 +30,6 @@ TensorPlanner::TensorPlanner(const ir::train::TrainableGraph &tgraph,
: _tgraph{tgraph}, _external_operands{external_operands}
{
// DO NOTHING
-// TODO Remove the following lines
-UNUSED_RELEASE(_tgraph);
-UNUSED_RELEASE(_external_operands);
}

void TensorPlanner::planNonConstTensors(TensorBuilder *tensor_builder)
2 changes: 1 addition & 1 deletion runtime/onert/backend/xnnpack/KernelGenerator.cc
@@ -59,7 +59,7 @@ std::unique_ptr<exec::FunctionSequence> KernelGenerator::generate(ir::OperationI
auto dyn_ctx = std::make_shared<exec::FunctionSequence::DynamicTensorCtx>();
{
dyn_ctx->op = &_operations_ctx.at(ind);
-dyn_ctx->dynamic_shape_inferer = std::make_shared<exec::DynamicShapeInferer>(_ctx, _tensor_reg);
+dyn_ctx->dynamic_shape_inferer = std::make_shared<exec::DynamicShapeInferer>(_tensor_reg);
}
ret->dynamic_tensor_ctx(dyn_ctx);

4 changes: 1 addition & 3 deletions runtime/onert/core/include/backend/ITensor.h
@@ -26,7 +26,6 @@
#include "ir/Layout.h"
#include "ir/Shape.h"
#include "ir/Coordinates.h"
#include "util/Utils.h"

namespace onert
{
@@ -96,9 +95,8 @@ class ITensor
* @brief Set the shape of tenser to new_shape
* @note Higer dimension will be placed on front.
*/
-virtual void setShape(const ir::Shape &new_shape)
+virtual void setShape(const ir::Shape &)
{
-UNUSED_RELEASE(new_shape);
throw std::runtime_error("This backend does not support dynamic setShape");
}

12 changes: 3 additions & 9 deletions runtime/onert/core/include/exec/DynamicShapeInferer.h
@@ -36,12 +36,10 @@ namespace exec
class DynamicShapeInferer : public ir::OperationVisitor
{
public:
-DynamicShapeInferer(const ir::Operands &operands,
-                    const std::shared_ptr<backend::ITensorRegistry> &tensor_registry)
-  : _operands(operands), _tensor_registry(tensor_registry)
+DynamicShapeInferer(const std::shared_ptr<backend::ITensorRegistry> &tensor_registry)
+  : _tensor_registry(tensor_registry)
{
-UNUSED_RELEASE(_operands);
-UNUSED_RELEASE(_tensor_registry);
+// DO NOTHING
}

public:
@@ -120,10 +118,6 @@ class DynamicShapeInferer : public ir::OperationVisitor
bool currently_static(backend::ITensor *op_input) { return !op_input->is_dynamic(); }

private:
-/**
- * @brief To get operand-level info, e.g., ir::Operand::isConstant()
- */
-const ir::Operands &_operands;
Review comment (Contributor Author): _operands is not used in DynamicShapeInferer.

/**
* @brief To get tensor object and access tensor-level info, e.g., ITensor::buffer()
*/
6 changes: 1 addition & 5 deletions runtime/onert/core/include/util/Utils.h
@@ -25,8 +25,6 @@
#include "ir/Coordinates.h"
#include "ir/Shape.h"

-#define UNUSED_RELEASE(a) (void)(a)
-
template <size_t rest> struct ForEachDimension
{
template <typename L>
@@ -53,10 +51,8 @@ template <size_t rest> struct ForEachDimension
template <> struct ForEachDimension<0>
{
template <typename L>
-static void unroll(const onert::ir::Shape &shape, onert::ir::Coordinates &coords,
-                   L lambda_function)
+static void unroll(const onert::ir::Shape &, onert::ir::Coordinates &coords, L lambda_function)
{
-UNUSED_RELEASE(shape);
lambda_function(coords);
}
};
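Two replacement idioms appear in this PR once the macro is gone: [[maybe_unused]] when the value still feeds an assert(), and simply dropping the parameter name when the value is not needed at all (as in ForEachDimension<0>::unroll above and ITensor::setShape earlier). A minimal sketch of both, with illustrative types and names:

#include <cassert>

struct Shape { int rank() const { return 4; } };

// Idiom 1: the value is only read by assert(), so mark it [[maybe_unused]]
// to keep NDEBUG (release) builds warning-free.
void checkRank(const Shape &shape)
{
  [[maybe_unused]] const int rank = shape.rank();
  assert(rank == 4);
}

// Idiom 2: the value is never used, so leave the parameter unnamed.
void ignoreShape(const Shape & /*shape*/) {}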
7 changes: 2 additions & 5 deletions runtime/onert/core/src/backend/builtin/KernelGenerator.cc
@@ -36,9 +36,7 @@ KernelGenerator::KernelGenerator(const ir::Graph &graph, DynamicTensorManager *d
_tensor_reg{tensor_reg}, _tensor_registries{}, _executors{nullptr}, _model_index{},
_external_context{external_context}
{
-UNUSED_RELEASE(_graph);
-UNUSED_RELEASE(_tensor_registries);
-UNUSED_RELEASE(_executors);
+// DO NOTHING
}

std::unique_ptr<exec::FunctionSequence> KernelGenerator::generate(ir::OperationIndex ind)
@@ -52,8 +50,7 @@ std::unique_ptr<exec::FunctionSequence> KernelGenerator::generate(ir::OperationI
auto dyn_ctx = std::make_shared<exec::FunctionSequence::DynamicTensorCtx>();
{
dyn_ctx->op = &_graph.operations().at(ind);
-dyn_ctx->dynamic_shape_inferer =
-  std::make_unique<exec::DynamicShapeInferer>(_graph.operands(), _tensor_reg);
+dyn_ctx->dynamic_shape_inferer = std::make_unique<exec::DynamicShapeInferer>(_tensor_reg);
}
ret->dynamic_tensor_ctx(dyn_ctx);

21 changes: 10 additions & 11 deletions runtime/onert/core/src/compiler/ExecutorFactory.cc
@@ -174,8 +174,8 @@ createBackendContexts(compiler::ILoweredGraph &lgraph, bool linear_executor,
auto new_operand = std::make_unique<ir::Operand>(operand);
new_operand->clearDefUse();
operand.releaseData(); // Deref data of LoweredGraph
-auto new_operand_ind = partial_graph.addOperand(operand_ind, std::move(new_operand));
-UNUSED_RELEASE(new_operand_ind);
+[[maybe_unused]] auto new_operand_ind =
+  partial_graph.addOperand(operand_ind, std::move(new_operand));
assert(new_operand_ind == operand_ind);
});
// Separate operations into partial graphs
@@ -202,15 +202,14 @@ createBackendContexts(compiler::ILoweredGraph &lgraph, bool linear_executor,
const auto &operand = whole_graph.operands().at(operand_ind);
auto new_operand = std::make_unique<ir::Operand>(operand);
new_operand->clearDefUse();
-auto new_operand_ind = partial_graph.addOperand(operand_ind, std::move(new_operand));
-UNUSED_RELEASE(new_operand_ind);
+[[maybe_unused]] auto new_operand_ind =
+  partial_graph.addOperand(operand_ind, std::move(new_operand));
assert(new_operand_ind == operand_ind);

external_operands.add(operand_ind);
}

-auto new_op_ind = partial_graph.addOperation(op_ind, clone(operation));
-UNUSED_RELEASE(new_op_ind);
+[[maybe_unused]] auto new_op_ind = partial_graph.addOperation(op_ind, clone(operation));
assert(new_op_ind == op_ind);
}
});
@@ -649,7 +648,8 @@ exec::IExecutor *ExecutorFactory::createTrainableExecutor(
const onert::ir::IOperation &op) {
try
{
-UNUSED_RELEASE(dynamic_cast<const ir::train::ITrainableOperation &>(op));
+[[maybe_unused]] const auto &casted_op =
+  dynamic_cast<const ir::train::ITrainableOperation &>(op);
}
catch (std::bad_cast &)
{
@@ -674,8 +674,7 @@ exec::IExecutor *ExecutorFactory::createTrainableExecutor(
[&](const onert::ir::OperationIndex &op_index, const onert::ir::IOperation &) {
const auto &orig_tgraph = lowered_graph->trainable_graph();
const auto &trainable_op = orig_tgraph.operation(op_index);
-auto gen_index = tgraph->replaceOperation(op_index, trainable_op.clone());
-UNUSED_RELEASE(gen_index);
+[[maybe_unused]] auto gen_index = tgraph->replaceOperation(op_index, trainable_op.clone());
assert(gen_index == op_index);
});
data.graph->operands().iterate([&](const ir::OperandIndex &index, const ir::Operand &) {
@@ -684,8 +683,8 @@ exec::IExecutor *ExecutorFactory::createTrainableExecutor(
{
const auto &bwd_operand = orig_tgraph.backward_operands().at(index);
auto new_bwd_operand = std::make_unique<ir::Operand>(bwd_operand);
-auto gen_index = tgraph->addBackwardOperand(index, std::move(new_bwd_operand));
-UNUSED_RELEASE(gen_index);
+[[maybe_unused]] auto gen_index =
+  tgraph->addBackwardOperand(index, std::move(new_bwd_operand));
assert(gen_index == index);
}
});
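One subtlety in the createTrainableExecutor hunk above: the dynamic_cast is kept even though its result is unused, because a dynamic_cast to a reference type throws std::bad_cast when the operation is not trainable, and the surrounding try/catch relies on that. Binding the result to a [[maybe_unused]] reference preserves the check while avoiding the macro. A self-contained sketch of the idiom — the types and error message below are stand-ins for onert's real declarations:

#include <stdexcept>
#include <typeinfo>

struct IOperation { virtual ~IOperation() = default; };
struct ITrainableOperation : IOperation {};

void requireTrainable(const IOperation &op)
{
  try
  {
    // Only the potential std::bad_cast matters; the bound reference itself is unused.
    [[maybe_unused]] const auto &casted_op = dynamic_cast<const ITrainableOperation &>(op);
  }
  catch (const std::bad_cast &)
  {
    throw std::runtime_error("Operation is not trainable");
  }
}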
8 changes: 4 additions & 4 deletions runtime/onert/core/src/compiler/HEScheduler.cc
@@ -289,10 +289,10 @@ void HEScheduler::makeRank()
[&](const ir::OperationIndex &index, const ir::IOperation &) { DFSMaxRank(index); });

// Check that ranks are calculated for all operations(nodes)
-_graph->operations().iterate([&](const ir::OperationIndex &index, const ir::IOperation &) {
-  UNUSED_RELEASE(index);
-  assert(_op_to_rank->find(index) != _op_to_rank->end());
-});
+_graph->operations().iterate(
+  [&]([[maybe_unused]] const ir::OperationIndex &index, const ir::IOperation &) {
+    assert(_op_to_rank->find(index) != _op_to_rank->end());
+  });
VERBOSE(HEScheduler::makeRank) << "task prioritizing finished" << std::endl;
}
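The HEScheduler hunk places [[maybe_unused]] directly on a lambda parameter, which is valid C++17: attributes may appear on lambda parameters just as on ordinary function parameters. A tiny standalone sketch (names are illustrative):

#include <cassert>
#include <cstddef>
#include <vector>

int main()
{
  std::vector<int> ranks{1, 2, 3};
  // The parameter only feeds assert(), which disappears in NDEBUG builds.
  auto check = [&]([[maybe_unused]] std::size_t index) { assert(index < ranks.size()); };
  check(1);
  return 0;
}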
