[onert] Apply structured binding in ARMComputeEx #14664

Merged
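This PR replaces positional `.first`/`.second` and `std::get<N>` accesses on `std::pair`/`std::tuple` return values across ARMComputeEx with C++17 structured bindings, so each unpacked value gets a descriptive name. The sketch below is illustrative only, not code from this PR; it assumes a `stride()`-like helper returning `std::pair<unsigned int, unsigned int>`, matching the shape of `PadStrideInfo::stride()` used throughout the diffs.

```cpp
#include <utility>

// Hypothetical stand-in for PadStrideInfo::stride().
std::pair<unsigned int, unsigned int> stride() { return {2U, 3U}; }

int main()
{
  // Before: positional access hides what each element means.
  unsigned int sx_old = stride().first;
  unsigned int sy_old = stride().second;

  // After (C++17): one declaration names both elements.
  auto [sx, sy] = stride();

  return (sx == sx_old && sy == sy_old) ? 0 : 1;
}
```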
```diff
@@ -77,8 +77,7 @@ inline TensorShape compute_transposeconv_upsampled_shape(
   unsigned int invalid_bottom, unsigned int &pad_left, unsigned int &pad_right,
   unsigned int &pad_top, unsigned int &pad_bottom)
 {
-  unsigned int sx = info.stride().first;
-  unsigned int sy = info.stride().second;
+  auto [sx, sy] = info.stride();
   const DataLayout data_layout = input.data_layout();
   const size_t idx_w = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
   const size_t idx_h = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
@@ -139,8 +138,9 @@ compute_transposeconv_output_shape(const std::pair<unsigned int, unsigned int> &
   const int batch_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);
 
   TensorShape out_shape{input_shape};
-  out_shape.set(width_idx, out_dims.first);
-  out_shape.set(height_idx, out_dims.second);
+  auto [width, height] = out_dims;
+  out_shape.set(width_idx, width);
+  out_shape.set(height_idx, height);
   out_shape.set(channel_idx, weights_shape[batch_idx]);
   return out_shape;
 }
```
```diff
@@ -115,9 +115,9 @@ void CLEmbeddingLookupKernel::configure(const ICLTensor *input, ICLTensor *outpu
     static_cast<cl::Kernel>(CLKernelLibraryEx::get().create_kernel(kernel_name.str(), build_opts));
 
   // Configure kernel window
-  auto win_config = validate_and_configure_window(input->info(), output->info());
-  ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
-  ICLKernel::configure_internal(win_config.second);
+  auto [error, window] = validate_and_configure_window(input->info(), output->info());
+  ARM_COMPUTE_ERROR_THROW_ON(error);
+  ICLKernel::configure_internal(window);
 }
 
 void CLEmbeddingLookupKernel::run(const Window &window, cl::CommandQueue &queue)
```
```diff
@@ -132,9 +132,9 @@ void CLHashtableLookupKernel::configure(const ICLTensor *lookups, const ICLTenso
     static_cast<cl::Kernel>(CLKernelLibraryEx::get().create_kernel(kernel_name.str(), build_opts));
 
   // Configure kernel window
-  auto win_config = validate_and_configure_window(input->info(), output->info());
-  ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
-  ICLKernel::configure_internal(win_config.second);
+  auto [error, window] = validate_and_configure_window(input->info(), output->info());
+  ARM_COMPUTE_ERROR_THROW_ON(error);
+  ICLKernel::configure_internal(window);
 }
 
 void CLHashtableLookupKernel::run(const Window &window, cl::CommandQueue &queue)
```
```diff
@@ -139,9 +139,9 @@ void CLInstanceNormalizationLayerKernelEx::configure(ICLTensor *input, ICLTensor
     CLKernelLibraryEx::get().create_kernel("instance_normalization_ex", build_opts.options()));
 
   // Configure kernel window
-  auto win_config = validate_and_configure_window(_input->info(), _output->info());
-  ARM_COMPUTE_ERROR_THROW_ON(std::get<0>(win_config));
-  ICLKernel::configure_internal(std::get<1>(win_config));
+  auto [error, window] = validate_and_configure_window(_input->info(), _output->info());
+  ARM_COMPUTE_ERROR_THROW_ON(error);
+  ICLKernel::configure_internal(window);
 }
 
 Status CLInstanceNormalizationLayerKernelEx::validate(const ITensorInfo *input,
```
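Structured bindings decompose `std::tuple` exactly as they do `std::pair`, which is why the `std::get<0>`/`std::get<1>` pair above collapses into one declaration. A minimal sketch with hypothetical stand-in types for `Status` and `Window` (the real `validate_and_configure_window` here returns a pair or tuple of those):

```cpp
#include <tuple>

struct Status { bool ok = true; }; // stand-in for arm_compute::Status
struct Window { int dim = 0; };    // stand-in for arm_compute::Window

std::tuple<Status, Window> configure() { return {Status{}, Window{4}}; }

int main()
{
  // Before: index the result by position.
  auto win_config = configure();
  bool ok_before = std::get<0>(win_config).ok;

  // After: name both elements up front.
  auto [error, window] = configure();
  return (error.ok == ok_before && window.dim == 4) ? 0 : 1;
}
```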
```diff
@@ -120,13 +120,13 @@ void CLOneHotKernel::configure_common(const ICLTensor *indices, const ICLTensor
   ARM_COMPUTE_ERROR_THROW_ON(
     validate_arguments(indices->info(), on_value->info(), output->info(), depth, axis));
   // Configure kernel window
-  auto win_config =
+  auto [error, window] =
     validate_and_configure_window(indices->info(), on_value->info(), output->info(), depth, axis);
-  ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
+  ARM_COMPUTE_ERROR_THROW_ON(error);
   if (_is_off_value_memset)
   {
     // Replace window with calculated by indices info
-    win_config.second = calculate_max_window(*indices->info(), Steps());
+    window = calculate_max_window(*indices->info(), Steps());
   }
   _indices = indices;
   _on_value = on_value;
@@ -144,7 +144,7 @@ void CLOneHotKernel::configure_common(const ICLTensor *indices, const ICLTensor
   const std::string kernel_name = _is_off_value_memset ? "one_hot_only_on_value" : "one_hot";
   _kernel = static_cast<cl::Kernel>(
     CLKernelLibraryEx::get().create_kernel(kernel_name, build_opts.options()));
-  ICLKernel::configure_internal(win_config.second);
+  ICLKernel::configure_internal(window);
 }
 Status CLOneHotKernel::validate(const ITensorInfo *indices, const ITensorInfo *on_value,
                                 const ITensorInfo *off_value, const ITensorInfo *output, int depth,
```
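One subtlety the OneHot change leans on: a by-value structured binding names the members of a hidden copy of the returned object, and those names are assignable like ordinary locals. That is what lets `window` be overwritten by `calculate_max_window(...)` before it reaches `configure_internal`. A minimal illustration:

```cpp
#include <utility>

std::pair<bool, int> make() { return {true, 2}; }

int main()
{
  auto [ok, value] = make(); // ok and value name members of a hidden copy
  value = 42;                // assigning through the binding is allowed
  return (ok && value == 42) ? 0 : 1;
}
```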
```diff
@@ -65,8 +65,9 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output,
   const auto is_reflect = static_cast<unsigned int>(mode == PaddingMode::REFLECT);
   for (size_t i = 0; i < padding.size(); ++i)
   {
-    ARM_COMPUTE_RETURN_ERROR_ON(padding.at(i).first > (input->dimension(i) - is_reflect));
-    ARM_COMPUTE_RETURN_ERROR_ON(padding.at(i).second > (input->dimension(i) - is_reflect));
+    auto [pad_before, pad_after] = padding.at(i);
+    ARM_COMPUTE_RETURN_ERROR_ON(pad_before > (input->dimension(i) - is_reflect));
+    ARM_COMPUTE_RETURN_ERROR_ON(pad_after > (input->dimension(i) - is_reflect));
   }
 }
 
@@ -152,10 +153,10 @@ void CLPadLayerKernelEx::configure(const CLCompileContext &compile_context, cons
 
   // Configure window
   unsigned int vec_size;
-  auto win_config = validate_and_configure_window(input->info(), output->info(), padding,
-                                                  constant_value, mode, vec_size);
-  ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
-  ICLKernel::configure_internal(win_config.second);
+  auto [error, window] = validate_and_configure_window(input->info(), output->info(), padding,
+                                                       constant_value, mode, vec_size);
+  ARM_COMPUTE_ERROR_THROW_ON(error);
+  ICLKernel::configure_internal(window);
 
   // Set build options
   std::string kernel_name = "pad_layer_";
```
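The padding loop above binds inside the body because it still needs the index `i` for `input->dimension(i)`; when the index is not needed, the same decomposition fits directly in a range-based for loop. A sketch, assuming `PaddingList` is a vector of before/after pad pairs (a hypothetical stand-in for the arm_compute type):

```cpp
#include <utility>
#include <vector>

// Hypothetical stand-in for arm_compute::PaddingList.
using PaddingList = std::vector<std::pair<unsigned int, unsigned int>>;

// Returns true if every pad on either side fits within the given bound.
bool padding_fits(const PaddingList &padding, unsigned int bound)
{
  for (const auto &[pad_before, pad_after] : padding) // bind in the loop header
  {
    if (pad_before > bound || pad_after > bound)
      return false;
  }
  return true;
}
```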
```diff
@@ -125,9 +125,9 @@ void CLQuantizationSymmetricKernel::configure(const ICLTensor *input, const ICLT
   const bool multi_access_x = (input_width_x / vec_size_x > 0);
 
   // Configure kernel window
-  auto win_config = validate_and_configure_window(input->info(), output->info());
-  ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
-  ICLKernel::configure_internal(win_config.second);
+  auto [error, window] = validate_and_configure_window(input->info(), output->info());
+  ARM_COMPUTE_ERROR_THROW_ON(error);
+  ICLKernel::configure_internal(window);
 
   // Create kernel
   CLBuildOptions build_opts;
```
```diff
@@ -121,11 +121,11 @@ void CLScaleFactorSymm8Kernel::configure(const ICLTensor *input, ICLTensor *outp
   _kernel = static_cast<cl::Kernel>(
     CLKernelLibraryEx::get().create_kernel("scale_factor_symm8", build_opts));
 
-  auto win_config = validate_and_configure_window(input->info(), output->info());
+  auto [error, window] = validate_and_configure_window(input->info(), output->info());
 
-  ARM_COMPUTE_ERROR_THROW_ON(std::get<0>(win_config));
+  ARM_COMPUTE_ERROR_THROW_ON(error);
 
-  ICLKernel::configure_internal(std::get<1>(win_config));
+  ICLKernel::configure_internal(window);
 }
 
 Status CLScaleFactorSymm8Kernel::validate(const ITensorInfo *input, const ITensorInfo *output)
```
```diff
@@ -117,9 +117,9 @@ void NEGEMMMatrixAccumulateBiasesKernel::configure(ITensor *accum, const ITensor
   _accum = accum;
 
   // Configure kernel window
-  auto win_config = validate_and_configure_window(accum->info(), biases->info());
-  ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
-  INEKernel::configure(win_config.second);
+  auto [error, window] = validate_and_configure_window(accum->info(), biases->info());
+  ARM_COMPUTE_ERROR_THROW_ON(error);
+  INEKernel::configure(window);
 }
 
 Status NEGEMMMatrixAccumulateBiasesKernel::validate(const ITensorInfo *accum,
```
```diff
@@ -274,10 +274,9 @@ void NEInstanceNormalizationLayerKernelEx::configure(ITensor *input, ITensor *ou
   }
 
   // Configure kernel window
-  auto win_config = validate_and_configure_window(_input->info(), _output->info());
-  ARM_COMPUTE_ERROR_THROW_ON(std::get<0>(win_config));
-
-  INEKernel::configure(std::get<1>(win_config));
+  auto [error, window] = validate_and_configure_window(_input->info(), _output->info());
+  ARM_COMPUTE_ERROR_THROW_ON(error);
+  INEKernel::configure(window);
 }
 
 Status NEInstanceNormalizationLayerKernelEx::validate(const ITensorInfo *input,
```
```diff
@@ -186,7 +186,8 @@ template <typename T> void NEQuantizationSymmetricKernel::quantize(const Window
     const auto start = reinterpret_cast<const T *>(input.ptr());
     const auto min_max = std::minmax_element(start, start + dim_x);
     const auto int8_scale = 127;
-    auto range = std::max(std::abs(*min_max.first), std::abs(*min_max.second));
+    auto [min_val_ptr, max_val_ptr] = min_max;
+    auto range = std::max(std::abs(*min_val_ptr), std::abs(*max_val_ptr));
     if (range == 0)
     {
       *reinterpret_cast<T *>(_scale_factor->ptr_to_element({id.y()})) = 1;
```
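`std::minmax_element` returns a `std::pair` of iterators, so the binding could even be applied to the call itself, dropping the intermediate `min_max` variable. A minimal sketch of that variant (not what the PR does, which keeps the named pair):

```cpp
#include <algorithm>
#include <cmath>
#include <iterator>

int main()
{
  const float data[] = {-3.5f, 1.0f, 2.25f};

  // Bind the iterator pair returned by std::minmax_element directly.
  const auto [min_it, max_it] = std::minmax_element(std::begin(data), std::end(data));

  const float range = std::max(std::abs(*min_it), std::abs(*max_it));
  return range == 3.5f ? 0 : 1;
}
```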
runtime/compute/ARMComputeEx/src/core/UtilsEx.cpp (3 changes: 1 addition & 2 deletions)

```diff
@@ -49,8 +49,7 @@ arm_compute::transposeconv_output_dimensions(unsigned int in_width, unsigned int
                                              const PadStrideInfo &info, unsigned int invalid_right,
                                              unsigned int invalid_bottom)
 {
-  const unsigned int stride_x = info.stride().first;
-  const unsigned int stride_y = info.stride().second;
+  const auto [stride_x, stride_y] = info.stride();
   const unsigned int padx = info.pad_left() + info.pad_right();
   const unsigned int pady = info.pad_top() + info.pad_bottom();
 
```
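Note the `const auto [stride_x, stride_y]` form in `UtilsEx.cpp`: the `const` applies to the hidden pair object, so both names are read-only, preserving the semantics of the two `const unsigned int` locals they replace. A one-line illustration:

```cpp
#include <utility>

std::pair<unsigned int, unsigned int> stride() { return {1U, 2U}; }

int main()
{
  const auto [stride_x, stride_y] = stride(); // const propagates to both names
  // stride_x = 3U; // would not compile: the bound members are const
  return static_cast<int>(stride_x + stride_y);
}
```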
```diff
@@ -154,8 +154,7 @@ void CLDirectTransposeConvLayer::configure(const CLCompileContext &compile_conte
   unsigned int pad_right = 0;
   unsigned int pad_top = 0;
   unsigned int pad_bottom = 0;
-  const unsigned int stride_x = info.stride().first;
-  const unsigned int stride_y = info.stride().second;
+  const auto [stride_x, stride_y] = info.stride();
 
   const DataLayout data_layout = input->info()->data_layout();
 
```
```diff
@@ -120,10 +120,10 @@ void CLGEMMMatrixAccumulateBiasesKernel::configure(const CLCompileContext &compi
   unsigned int vector_size = 0;
 
   // Configure kernel window
-  auto win_config =
+  auto [error, window] =
     validate_and_configure_window(accum->info(), biases->info(), gpu_target, vector_size);
-  ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
-  ICLKernel::configure_internal(win_config.second);
+  ARM_COMPUTE_ERROR_THROW_ON(error);
+  ICLKernel::configure_internal(window);
 
   // Add build options
   CLBuildOptions build_opts;
```
```diff
@@ -142,9 +142,9 @@ DeconvolutionMethod CLTransposeConvLayer::get_deconvolution_method(
   const size_t idx_w = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
   const size_t idx_h = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
 
-  if (weights->dimension(idx_w) != deconv_info.stride().first ||
-      weights->dimension(idx_h) != deconv_info.stride().second || invalid_right != 0 ||
-      invalid_bottom != 0)
+  auto [stride_w, stride_h] = deconv_info.stride();
+  if (weights->dimension(idx_w) != stride_w || weights->dimension(idx_h) != stride_h ||
+      invalid_right != 0 || invalid_bottom != 0)
   {
     return DeconvolutionMethod::DIRECT;
   }
```