From f6e7bcd1bab5aa803a1d57a918031cab2a62e756 Mon Sep 17 00:00:00 2001
From: Mateusz Bencer
Date: Wed, 11 Dec 2024 11:14:22 +0100
Subject: [PATCH] review remarks

---
 .../backend/basic/BackendContextHelpers.h     | 27 +++++----
 .../backend/basic/StaticTensorManager.h       |  5 +-
 .../src/backend/basic/StaticTensorManager.cc  | 55 +++++++++++++------
 3 files changed, 54 insertions(+), 33 deletions(-)

diff --git a/runtime/onert/core/include/backend/basic/BackendContextHelpers.h b/runtime/onert/core/include/backend/basic/BackendContextHelpers.h
index aae4404620b..5030600ca8c 100644
--- a/runtime/onert/core/include/backend/basic/BackendContextHelpers.h
+++ b/runtime/onert/core/include/backend/basic/BackendContextHelpers.h
@@ -249,8 +249,11 @@ inline void initConsts(const ir::Operands &operands,
     const bool has_const_shared_memory =
       shared_memory_operands_map.find(ind) != std::end(shared_memory_operands_map) &&
       operands.at(shared_memory_operands_map.at(ind)).isConstant();
+    if (external_operands.contains(ind))
+      return;
     const bool can_be_initialized_as_const = operand.isConstant() || has_const_shared_memory;
-    if (external_operands.contains(ind) || !can_be_initialized_as_const)
+    if (!can_be_initialized_as_const)
+      // neither this tensor nor its source memory tensor (if any) is a constant
       return;
 
     auto tensor = tensor_registry->getNativeITensor(ind);
@@ -264,23 +267,19 @@ inline void initConsts(const ir::Operands &operands,
       auto memory_source_data = source_operand_ind.shareData();
       assert(memory_source_data && memory_source_data->base());
       auto shared_mem_tensor = dynamic_cast<Tensor *>(tensor);
-      if (nullptr == shared_mem_tensor)
-      {
-        throw std::runtime_error{"Incorrect type of tensor to support sharing memory"};
-      }
+      assert(shared_mem_tensor != nullptr);
       shared_mem_tensor->setBuffer(const_cast<uint8_t *>(memory_source_data->base()));
+      return;
     }
-    else
+    // the default flow for constant initialization
+    auto data = operand.shareData();
+    assert(data && data->base());
+    auto ext_tensor = dynamic_cast<ExternalTensor *>(tensor);
+    if (ext_tensor == nullptr)
     {
-      auto data = operand.shareData();
-      assert(data && data->base());
-      auto ext_tensor = dynamic_cast<ExternalTensor *>(tensor);
-      if (ext_tensor == nullptr)
-      {
-        throw std::runtime_error{"This tensor is not external tensor"};
-      }
-      ext_tensor->setData(data);
+      throw std::runtime_error{"This tensor is not external tensor"};
     }
+    ext_tensor->setData(data);
   });
 }
 
diff --git a/runtime/onert/core/include/backend/basic/StaticTensorManager.h b/runtime/onert/core/include/backend/basic/StaticTensorManager.h
index e08d17b25bb..e377756f08f 100644
--- a/runtime/onert/core/include/backend/basic/StaticTensorManager.h
+++ b/runtime/onert/core/include/backend/basic/StaticTensorManager.h
@@ -57,7 +57,10 @@ class StaticTensorManager
 private:
   // Update source operand index if source memory operand exist.
   // Otherwise, return unchanged.
-  ir::OperandIndex adjust_with_memory_source_operand(const ir::OperandIndex &ind);
+  ir::OperandIndex adjustWithMemorySourceOperand(const ir::OperandIndex &ind) const;
+  // Return true if the given ind is a shared or source index in the shared memory operands map.
+  // Otherwise, return false.
+  bool isSharedMemoryOperand(const ir::OperandIndex &ind) const;
 
 private:
   std::unique_ptr<MemoryManager> _nonconst_mgr;

diff --git a/runtime/onert/core/src/backend/basic/StaticTensorManager.cc b/runtime/onert/core/src/backend/basic/StaticTensorManager.cc
index 1b5f807e16f..3f5d063e980 100644
--- a/runtime/onert/core/src/backend/basic/StaticTensorManager.cc
+++ b/runtime/onert/core/src/backend/basic/StaticTensorManager.cc
@@ -56,7 +56,7 @@ void StaticTensorManager::allocateNonconsts(void)
 
   for (auto &&[ind, tensor] : _tensors->native_tensors())
   {
-    const auto adjusted_ind = adjust_with_memory_source_operand(ind);
+    const auto adjusted_ind = adjustWithMemorySourceOperand(ind);
     if (!_as_constants[adjusted_ind] && !tensor->is_dynamic())
     {
       auto *buffer = _nonconst_mgr->getBuffer(adjusted_ind);
@@ -95,17 +95,20 @@ void StaticTensorManager::claimPlan(const ir::OperandIndex &ind, uint32_t size)
   // This method is called only when a tensor has proper shape
   assert(!_tensors->getNativeTensor(ind)->is_dynamic());
 
-  const auto claim_ind = adjust_with_memory_source_operand(ind);
+  const auto claim_ind = adjustWithMemorySourceOperand(ind);
   if (_as_constants[claim_ind])
   {
     return;
   }
-  ++_source_operand_inds_ref_counter[claim_ind];
-  // notify only first usage
-  if (1 == _source_operand_inds_ref_counter[claim_ind])
+  if (isSharedMemoryOperand(claim_ind))
   {
-    _nonconst_mgr->claimPlan(claim_ind, size);
+    ++_source_operand_inds_ref_counter[claim_ind];
+    if (_source_operand_inds_ref_counter[claim_ind] > 1)
+    {
+      return; // claimPlan should be called only for the first usage
+    }
   }
+  _nonconst_mgr->claimPlan(claim_ind, size);
 }
 
 void StaticTensorManager::releasePlan(const ir::OperandIndex &ind)
@@ -115,20 +118,23 @@ void StaticTensorManager::releasePlan(const ir::OperandIndex &ind)
   // This method is called only when a tensor has proper shape
   assert(!_tensors->getNativeTensor(ind)->is_dynamic());
 
-  const auto release_ind = adjust_with_memory_source_operand(ind);
+  const auto release_ind = adjustWithMemorySourceOperand(ind);
   if (_as_constants[release_ind])
   {
     return;
   }
-  if (_source_operand_inds_ref_counter[release_ind] > 0)
-  {
-    --_source_operand_inds_ref_counter[release_ind];
-  }
-  // notify only last usage
-  if (0 == _source_operand_inds_ref_counter[release_ind])
+  if (isSharedMemoryOperand(release_ind))
   {
-    _nonconst_mgr->releasePlan(release_ind);
+    if (_source_operand_inds_ref_counter[release_ind] > 0) // sanity check
+    {
+      --_source_operand_inds_ref_counter[release_ind];
+    }
+    if (_source_operand_inds_ref_counter[release_ind] > 0)
+    {
+      return; // releasePlan should be called only for the last usage
+    }
   }
+  _nonconst_mgr->releasePlan(release_ind);
 }
 
 void StaticTensorManager::iterate(const std::function<void(const ir::OperandIndex &)> &fn)
@@ -137,17 +143,30 @@ void StaticTensorManager::iterate(const std::function<void(const ir::OperandIndex &)> &fn)
     fn(it.first);
 }
 
-ir::OperandIndex StaticTensorManager::adjust_with_memory_source_operand(const ir::OperandIndex &ind)
+ir::OperandIndex
+StaticTensorManager::adjustWithMemorySourceOperand(const ir::OperandIndex &ind) const
 {
-  auto source_operand_ind = _shared_memory_operand_indexes.find(ind);
-  if (source_operand_ind != std::end(_shared_memory_operand_indexes))
+  const auto shared_operand_ind = _shared_memory_operand_indexes.find(ind);
+  if (shared_operand_ind != std::end(_shared_memory_operand_indexes))
   {
-    return source_operand_ind->second;
+    return shared_operand_ind->second;
   }
   // source memory operand not found
   return ind;
 }
 
+bool StaticTensorManager::isSharedMemoryOperand(const ir::OperandIndex &ind) const
+{
+  for (const auto &[shared_ind, source_ind] : _shared_memory_operand_indexes)
+  {
+    if (shared_ind == ind || source_ind == ind)
+    {
+      return true;
+    }
+  }
+  return false;
+}
+
 } // namespace basic
 } // namespace backend
 } // namespace onert
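
Reviewer note, below the patch: a minimal standalone sketch of the claim/release reference counting this change introduces for shared-memory operands. It is not onert code: PlanBackend and SharedPlanTracker are made-up stand-ins for MemoryManager and StaticTensorManager, OperandIndex is reduced to a plain uint32_t, and the constant-operand and dynamic-tensor checks are omitted. It demonstrates the invariant the patch relies on: the backend manager sees exactly one claimPlan on the first use of a shared buffer and exactly one releasePlan on the last use.

// Standalone sketch, not onert code. PlanBackend and SharedPlanTracker are
// hypothetical stand-ins for MemoryManager and StaticTensorManager.
#include <cassert>
#include <cstdint>
#include <set>
#include <unordered_map>

using OperandIndex = uint32_t; // simplified stand-in for ir::OperandIndex

// The backend must see exactly one claim and one release per planned buffer;
// the asserts fail if the ref counting ever double-claims or double-releases.
struct PlanBackend
{
  std::set<OperandIndex> planned;
  void claimPlan(OperandIndex ind, uint32_t /*size*/) { assert(planned.insert(ind).second); }
  void releasePlan(OperandIndex ind) { assert(planned.erase(ind) == 1); }
};

struct SharedPlanTracker
{
  PlanBackend backend;
  // shared operand -> source operand, mirrors _shared_memory_operand_indexes
  std::unordered_map<OperandIndex, OperandIndex> shared_to_source;
  std::unordered_map<OperandIndex, uint32_t> ref_counter;

  // counterpart of adjustWithMemorySourceOperand
  OperandIndex adjust(OperandIndex ind) const
  {
    const auto it = shared_to_source.find(ind);
    return it != shared_to_source.end() ? it->second : ind;
  }

  // counterpart of isSharedMemoryOperand
  bool isShared(OperandIndex ind) const
  {
    for (const auto &[shared, source] : shared_to_source)
      if (shared == ind || source == ind)
        return true;
    return false;
  }

  void claim(OperandIndex ind, uint32_t size)
  {
    const auto claim_ind = adjust(ind);
    if (isShared(claim_ind) && ++ref_counter[claim_ind] > 1)
      return; // already claimed by an earlier user
    backend.claimPlan(claim_ind, size);
  }

  void release(OperandIndex ind)
  {
    const auto release_ind = adjust(ind);
    if (isShared(release_ind))
    {
      auto &counter = ref_counter[release_ind];
      if (counter > 0) // sanity check, as in the patch
        --counter;
      if (counter > 0)
        return; // other users still alive; release only on the last one
    }
    backend.releasePlan(release_ind);
  }
};

int main()
{
  SharedPlanTracker t;
  t.shared_to_source = {{2, 1}}; // operand 2 reuses operand 1's memory

  t.claim(1, 64); // first user: forwarded to the backend
  t.claim(2, 64); // resolves to operand 1, counter goes to 2: not forwarded
  t.release(2);   // counter drops back to 1: not forwarded
  t.release(1);   // last user: backend releasePlan is finally called
  assert(t.backend.planned.empty());
  return 0;
}

Note that claimPlan/releasePlan are still forwarded unconditionally for operands that do not appear in the shared-memory map, so the non-shared path pays nothing for the counter bookkeeping.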