Skip to content

Commit

Permalink
review remarks
Browse files Browse the repository at this point in the history
  • Loading branch information
mbencer committed Dec 11, 2024
1 parent 4171e2e commit f6e7bcd
Show file tree
Hide file tree
Showing 3 changed files with 54 additions and 33 deletions.
27 changes: 13 additions & 14 deletions runtime/onert/core/include/backend/basic/BackendContextHelpers.h
Original file line number Diff line number Diff line change
Expand Up @@ -249,8 +249,11 @@ inline void initConsts(const ir::Operands &operands,
const bool has_const_shared_memory =
shared_memory_operands_map.find(ind) != std::end(shared_memory_operands_map) &&
operands.at(shared_memory_operands_map.at(ind)).isConstant();
if (external_operands.contains(ind))
return;
const bool can_be_initialized_as_const = operand.isConstant() || has_const_shared_memory;
if (external_operands.contains(ind) || !can_be_initialized_as_const)
if (!can_be_initialized_as_const)
// the tensor currently being processed is not a constant, and the source memory tensor (if it exists) is not a constant either
return;

auto tensor = tensor_registry->getNativeITensor(ind);
Expand All @@ -264,23 +267,19 @@ inline void initConsts(const ir::Operands &operands,
auto memory_source_data = source_operand_ind.shareData();
assert(memory_source_data && memory_source_data->base());
auto shared_mem_tensor = dynamic_cast<Tensor *>(tensor);
if (nullptr == shared_mem_tensor)
{
throw std::runtime_error{"Incorrect type of tensor to support sharing memory"};
}
assert(shared_mem_tensor != nullptr);
shared_mem_tensor->setBuffer(const_cast<uint8_t *>(memory_source_data->base()));
return;
}
else
// the default flow for constant initialization
auto data = operand.shareData();
assert(data && data->base());
auto ext_tensor = dynamic_cast<ExternalTensor *>(tensor);
if (ext_tensor == nullptr)
{
auto data = operand.shareData();
assert(data && data->base());
auto ext_tensor = dynamic_cast<ExternalTensor *>(tensor);
if (ext_tensor == nullptr)
{
throw std::runtime_error{"This tensor is not external tensor"};
}
ext_tensor->setData(data);
throw std::runtime_error{"This tensor is not external tensor"};
}
ext_tensor->setData(data);
});
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -57,7 +57,10 @@ class StaticTensorManager
private:
// Return the source memory operand index if one exists for the given operand.
// Otherwise, return the index unchanged.
ir::OperandIndex adjust_with_memory_source_operand(const ir::OperandIndex &ind);
ir::OperandIndex adjustWithMemorySourceOperand(const ir::OperandIndex &ind) const;
// Return true if the given index is either a shared index or a source index
// in the shared-memory operands map. Otherwise, return false.
bool isSharedMemoryOperand(const ir::OperandIndex &ind) const;

private:
std::unique_ptr<MemoryManager> _nonconst_mgr;
Expand Down
55 changes: 37 additions & 18 deletions runtime/onert/core/src/backend/basic/StaticTensorManager.cc
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ void StaticTensorManager::allocateNonconsts(void)

for (auto &&[ind, tensor] : _tensors->native_tensors())
{
const auto adjusted_ind = adjust_with_memory_source_operand(ind);
const auto adjusted_ind = adjustWithMemorySourceOperand(ind);
if (!_as_constants[adjusted_ind] && !tensor->is_dynamic())
{
auto *buffer = _nonconst_mgr->getBuffer(adjusted_ind);
Expand Down Expand Up @@ -95,17 +95,20 @@ void StaticTensorManager::claimPlan(const ir::OperandIndex &ind, uint32_t size)
// This method is called only when a tensor has proper shape
assert(!_tensors->getNativeTensor(ind)->is_dynamic());

const auto claim_ind = adjust_with_memory_source_operand(ind);
const auto claim_ind = adjustWithMemorySourceOperand(ind);
if (_as_constants[claim_ind])
{
return;
}
++_source_operand_inds_ref_counter[claim_ind];
// notify only first usage
if (1 == _source_operand_inds_ref_counter[claim_ind])
if (isSharedMemoryOperand(claim_ind))
{
_nonconst_mgr->claimPlan(claim_ind, size);
++_source_operand_inds_ref_counter[claim_ind];
if (_source_operand_inds_ref_counter[claim_ind] > 1)
{
return; // claimPlan should be called only for the first usage
}
}
_nonconst_mgr->claimPlan(claim_ind, size);
}

void StaticTensorManager::releasePlan(const ir::OperandIndex &ind)
Expand All @@ -115,20 +118,23 @@ void StaticTensorManager::releasePlan(const ir::OperandIndex &ind)
// This method is called only when a tensor has proper shape
assert(!_tensors->getNativeTensor(ind)->is_dynamic());

const auto release_ind = adjust_with_memory_source_operand(ind);
const auto release_ind = adjustWithMemorySourceOperand(ind);
if (_as_constants[release_ind])
{
return;
}
if (_source_operand_inds_ref_counter[release_ind] > 0)
{
--_source_operand_inds_ref_counter[release_ind];
}
// notify only last usage
if (0 == _source_operand_inds_ref_counter[release_ind])
if (isSharedMemoryOperand(release_ind))
{
_nonconst_mgr->releasePlan(release_ind);
if (_source_operand_inds_ref_counter[release_ind] > 0) // sanity check
{
--_source_operand_inds_ref_counter[release_ind];
}
if (_source_operand_inds_ref_counter[release_ind] > 0)
{
return; // releasePlan should be forwarded only on the last usage
}
}
_nonconst_mgr->releasePlan(release_ind);
}

void StaticTensorManager::iterate(const std::function<void(const ir::OperandIndex &)> &fn)
Expand All @@ -137,17 +143,30 @@ void StaticTensorManager::iterate(const std::function<void(const ir::OperandInde
fn(it.first);
}

ir::OperandIndex StaticTensorManager::adjust_with_memory_source_operand(const ir::OperandIndex &ind)
ir::OperandIndex
StaticTensorManager::adjustWithMemorySourceOperand(const ir::OperandIndex &ind) const
{
const auto source_operand_ind = _shared_memory_operand_indexes.find(ind);
if (source_operand_ind != std::end(_shared_memory_operand_indexes))
const auto shared_operand_ind = _shared_memory_operand_indexes.find(ind);
if (shared_operand_ind != std::end(_shared_memory_operand_indexes))
{
return source_operand_ind->second;
return shared_operand_ind->second;
}
// source memory operand not found
return ind;
}

// Check whether the operand participates in memory sharing, i.e. it appears
// on either side (shared index or source index) of the shared-memory mapping.
bool StaticTensorManager::isSharedMemoryOperand(const ir::OperandIndex &ind) const
{
  for (const auto &mapping : _shared_memory_operand_indexes)
  {
    const bool matches_either_side = (mapping.first == ind) || (mapping.second == ind);
    if (matches_either_side)
      return true;
  }
  return false;
}

} // namespace basic
} // namespace backend
} // namespace onert

0 comments on commit f6e7bcd

Please sign in to comment.