Skip to content

Runtime API to retrieve attributes #10144

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
Apr 18, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
31 changes: 31 additions & 0 deletions runtime/executor/method.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1593,6 +1593,37 @@ EValue& Method::mutable_input(size_t i) {
return mutable_value(get_input_index(i));
}

Result<executorch::aten::Tensor> Method::get_attribute(
    executorch::aten::string_view name) {
  // Scan the serialized value table for a tensor whose fully qualified name
  // matches `name`, then return the corresponding runtime EValue as a Tensor.
  auto flatbuffer_values = serialization_plan_->values();

  for (size_t i = 0; i < flatbuffer_values->size(); ++i) {
    auto serialization_value = flatbuffer_values->Get(i);
    if (serialization_value->val_type() !=
        executorch_flatbuffer::KernelTypes::Tensor) {
      continue;
    }
    const auto s_tensor = static_cast<const executorch_flatbuffer::Tensor*>(
        serialization_value->val());
    const auto* extra_info = s_tensor->extra_tensor_info();
    if (extra_info == nullptr || extra_info->fully_qualified_name() == nullptr) {
      continue;
    }
    const auto* fqn = extra_info->fully_qualified_name();
    // A string_view is not guaranteed to be null-terminated, so compare by
    // explicit length + bytes instead of strcmp(name.data(), ...). This also
    // prevents a shorter view from spuriously matching a prefix of the fqn.
    if (fqn->size() != name.size() ||
        std::memcmp(fqn->c_str(), name.data(), name.size()) != 0) {
      continue;
    }
    // The serialized value index and the runtime values_ index line up 1:1.
    if (!this->values_[i].isTensor()) {
      ET_LOG(
          Error,
          "Attribute tensor not at the expected location. The .pte is likely malformed. Please file a bug report on https://github.com/pytorch/executorch/issues");
      return Error::Internal;
    }
    return this->values_[i].toTensor();
  }

  return Error::NotFound;
}

size_t Method::outputs_size() const {
const auto* outputs = serialization_plan_->outputs();
return outputs == nullptr ? 0 : outputs->size();
Expand Down
12 changes: 12 additions & 0 deletions runtime/executor/method.h
Original file line number Diff line number Diff line change
Expand Up @@ -192,6 +192,18 @@ class Method final {
*/
ET_NODISCARD Error get_inputs(EValue* input_evalues, size_t length);

/**
*
* Retrieves the attribute tensor associated with the given name.
*
* @param[in] name The name of the attribute tensor to retrieve.
*
* @returns Result containing the attribute tensor on success, non-Ok on
* failure.
*/
ET_NODISCARD Result<executorch::aten::Tensor> get_attribute(
executorch::aten::string_view name);

/**
* Execute the method.
*
Expand Down
69 changes: 64 additions & 5 deletions runtime/executor/method_meta.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -69,9 +69,11 @@ TensorInfo::TensorInfo(
Span<const int32_t> sizes,
Span<const uint8_t> dim_order,
executorch::aten::ScalarType scalar_type,
const bool is_memory_planned)
const bool is_memory_planned,
executorch::aten::string_view name)
: sizes_(sizes),
dim_order_(dim_order),
name_(name),
scalar_type_(scalar_type),
is_memory_planned_(is_memory_planned),
nbytes_(calculate_nbytes(sizes_, scalar_type_)) {}
Expand All @@ -96,6 +98,10 @@ size_t TensorInfo::nbytes() const {
return nbytes_;
}

// Accessor for the tensor's fully qualified name; callers receive an empty
// view ({nullptr, 0}) when the tensor was created without a name.
executorch::aten::string_view TensorInfo::name() const {
  return this->name_;
}

MethodMeta::MethodMeta(const executorch_flatbuffer::ExecutionPlan* s_plan)
: s_plan_(s_plan) {}

Expand Down Expand Up @@ -149,8 +155,9 @@ Result<TensorInfo> MethodMeta::input_tensor_meta(size_t index) const {
tensor_value->dim_order()->data(), tensor_value->dim_order()->size()),
static_cast<executorch::aten::ScalarType>(tensor_value->scalar_type()),
tensor_value->allocation_info() != nullptr ||
tensor_value->data_buffer_idx() !=
0); // Count constant returns as memory planned.
tensor_value->data_buffer_idx() != 0 /* is_memory_planned */,
executorch::aten::string_view{nullptr, 0}); // Count constant returns as
// memory planned.
}

size_t MethodMeta::num_outputs() const {
Expand Down Expand Up @@ -200,8 +207,60 @@ Result<TensorInfo> MethodMeta::output_tensor_meta(size_t index) const {
tensor_value->dim_order()->data(), tensor_value->dim_order()->size()),
static_cast<executorch::aten::ScalarType>(tensor_value->scalar_type()),
tensor_value->allocation_info() != nullptr ||
tensor_value->data_buffer_idx() !=
0); // Count constant returns as memory planned.
tensor_value->data_buffer_idx() != 0 /* is_memory_planned */,
executorch::aten::string_view{nullptr, 0}); // Count constant returns as
// memory planned.
}

size_t MethodMeta::num_attributes() const {
  // Count tensors in the value table that carry a fully qualified name —
  // these are the method's attribute tensors.
  size_t counter = 0;
  auto values = s_plan_->values();
  for (size_t i = 0; i < values->size(); ++i) {
    auto value = values->Get(i);
    if (value->val_type() == executorch_flatbuffer::KernelTypes::Tensor) {
      auto tensor_value = value->val_as_Tensor();
      // Check the flatbuffers String pointer itself. The previous check
      // called c_str() first, which dereferences a null pointer when
      // fully_qualified_name() is absent.
      if (tensor_value->extra_tensor_info() != nullptr &&
          tensor_value->extra_tensor_info()->fully_qualified_name() !=
              nullptr) {
        ++counter;
      }
    }
  }
  return counter;
}

Result<TensorInfo> MethodMeta::attribute_tensor_meta(size_t index) const {
  // Walk the value table counting named tensors until the index-th attribute
  // is reached; build its TensorInfo (including the name) on the spot.
  size_t counter = 0;
  auto values = s_plan_->values();
  for (size_t i = 0; i < values->size(); ++i) {
    auto value = values->Get(i);
    if (value->val_type() == executorch_flatbuffer::KernelTypes::Tensor) {
      auto tensor_value = value->val_as_Tensor();
      // Check the String pointer itself; calling c_str() on a null
      // fully_qualified_name() dereferences null.
      if (tensor_value->extra_tensor_info() != nullptr &&
          tensor_value->extra_tensor_info()->fully_qualified_name() !=
              nullptr) {
        if (counter == index) {
          auto t_name =
              tensor_value->extra_tensor_info()->fully_qualified_name();
          return TensorInfo(
              Span<const int32_t>(
                  tensor_value->sizes()->data(), tensor_value->sizes()->size()),
              Span<const uint8_t>(
                  tensor_value->dim_order()->data(),
                  tensor_value->dim_order()->size()),
              static_cast<executorch::aten::ScalarType>(
                  tensor_value->scalar_type()),
              // Count constant tensors (data_buffer_idx != 0) as memory
              // planned, matching input/output_tensor_meta.
              tensor_value->allocation_info() != nullptr ||
                  tensor_value->data_buffer_idx() != 0 /* is_memory_planned */,
              executorch::aten::string_view{t_name->c_str(), t_name->size()});
        }
        ++counter;
      }
    }
  }
  ET_LOG(Error, "No attribute tensor found at index %zu", index);
  return Error::InvalidArgument;
}

size_t MethodMeta::num_memory_planned_buffers() const {
Expand Down
27 changes: 26 additions & 1 deletion runtime/executor/method_meta.h
Original file line number Diff line number Diff line change
Expand Up @@ -62,6 +62,12 @@ class TensorInfo final {
*/
size_t nbytes() const;

/**
   * Returns the fully qualified name of the Tensor. May be empty if the
   * tensor is nameless.
*/
executorch::aten::string_view name() const;

private:
// Let MethodMeta create TensorInfo.
friend class MethodMeta;
Expand All @@ -70,7 +76,8 @@ class TensorInfo final {
Span<const int32_t> sizes,
Span<const uint8_t> dim_order,
executorch::aten::ScalarType scalar_type,
const bool is_memory_planned);
const bool is_memory_planned,
executorch::aten::string_view name);

/**
* The sizes of the tensor.
Expand All @@ -88,6 +95,9 @@ class TensorInfo final {
*/
Span<const uint8_t> dim_order_;

/// The fully qualified name of the Tensor.
executorch::aten::string_view name_;

/// The scalar type of the tensor.
executorch::aten::ScalarType scalar_type_;

Expand Down Expand Up @@ -170,6 +180,21 @@ class MethodMeta final {
*/
Result<TensorInfo> output_tensor_meta(size_t index) const;

/**
* Get the number of attribute tensors in this method.
*
* @returns The number of attribute tensors.
*/
size_t num_attributes() const;

/**
* Get metadata about the specified attribute tensor.
*
* @param[in] index The index of the attribute tensor to look up.
* @returns The metadata on success, or an error on failure.
*/
Result<TensorInfo> attribute_tensor_meta(size_t index) const;

/**
* Get the number of memory-planned buffers this method requires.
*
Expand Down
5 changes: 4 additions & 1 deletion runtime/executor/test/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -27,9 +27,10 @@ add_custom_command(
"${CMAKE_CURRENT_BINARY_DIR}/ModuleLinearProgram.ptd"
"${CMAKE_CURRENT_BINARY_DIR}/ModuleMultipleEntry.pte"
"${CMAKE_CURRENT_BINARY_DIR}/ModuleSimpleTrain.pte"
"${CMAKE_CURRENT_BINARY_DIR}/ModuleStateful.pte"
COMMAND
python3 -m test.models.export_program --modules
"ModuleAdd,ModuleAddHalf,ModuleDynamicCatUnallocatedIO,ModuleIndex,ModuleLinear,ModuleMultipleEntry,ModuleSimpleTrain"
"ModuleAdd,ModuleAddHalf,ModuleDynamicCatUnallocatedIO,ModuleIndex,ModuleLinear,ModuleMultipleEntry,ModuleSimpleTrain,ModuleStateful"
--outdir "${CMAKE_CURRENT_BINARY_DIR}" 2> /dev/null
COMMAND
python3 -m test.models.export_program --modules "ModuleLinear"
Expand All @@ -51,6 +52,7 @@ add_custom_target(
"${CMAKE_CURRENT_BINARY_DIR}/ModuleLinearProgram.ptd"
"${CMAKE_CURRENT_BINARY_DIR}/ModuleMultipleEntry.pte"
"${CMAKE_CURRENT_BINARY_DIR}/ModuleSimpleTrain.pte"
"${CMAKE_CURRENT_BINARY_DIR}/ModuleStateful.pte"
)

set(test_env
Expand All @@ -64,6 +66,7 @@ set(test_env
"ET_MODULE_LINEAR_DATA_PATH=${CMAKE_CURRENT_BINARY_DIR}/ModuleLinearProgram.ptd"
"ET_MODULE_MULTI_ENTRY_PATH=${CMAKE_CURRENT_BINARY_DIR}/ModuleMultipleEntry.pte"
"ET_MODULE_SIMPLE_TRAIN_PATH=${CMAKE_CURRENT_BINARY_DIR}/ModuleSimpleTrain.pte"
"ET_MODULE_STATEFUL_PATH=${CMAKE_CURRENT_BINARY_DIR}/ModuleStateful.pte"
)

et_cxx_test(
Expand Down
47 changes: 36 additions & 11 deletions runtime/executor/test/method_meta_test.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -26,26 +26,34 @@ using torch::executor::util::FileDataLoader;

class MethodMetaTest : public ::testing::Test {
protected:
void SetUp() override {
// Create a loader for the serialized ModuleAdd program.
const char* path = std::getenv("ET_MODULE_ADD_PATH");
void load_program(const char* path, const char* module_name) {
// Create a loader for the serialized program.
Result<FileDataLoader> loader = FileDataLoader::from(path);
ASSERT_EQ(loader.error(), Error::Ok);
loader_ = std::make_unique<FileDataLoader>(std::move(loader.get()));
loaders_.insert(
{module_name,
std::make_unique<FileDataLoader>(std::move(loader.get()))});

// Use it to load the program.
Result<Program> program = Program::load(
loader_.get(), Program::Verification::InternalConsistency);
loaders_[module_name].get(),
Program::Verification::InternalConsistency);
ASSERT_EQ(program.error(), Error::Ok);
program_ = std::make_unique<Program>(std::move(program.get()));
programs_.insert(
{module_name, std::make_unique<Program>(std::move(program.get()))});
}

void SetUp() override {
load_program(std::getenv("ET_MODULE_ADD_PATH"), "add");
load_program(std::getenv("ET_MODULE_STATEFUL_PATH"), "stateful");
}

private:
// Must outlive program_, but tests shouldn't need to touch it.
std::unique_ptr<FileDataLoader> loader_;
std::unordered_map<std::string, std::unique_ptr<FileDataLoader>> loaders_;

protected:
std::unique_ptr<Program> program_;
std::unordered_map<std::string, std::unique_ptr<Program>> programs_;
};

namespace {
Expand All @@ -67,7 +75,7 @@ void check_tensor(const TensorInfo& tensor_info) {
} // namespace

TEST_F(MethodMetaTest, MethodMetaApi) {
Result<MethodMeta> method_meta = program_->method_meta("forward");
Result<MethodMeta> method_meta = programs_["add"]->method_meta("forward");
ASSERT_EQ(method_meta.error(), Error::Ok);

// Appropriate amount of inputs
Expand Down Expand Up @@ -97,11 +105,12 @@ TEST_F(MethodMetaTest, MethodMetaApi) {

// Missing method fails
EXPECT_EQ(
program_->method_meta("not_a_method").error(), Error::InvalidArgument);
programs_["add"]->method_meta("not_a_method").error(),
Error::InvalidArgument);
}

TEST_F(MethodMetaTest, TensorInfoApi) {
Result<MethodMeta> method_meta = program_->method_meta("forward");
Result<MethodMeta> method_meta = programs_["add"]->method_meta("forward");
ASSERT_EQ(method_meta.error(), Error::Ok);

// Input 1
Expand Down Expand Up @@ -138,3 +147,19 @@ TEST_F(MethodMetaTest, TensorInfoApi) {
EXPECT_EQ(
method_meta->output_tensor_meta(-1).error(), Error::InvalidArgument);
}

// Verifies attribute-tensor metadata on a module with mutable state:
// the count, the reported name, the memory-planning flag, and the error
// returned for an out-of-range index.
TEST_F(MethodMetaTest, MethodMetaAttribute) {
  Result<MethodMeta> method_meta =
      programs_["stateful"]->method_meta("forward");
  ASSERT_EQ(method_meta.error(), Error::Ok);

  // ModuleStateful exposes exactly one named attribute tensor.
  ASSERT_EQ(method_meta->num_attributes(), 1);
  auto state = method_meta->attribute_tensor_meta(0);
  ASSERT_TRUE(state.ok());

  ASSERT_EQ(state->name(), "state");
  // Attribute tensors are not memory planned; their data is set by the user.
  ASSERT_FALSE(state->is_memory_planned());

  // Indexing past the last attribute must fail with InvalidArgument.
  auto bad_access = method_meta->attribute_tensor_meta(1);
  ASSERT_EQ(bad_access.error(), Error::InvalidArgument);
}
26 changes: 26 additions & 0 deletions runtime/executor/test/method_test.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -79,6 +79,7 @@ class MethodTest : public ::testing::Test {
load_program(
std::getenv("ET_MODULE_DYNAMIC_CAT_UNALLOCATED_IO_PATH"), "cat");
load_program(std::getenv("ET_MODULE_LINEAR_PATH"), "linear");
load_program(std::getenv("ET_MODULE_STATEFUL_PATH"), "stateful");
load_program(
std::getenv("DEPRECATED_ET_MODULE_LINEAR_CONSTANT_BUFFER_PATH"),
"linear_constant_buffer");
Expand Down Expand Up @@ -339,6 +340,31 @@ TEST_F(MethodTest, ProgramDataSeparationTest) {
ASSERT_EQ(err, Error::Ok);
}

// Verifies Method::get_attribute: the returned attribute tensor starts with
// no backing data, accepts user-provided storage via set_data, and reflects
// the mutation performed by executing the method.
TEST_F(MethodTest, MethodGetAttributeTest) {
  ManagedMemoryManager mmm(kDefaultNonConstMemBytes, kDefaultRuntimeMemBytes);
  Result<Method> method =
      programs_["stateful"]->load_method("forward", &mmm.get());
  ASSERT_EQ(method.error(), Error::Ok);

  // Look up the attribute tensor by its fully qualified name.
  auto res = method->get_attribute("state");
  ASSERT_TRUE(res.ok());
  // expect data to be empty
  EXPECT_EQ(res->const_data_ptr(), nullptr);

  // Back the attribute tensor with caller-owned storage.
  int32_t data = 0;
  res->set_data(&data);

  // expect data to be set
  EXPECT_EQ(res->const_data_ptr(), &data);

  // Can execute the method.
  Error err = method->execute();
  ASSERT_EQ(err, Error::Ok);

  // Expect the state to be incremented
  EXPECT_EQ(res->const_data_ptr<int32_t>()[0], 1);
}

/*
* TODO(T161163608): Test is disabled due to a resize bug in tensor_index_out of
* the portable op lib
Expand Down
1 change: 1 addition & 0 deletions runtime/executor/test/targets.bzl
Original file line number Diff line number Diff line change
Expand Up @@ -122,6 +122,7 @@ def define_common_targets(is_fbcode = False):
"ET_MODULE_LINEAR_PATH": "$(location fbcode//executorch/test/models:exported_programs[ModuleLinear.pte])",
"ET_MODULE_MULTI_ENTRY_PATH": "$(location fbcode//executorch/test/models:exported_programs[ModuleMultipleEntry.pte])",
"ET_MODULE_SIMPLE_TRAIN_PATH": "$(location fbcode//executorch/test/models:exported_programs[ModuleSimpleTrain.pte])",
"ET_MODULE_STATEFUL_PATH": "$(location fbcode//executorch/test/models:exported_programs[ModuleStateful.pte])",
"ET_MODULE_LINEAR_PROGRAM_PATH": "$(location fbcode//executorch/test/models:exported_program_and_data[ModuleLinear.pte])",
"ET_MODULE_LINEAR_DATA_PATH": "$(location fbcode//executorch/test/models:exported_program_and_data[ModuleLinear.ptd])",
}
Expand Down
Loading
Loading