/// Constructs a TensorInfo describing one tensor in a method's signature.
///
/// NOTE(review): sizes, dim_order and name are stored as views, not copies —
/// the backing data must outlive this object (it appears to come from the
/// serialized program; confirm lifetime at call sites).
///
/// @param sizes Sizes (dimensions) of the tensor.
/// @param dim_order Dim order of the tensor.
/// @param scalar_type Scalar element type of the tensor.
/// @param is_memory_planned Whether this tensor's memory is planned.
/// @param name Fully-qualified name of the tensor; may be an empty view for
///     tensors that have no name.
TensorInfo::TensorInfo(
    Span<const int32_t> sizes,
    Span<const uint8_t> dim_order,
    executorch::aten::ScalarType scalar_type,
    const bool is_memory_planned,
    executorch::aten::string_view name)
    : sizes_(sizes),
      dim_order_(dim_order),
      name_(name),
      scalar_type_(scalar_type),
      is_memory_planned_(is_memory_planned),
      // nbytes is derived once here so nbytes() is a trivial getter.
      nbytes_(calculate_nbytes(sizes_, scalar_type_)) {}
/// Returns the size of the tensor's data in bytes, precomputed at
/// construction from the sizes and scalar type.
size_t TensorInfo::nbytes() const {
  return nbytes_;
}
98
100
101
/// Returns the fully-qualified name of the tensor, or an empty view
/// ({nullptr, 0}) when the tensor has no name (e.g. plain inputs/outputs).
executorch::aten::string_view TensorInfo::name() const {
  return name_;
}
99
105
/// Wraps a serialized ExecutionPlan for metadata queries.
/// NOTE(review): the plan is held as a raw pointer, so it must outlive this
/// MethodMeta.
MethodMeta::MethodMeta(const executorch_flatbuffer::ExecutionPlan* s_plan)
    : s_plan_(s_plan) {}
@@ -150,7 +156,9 @@ Result<TensorInfo> MethodMeta::input_tensor_meta(size_t index) const {
150
156
static_cast <executorch::aten::ScalarType>(tensor_value->scalar_type ()),
151
157
tensor_value->allocation_info () != nullptr ||
152
158
tensor_value->data_buffer_idx () !=
153
- 0 ); // Count constant returns as memory planned.
159
+ 0 /* is_memory_planned */ ,
160
+ executorch::aten::string_view{nullptr , 0 }); // Count constant returns as memory
161
+ // planned.
154
162
}
155
163
156
164
size_t MethodMeta::num_outputs () const {
@@ -201,7 +209,60 @@ Result<TensorInfo> MethodMeta::output_tensor_meta(size_t index) const {
201
209
static_cast <executorch::aten::ScalarType>(tensor_value->scalar_type ()),
202
210
tensor_value->allocation_info () != nullptr ||
203
211
tensor_value->data_buffer_idx () !=
204
- 0 ); // Count constant returns as memory planned.
212
+ 0 /* is_memory_planned */ ,
213
+ executorch::aten::string_view{nullptr , 0 }); // Count constant returns as memory
214
+ // planned.
215
+ }
216
+
217
+ size_t MethodMeta::num_attributes () const {
218
+ size_t counter = 0 ;
219
+ auto values = s_plan_->values ();
220
+ for (size_t i = 0 ; i < values->size (); ++i) {
221
+ auto value = values->Get (i);
222
+ if (value->val_type () == executorch_flatbuffer::KernelTypes::Tensor) {
223
+ auto tensor_value = value->val_as_Tensor ();
224
+ if (tensor_value->extra_tensor_info () != nullptr &&
225
+ tensor_value->extra_tensor_info ()->fully_qualified_name ()->c_str () !=
226
+ nullptr ) {
227
+ ++counter;
228
+ }
229
+ }
230
+ }
231
+ return counter;
232
+ }
233
+
234
+ Result<TensorInfo> MethodMeta::attribute_tensor_meta (size_t index) const {
235
+ size_t counter = 0 ;
236
+ auto values = s_plan_->values ();
237
+ for (size_t i = 0 ; i < values->size (); ++i) {
238
+ auto value = values->Get (i);
239
+ if (value->val_type () == executorch_flatbuffer::KernelTypes::Tensor) {
240
+ auto tensor_value = value->val_as_Tensor ();
241
+ if (tensor_value->extra_tensor_info () != nullptr &&
242
+ tensor_value->extra_tensor_info ()->fully_qualified_name ()->c_str () !=
243
+ nullptr ) {
244
+ if (counter == index ) {
245
+ auto t_name =
246
+ tensor_value->extra_tensor_info ()->fully_qualified_name ();
247
+ // Count constant returns as memory planned
248
+ return TensorInfo (
249
+ Span<const int32_t >(
250
+ tensor_value->sizes ()->data (), tensor_value->sizes ()->size ()),
251
+ Span<const uint8_t >(
252
+ tensor_value->dim_order ()->data (),
253
+ tensor_value->dim_order ()->size ()),
254
+ static_cast <executorch::aten::ScalarType>(
255
+ tensor_value->scalar_type ()),
256
+ tensor_value->allocation_info () != nullptr ||
257
+ tensor_value->data_buffer_idx () != 0 /* is_memory_planned */ ,
258
+ executorch::aten::string_view{t_name->c_str (), t_name->size ()});
259
+ }
260
+ ++counter;
261
+ }
262
+ }
263
+ }
264
+ ET_LOG (Error, " No attribute tensor found at index %zu" , index );
265
+ return Error::InvalidArgument;
205
266
}
206
267
207
268
size_t MethodMeta::num_memory_planned_buffers () const {
0 commit comments