@@ -69,9 +69,11 @@ TensorInfo::TensorInfo(
69
69
Span<const int32_t > sizes,
70
70
Span<const uint8_t > dim_order,
71
71
executorch::aten::ScalarType scalar_type,
72
- const bool is_memory_planned)
72
+ const bool is_memory_planned,
73
+ executorch::aten::string_view name)
73
74
: sizes_(sizes),
74
75
dim_order_ (dim_order),
76
+ name_(name),
75
77
scalar_type_(scalar_type),
76
78
is_memory_planned_(is_memory_planned),
77
79
nbytes_(calculate_nbytes(sizes_, scalar_type_)) {}
@@ -96,6 +98,10 @@ size_t TensorInfo::nbytes() const {
96
98
return nbytes_;
97
99
}
98
100
101
+ executorch::aten::string_view TensorInfo::name () const {
102
+ return name_;
103
+ }
104
+
99
105
/// Wraps a serialized execution plan; the pointer is borrowed, not owned,
/// and must outlive this MethodMeta.
MethodMeta::MethodMeta(const executorch_flatbuffer::ExecutionPlan* s_plan)
    : s_plan_(s_plan) {}
101
107
@@ -149,8 +155,9 @@ Result<TensorInfo> MethodMeta::input_tensor_meta(size_t index) const {
149
155
tensor_value->dim_order ()->data (), tensor_value->dim_order ()->size ()),
150
156
static_cast <executorch::aten::ScalarType>(tensor_value->scalar_type ()),
151
157
tensor_value->allocation_info () != nullptr ||
152
- tensor_value->data_buffer_idx () !=
153
- 0 ); // Count constant returns as memory planned.
158
+ tensor_value->data_buffer_idx () != 0 /* is_memory_planned */ ,
159
+ executorch::aten::string_view{nullptr , 0 }); // Count constant returns as
160
+ // memory planned.
154
161
}
155
162
156
163
size_t MethodMeta::num_outputs () const {
@@ -200,8 +207,60 @@ Result<TensorInfo> MethodMeta::output_tensor_meta(size_t index) const {
200
207
tensor_value->dim_order ()->data (), tensor_value->dim_order ()->size ()),
201
208
static_cast <executorch::aten::ScalarType>(tensor_value->scalar_type ()),
202
209
tensor_value->allocation_info () != nullptr ||
203
- tensor_value->data_buffer_idx () !=
204
- 0 ); // Count constant returns as memory planned.
210
+ tensor_value->data_buffer_idx () != 0 /* is_memory_planned */ ,
211
+ executorch::aten::string_view{nullptr , 0 }); // Count constant returns as
212
+ // memory planned.
213
+ }
214
+
215
+ size_t MethodMeta::num_attributes () const {
216
+ size_t counter = 0 ;
217
+ auto values = s_plan_->values ();
218
+ for (size_t i = 0 ; i < values->size (); ++i) {
219
+ auto value = values->Get (i);
220
+ if (value->val_type () == executorch_flatbuffer::KernelTypes::Tensor) {
221
+ auto tensor_value = value->val_as_Tensor ();
222
+ if (tensor_value->extra_tensor_info () != nullptr &&
223
+ tensor_value->extra_tensor_info ()->fully_qualified_name ()->c_str () !=
224
+ nullptr ) {
225
+ ++counter;
226
+ }
227
+ }
228
+ }
229
+ return counter;
230
+ }
231
+
232
+ Result<TensorInfo> MethodMeta::attribute_tensor_meta (size_t index) const {
233
+ size_t counter = 0 ;
234
+ auto values = s_plan_->values ();
235
+ for (size_t i = 0 ; i < values->size (); ++i) {
236
+ auto value = values->Get (i);
237
+ if (value->val_type () == executorch_flatbuffer::KernelTypes::Tensor) {
238
+ auto tensor_value = value->val_as_Tensor ();
239
+ if (tensor_value->extra_tensor_info () != nullptr &&
240
+ tensor_value->extra_tensor_info ()->fully_qualified_name ()->c_str () !=
241
+ nullptr ) {
242
+ if (counter == index ) {
243
+ auto t_name =
244
+ tensor_value->extra_tensor_info ()->fully_qualified_name ();
245
+ // Count constant returns as memory planned
246
+ return TensorInfo (
247
+ Span<const int32_t >(
248
+ tensor_value->sizes ()->data (), tensor_value->sizes ()->size ()),
249
+ Span<const uint8_t >(
250
+ tensor_value->dim_order ()->data (),
251
+ tensor_value->dim_order ()->size ()),
252
+ static_cast <executorch::aten::ScalarType>(
253
+ tensor_value->scalar_type ()),
254
+ tensor_value->allocation_info () != nullptr ||
255
+ tensor_value->data_buffer_idx () != 0 /* is_memory_planned */ ,
256
+ executorch::aten::string_view{t_name->c_str (), t_name->size ()});
257
+ }
258
+ ++counter;
259
+ }
260
+ }
261
+ }
262
+ ET_LOG (Error, " No attribute tensor found at index %zu" , index );
263
+ return Error::InvalidArgument;
205
264
}
206
265
207
266
size_t MethodMeta::num_memory_planned_buffers () const {
0 commit comments