diff --git a/compiler/circledump/driver/Driver.cpp b/compiler/circledump/driver/Driver.cpp
index 5b0871a91ae..f5fd9f8268a 100644
--- a/compiler/circledump/driver/Driver.cpp
+++ b/compiler/circledump/driver/Driver.cpp
@@ -50,7 +50,11 @@ int entry(int argc, char **argv)
 
   std::cout << "Dump: " << circle_path << std::endl << std::endl;
 
-  std::cout << circlemodel << std::endl;
+  circledump::ModelEx modelex;
+  modelex.model = circlemodel;
+  modelex.rawdata = &modelData;
+
+  std::cout << modelex << std::endl;
 
   return 0;
 }
diff --git a/compiler/circledump/include/circledump/Dump.h b/compiler/circledump/include/circledump/Dump.h
index 594209a5def..2d25a6178c6 100644
--- a/compiler/circledump/include/circledump/Dump.h
+++ b/compiler/circledump/include/circledump/Dump.h
@@ -24,10 +24,16 @@
 namespace circledump
 {
 
-void dump_model(std::ostream &os, const circle::Model *model);
+struct ModelEx
+{
+  const circle::Model *model;
+  const std::vector<char> *rawdata;
+};
+
+void dump_model(std::ostream &os, const circledump::ModelEx &model);
 
 } // namespace circledump
 
-std::ostream &operator<<(std::ostream &os, const circle::Model *model);
+std::ostream &operator<<(std::ostream &os, const circledump::ModelEx &model);
 
 #endif // __CIRCLEDUMP_DUMP_H__
diff --git a/compiler/circledump/src/Dump.cpp b/compiler/circledump/src/Dump.cpp
index 166931648f8..48413c7a636 100644
--- a/compiler/circledump/src/Dump.cpp
+++ b/compiler/circledump/src/Dump.cpp
@@ -341,9 +341,9 @@ void dump_sub_graph(std::ostream &os, mio::circle::Reader &reader)
   os << std::endl;
 }
 
-void dump_model(std::ostream &os, const circle::Model *model)
+void dump_model(std::ostream &os, const circle::Model *model, const std::vector<char> *rawdata)
 {
-  mio::circle::Reader reader(model);
+  mio::circle::Reader reader(model, rawdata);
 
   uint32_t num_subgraph = reader.num_subgraph();
 
@@ -378,13 +378,17 @@ void dump_model(std::ostream &os, const circle::Model *model)
   os << std::endl;
 
   // dump buffer
-  os << "Buffers: B(index) (length) values, if any" << std::endl;
+  os << "Buffers: B(index) (length) values, if any; (length *) for large" << std::endl;
   for (uint32_t i = 0; i < buffers->size(); ++i)
   {
+    bool is_large;
     const uint8_t *buff_data;
-    size_t size = reader.buffer_info(i, &buff_data);
+    size_t size = reader.buffer_info(i, &buff_data, is_large);
 
-    os << "B(" << i << ") (" << size << ") ";
+    os << "B(" << i << ") (" << size;
+    if (is_large)
+      os << " *";
+    os << ") ";
     if (buff_data != nullptr)
     {
       dump_buffer(os, buff_data, size, 16);
@@ -460,8 +464,8 @@ void dump_model(std::ostream &os, const circle::Model *model)
 
 } // namespace circledump
 
-std::ostream &operator<<(std::ostream &os, const circle::Model *model)
+std::ostream &operator<<(std::ostream &os, const circledump::ModelEx &modelex)
 {
-  circledump::dump_model(os, model);
+  circledump::dump_model(os, modelex.model, modelex.rawdata);
   return os;
 }
diff --git a/compiler/mio-circle08/CMakeLists.txt b/compiler/mio-circle08/CMakeLists.txt
index 03e449d6e81..cee15c96993 100644
--- a/compiler/mio-circle08/CMakeLists.txt
+++ b/compiler/mio-circle08/CMakeLists.txt
@@ -19,7 +19,7 @@ add_custom_command(OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/schema.fbs"
   DEPENDS "${SCHEMA_FILE}"
 )
 
-FlatBuffers_Target(mio_circle08
+FlatBuffersMuteable_Target(mio_circle08
   OUTPUT_DIR "${CMAKE_CURRENT_BINARY_DIR}/gen/mio/circle"
   INCLUDE_DIR "${CMAKE_CURRENT_BINARY_DIR}/gen"
   SCHEMA_DIR "${CMAKE_CURRENT_BINARY_DIR}"
diff --git a/compiler/mio-circle08/include/mio_circle/Reader.h b/compiler/mio-circle08/include/mio_circle/Reader.h
index 723668f264e..f06e1eed05d 100644
--- a/compiler/mio-circle08/include/mio_circle/Reader.h
+++ b/compiler/mio-circle08/include/mio_circle/Reader.h
@@ -47,6 +47,7 @@ class Reader
 
 public:
   Reader(const ::circle::Model *model);
+  Reader(const ::circle::Model *model, const std::vector<char> *rawdata);
 
   Reader() = delete;
 
@@ -65,6 +66,7 @@ class Reader
   uint32_t num_subgraph() const { return _subgraphs->size(); }
 
   size_t buffer_info(uint32_t buf_idx, const uint8_t **buff_data);
+  size_t buffer_info(uint32_t buf_idx, const uint8_t **buff_data, bool &is_large);
   ::circle::BuiltinOperator builtin_code(const ::circle::Operator *op) const;
   std::string opcode_name(const ::circle::Operator *op) const;
   std::vector<int32_t> outputs(const ::circle::Operator *op) const;
@@ -79,6 +81,8 @@ class Reader
 private:
   uint32_t _version;
 
+  const std::vector<char> *_rawdata{nullptr};
+
   const CircleSubGraphs_t *_subgraphs{nullptr};
   const CircleBuffers_t *_buffers{nullptr};
   const CircleTensors_t *_tensors{nullptr};
diff --git a/compiler/mio-circle08/src/Reader.cpp b/compiler/mio-circle08/src/Reader.cpp
index e4df6d04d54..26594c85eb1 100644
--- a/compiler/mio-circle08/src/Reader.cpp
+++ b/compiler/mio-circle08/src/Reader.cpp
@@ -45,6 +45,28 @@ Reader::Reader(const ::circle::Model *model)
   }
 }
 
+Reader::Reader(const ::circle::Model *model, const std::vector<char> *rawdata)
+{
+  if (model == nullptr)
+  {
+    throw std::runtime_error("Invalid model");
+  }
+
+  _rawdata = rawdata;
+
+  _version = model->version();
+  _subgraphs = model->subgraphs();
+  _buffers = model->buffers();
+  _metadata = model->metadata();
+  _signature_defs = model->signature_defs();
+
+  auto opcodes = model->operator_codes();
+  for (const ::circle::OperatorCode *opcode : *opcodes)
+  {
+    _op_codes.push_back(opcode);
+  }
+}
+
 size_t Reader::buffer_info(uint32_t buf_idx, const uint8_t **buff_data)
 {
   if (buff_data != nullptr)
@@ -73,6 +95,47 @@ size_t Reader::buffer_info(uint32_t buf_idx, const uint8_t **buff_data)
   return 0;
 }
 
+size_t Reader::buffer_info(uint32_t buf_idx, const uint8_t **buff_data, bool &is_large)
+{
+  is_large = false;
+
+  if (buff_data != nullptr)
+  {
+    *buff_data = nullptr;
+  }
+
+  if (buf_idx == 0)
+    return 0;
+
+  if (auto *buffer = (*_buffers)[buf_idx])
+  {
+    auto buffer_offset = buffer->offset();
+    if (buffer->offset() > 1)
+    {
+      assert(_rawdata);
+      if (_rawdata == nullptr)
+        return 0;
+
+      is_large = true;
+      *buff_data = reinterpret_cast<const uint8_t *>(&_rawdata->at(buffer_offset));
+      return buffer->size();
+    }
+    else if (auto *array = buffer->data())
+    {
+      if (size_t size = array->size())
+      {
+        if (buff_data != nullptr)
+        {
+          *buff_data = reinterpret_cast<const uint8_t *>(array->data());
+        }
+        return size;
+      }
+    }
+  }
+
+  return 0;
+}
+
 ::circle::BuiltinOperator Reader::builtin_code(const ::circle::Operator *op) const
 {
   uint32_t index = op->opcode_index();
diff --git a/compiler/mio-tflite2121/CMakeLists.txt b/compiler/mio-tflite2121/CMakeLists.txt
index 371118be83c..3cd9662d460 100644
--- a/compiler/mio-tflite2121/CMakeLists.txt
+++ b/compiler/mio-tflite2121/CMakeLists.txt
@@ -15,7 +15,9 @@ endif(NOT TensorFlowSource_FOUND)
 message(STATUS "Build mio-tflite2121: TRUE")
 message(STATUS "Build mio-tflite2121: with ${TensorFlowSource_DIR}")
 
-set(SCHEMA_FILE "${TensorFlowSource_DIR}/tensorflow/lite/schema/schema.fbs")
+# TODO rollback to use TensorFlowSource_DIR after upgrade
+#set(SCHEMA_FILE "${TensorFlowSource_DIR}/tensorflow/lite/schema/schema.fbs")
+set(SCHEMA_FILE "${NNAS_PROJECT_SOURCE_DIR}/res/TensorFlowLiteSchema/2.16.1/schema.fbs")
 
 # NOTE Use copy of schema.fbs as to provide unified way for circle also
add_custom_command(OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/schema.fbs" @@ -24,7 +26,7 @@ add_custom_command(OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/schema.fbs" DEPENDS "${SCHEMA_FILE}" ) -FlatBuffers_Target(mio_tflite2121 +FlatBuffersMuteable_Target(mio_tflite2121 OUTPUT_DIR "${CMAKE_CURRENT_BINARY_DIR}/gen/mio/tflite" INCLUDE_DIR "${CMAKE_CURRENT_BINARY_DIR}/gen" SCHEMA_DIR "${CMAKE_CURRENT_BINARY_DIR}" diff --git a/compiler/tflchef/core/src/ModelChef.cpp b/compiler/tflchef/core/src/ModelChef.cpp index 8b50b09f986..964cfb84c34 100644 --- a/compiler/tflchef/core/src/ModelChef.cpp +++ b/compiler/tflchef/core/src/ModelChef.cpp @@ -199,6 +199,8 @@ class ModelChef template void cook_graph(const T &graph, std::map &symbol_table); + bool finalize_ext_buffer(void); + public: const char *get_buffer_pointer(void) const; size_t get_size(void) const; @@ -223,6 +225,11 @@ class ModelChef std::vector> _operator_vec; std::string _graph_name; + + // store Buffer data to external of FB and use (Buffer) offset/size fields + bool _ext_offset = false; + std::map> _buffer_data_map; + std::string _ext_data; }; void ModelChef::init(void) @@ -355,16 +362,27 @@ template void ModelChef::cook_operands(const T &graph) sparse_uint8.emplace_back(arr[b]); } } - auto data = _flatbuffer_builder->CreateVector(sparse_uint8); - - // Create Buffer - tflite::BufferBuilder buffer_builder{*_flatbuffer_builder}; - buffer_builder.add_data(data); - auto buffer = buffer_builder.Finish(); + if (_ext_offset) + { + buffer_index = _buffer_vec.size(); + _buffer_data_map[buffer_index] = sparse_uint8; - // Update Buffer Index & Vector - buffer_index = _buffer_vec.size(); - _buffer_vec.emplace_back(buffer); + auto buffer = tflite::CreateBuffer(*_flatbuffer_builder, 0, 1, 1); + _buffer_vec.emplace_back(buffer); + //_buffer_offset_map[buffer_index] = buffer; + } + else + { + auto data = _flatbuffer_builder->CreateVector(sparse_uint8); + // Create Buffer + tflite::BufferBuilder buffer_builder{*_flatbuffer_builder}; + buffer_builder.add_data(data); + auto buffer = buffer_builder.Finish(); + + // Update Buffer Index & Vector + buffer_index = _buffer_vec.size(); + _buffer_vec.emplace_back(buffer); + } // save SparsityParameters auto traversal_order = _flatbuffer_builder->CreateVector(traversal_order_vec); @@ -398,16 +416,28 @@ template void ModelChef::cook_operands(const T &graph) sparse_uint8.emplace_back(arr[b]); } } - auto data = _flatbuffer_builder->CreateVector(sparse_uint8); + if (_ext_offset) + { + buffer_index = _buffer_vec.size(); + _buffer_data_map[buffer_index] = sparse_uint8; - // Create Buffer - tflite::BufferBuilder buffer_builder{*_flatbuffer_builder}; - buffer_builder.add_data(data); - auto buffer = buffer_builder.Finish(); + auto buffer = tflite::CreateBuffer(*_flatbuffer_builder, 0, 1, 1); + _buffer_vec.emplace_back(buffer); + //_buffer_offset_map[buffer_index] = buffer; + } + else + { + auto data = _flatbuffer_builder->CreateVector(sparse_uint8); - // Update Buffer Index & Vector - buffer_index = _buffer_vec.size(); - _buffer_vec.emplace_back(buffer); + // Create Buffer + tflite::BufferBuilder buffer_builder{*_flatbuffer_builder}; + buffer_builder.add_data(data); + auto buffer = buffer_builder.Finish(); + + // Update Buffer Index & Vector + buffer_index = _buffer_vec.size(); + _buffer_vec.emplace_back(buffer); + } // save SparsityParameters auto traversal_order = _flatbuffer_builder->CreateVector(traversal_order_vec); @@ -447,16 +477,28 @@ template void ModelChef::cook_operands(const T &graph) data_vec = data_packed; } - auto data = 
_flatbuffer_builder->CreateVector(data_vec); + if (_ext_offset) + { + buffer_index = _buffer_vec.size(); + _buffer_data_map[buffer_index] = data_vec; - // Create Buffer - tflite::BufferBuilder buffer_builder{*_flatbuffer_builder}; - buffer_builder.add_data(data); - auto buffer = buffer_builder.Finish(); + auto buffer = tflite::CreateBuffer(*_flatbuffer_builder, 0, 1, 1); + _buffer_vec.emplace_back(buffer); + //_buffer_offset_map[buffer_index] = buffer; + } + else + { + auto data = _flatbuffer_builder->CreateVector(data_vec); - // Update Buffer Index & Vector - buffer_index = _buffer_vec.size(); - _buffer_vec.emplace_back(buffer); + // Create Buffer + tflite::BufferBuilder buffer_builder{*_flatbuffer_builder}; + buffer_builder.add_data(data); + auto buffer = buffer_builder.Finish(); + + // Update Buffer Index & Vector + buffer_index = _buffer_vec.size(); + _buffer_vec.emplace_back(buffer); + } } } else @@ -690,18 +732,17 @@ void ModelChef::cook_graph(const T &graph, std::map &symbo assert(_operator_vec.empty()); // FIX_CALLER_UNLESS // default name for graph - std::string graph_name = _graph_name; if (graph.has_name()) - graph_name = graph.name(); + _graph_name = graph.name(); - auto lookup = [&symbol_table, &graph_name](const std::string &name) { + auto lookup = [&](const std::string &name) { if (symbol_table.find(name) != symbol_table.end()) return symbol_table.at(name); else if (name == "") return -1; // -1 in TFLite means that optional input tensor is empty. else { - std::string msg = "tflchef : input not found in " + graph_name + " graph"; + std::string msg = "tflchef : input not found in " + _graph_name + " graph"; throw std::runtime_error(msg.c_str()); } }; @@ -730,7 +771,7 @@ void ModelChef::cook_graph(const T &graph, std::map &symbo auto inputs = _flatbuffer_builder->CreateVector(input_vec); auto outputs = _flatbuffer_builder->CreateVector(output_vec); auto operators = _flatbuffer_builder->CreateVector(_operator_vec); - auto name = _flatbuffer_builder->CreateString(graph_name); + auto name = _flatbuffer_builder->CreateString(_graph_name); tflite::SubGraphBuilder subgraph_builder{*_flatbuffer_builder}; @@ -883,65 +924,74 @@ void ModelChef::gather_signature_defs(const ::tflchef::ModelRecipe &model_recipe } } -void ModelChef::cook(const ::tflchef::ModelRecipe &model_recipe) +bool ModelChef::finalize_ext_buffer(void) { - prepare_initial_buffer(); + // NOTE modification of std::string object in the middle may reallocate it. + // we will use std::string::reserve() to prevent this. 
- gather_operator_codes(model_recipe); + auto align16 = [](size_t &v) { + while (v % 16 != 0) + v++; + }; -#if 0 - // Create OperatorCode with Builtin Operator - _builtin_code_map = gather_builtincode_map(model_recipe); - for (auto const &opcode : _builtin_code_map) + // get total memory for flatbuffer + all buffer_data + size_t result_size = _flatbuffer_builder->GetSize(); + align16(result_size); + for (auto &it : _buffer_data_map) { - tflite::OperatorCodeBuilder code_builder{*_flatbuffer_builder}; - // 127 is BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES - // This is the way to handle deprecated builtin code - // See - // https://github.com/tensorflow/tensorflow/blob/a0afe8f9218be5eb9ed5dffc2dff652996da8c28/tensorflow/lite/schema/schema.fbs#L1061-L1077 - if (opcode.first < 127) - { - code_builder.add_deprecated_builtin_code(opcode.first); - } - else - { - code_builder.add_deprecated_builtin_code( - ::tflite::BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES); - } - code_builder.add_version(opcode.second); - code_builder.add_builtin_code(opcode.first); - auto code = code_builder.Finish(); - // Update OperatorCode vector - _code_vec.emplace_back(code); + std::vector &buffer_data = it.second; + result_size += buffer_data.size(); + align16(result_size); } + align16(result_size); + result_size += 16; // additional for safety - // Create OperatorCode with Custom Operator - std::set custom_code_set = gather_customcode_set(model_recipe); - _custom_code_vec = {custom_code_set.begin(), custom_code_set.end()}; + std::string result; + auto *buff_ptr = reinterpret_cast(_flatbuffer_builder->GetBufferPointer()); - for (auto opcode : _custom_code_vec) - { - auto custom_code = _flatbuffer_builder->CreateString(opcode); - tflite::OperatorCodeBuilder code_builder{*_flatbuffer_builder}; - code_builder.add_deprecated_builtin_code(tflite::BuiltinOperator_CUSTOM); - code_builder.add_custom_code(custom_code); - code_builder.add_builtin_code(tflite::BuiltinOperator_CUSTOM); - auto code = code_builder.Finish(); - // Update OperatorCode vector - _code_vec.emplace_back(code); - } -#endif + auto padalign16 = [](std::string &str) { + while (str.size() % 16 != 0) + str += '\0'; + }; -#if 0 - // Create an Empty Buffer - // - // Buffer 0 SHOULD be an empty buffer in TensorFlow Lite model file - // (Please refer to the comment for Tensor.buffer field in schema) + result.reserve(result_size); + result.append(buff_ptr, _flatbuffer_builder->GetSize()); + + auto mutable_model = tflite::GetMutableModel(result.data()); + auto mutable_buffers = mutable_model->mutable_buffers(); + bool ret = true; + + padalign16(result); + for (auto &it : _buffer_data_map) { - tflite::BufferBuilder buffer_builder{*_flatbuffer_builder}; - _buffer_vec.emplace_back(buffer_builder.Finish()); + int32_t buffer_index = it.first; + std::vector &buffer_data = it.second; + uint64_t offset = result.size(); + uint64_t size = buffer_data.size(); + + tflite::Buffer *mutable_buffer = mutable_buffers->GetMutableObject(buffer_index); + ret &= mutable_buffer->mutate_offset(offset); + ret &= mutable_buffer->mutate_size(size); + + result.append(buffer_data.begin(), buffer_data.end()); + padalign16(result); } -#endif + padalign16(result); + + // use final result + _ext_data = result; + + return ret; +} + +void ModelChef::cook(const ::tflchef::ModelRecipe &model_recipe) +{ + // use Custom/Buffer offset + _ext_offset = model_recipe.has_ext_offset() ? 
model_recipe.ext_offset() : false; + + prepare_initial_buffer(); + + gather_operator_codes(model_recipe); // // Create Main graph @@ -974,90 +1024,6 @@ void ModelChef::cook(const ::tflchef::ModelRecipe &model_recipe) gather_signature_defs(model_recipe); -#if 0 - // Create Signature-Def - // - for (int s = 0; s < model_recipe.signature_def_size(); ++s) - { - // load from recipe - const auto &rec_signature_def = model_recipe.signature_def(s); - - std::vector> tensormap_inputs; - std::vector> tensormap_outputs; - - // which subgraph index to cook - auto subgraph_index = 0; - if (rec_signature_def.has_subgraph_index()) - { - subgraph_index = rec_signature_def.subgraph_index(); - } - assert(subgraph_index < _symbol_tables.size()); - auto &symbol_table = _symbol_tables[subgraph_index]; - - // cook for inputs - for (int si = 0; si < rec_signature_def.inputs_size(); ++si) - { - // recipe for input TensorMap - auto rec_tm_input = rec_signature_def.inputs(si); - auto name = _flatbuffer_builder->CreateString(rec_tm_input.name()); - uint32_t tensor_index = 0; - // either tensor or tensor_index should exist - assert(rec_tm_input.has_tensor() || rec_tm_input.has_tensor_index()); - if (rec_tm_input.has_tensor()) - { - // we can get tensor_index from symbol_table - auto tensor = rec_tm_input.tensor(); - tensor_index = symbol_table[tensor]; - } - else - { - // or we can use tensor_index itself - tensor_index = rec_tm_input.tensor_index(); - } - - ::tflite::TensorMapBuilder tensormap_builder{*_flatbuffer_builder}; - tensormap_builder.add_name(name); - tensormap_builder.add_tensor_index(tensor_index); - tensormap_inputs.push_back(tensormap_builder.Finish()); - } - // cook for outputs, same as inputs - for (int so = 0; so < rec_signature_def.outputs_size(); ++so) - { - auto rec_tm_output = rec_signature_def.outputs(so); - auto name = _flatbuffer_builder->CreateString(rec_tm_output.name()); - uint32_t tensor_index = 0; - assert(rec_tm_output.has_tensor() || rec_tm_output.has_tensor_index()); - if (rec_tm_output.has_tensor()) - { - auto tensor = rec_tm_output.tensor(); - tensor_index = symbol_table[tensor]; - } - else - { - tensor_index = rec_tm_output.tensor_index(); - } - - ::tflite::TensorMapBuilder tensormap_builder{*_flatbuffer_builder}; - tensormap_builder.add_name(name); - tensormap_builder.add_tensor_index(tensor_index); - tensormap_outputs.push_back(tensormap_builder.Finish()); - } - - auto inputs = _flatbuffer_builder->CreateVector(tensormap_inputs); - auto outputs = _flatbuffer_builder->CreateVector(tensormap_outputs); - auto signature_key = _flatbuffer_builder->CreateString(rec_signature_def.signature_key()); - // TODO add validation for signature_key - - ::tflite::SignatureDefBuilder signature_def_builder{*_flatbuffer_builder}; - signature_def_builder.add_inputs(inputs); - signature_def_builder.add_outputs(outputs); - signature_def_builder.add_signature_key(signature_key); - signature_def_builder.add_subgraph_index(rec_signature_def.subgraph_index()); - - _signdef_vec.emplace_back(signature_def_builder.Finish()); - } -#endif - // Create "Model" arguments auto buffers = _flatbuffer_builder->CreateVector(_buffer_vec); auto signdefs = _flatbuffer_builder->CreateVector(_signdef_vec); @@ -1079,17 +1045,22 @@ void ModelChef::cook(const ::tflchef::ModelRecipe &model_recipe) // Finalize ::tflite::FinishModelBuffer(*_flatbuffer_builder, model); + + if (_ext_offset) + finalize_ext_buffer(); } const char *ModelChef::get_buffer_pointer(void) const { - // + if (_ext_offset) + return _ext_data.data(); return 
reinterpret_cast(_flatbuffer_builder->GetBufferPointer()); } size_t ModelChef::get_size(void) const { - // + if (_ext_offset) + return _ext_data.size(); return _flatbuffer_builder->GetSize(); } diff --git a/compiler/tflchef/proto/tflchef.proto b/compiler/tflchef/proto/tflchef.proto index e4ae5d9b65b..59995f13e71 100644 --- a/compiler/tflchef/proto/tflchef.proto +++ b/compiler/tflchef/proto/tflchef.proto @@ -719,4 +719,6 @@ message ModelRecipe { optional uint32 version = 6 [default = 1]; repeated Graph graph = 7; repeated SignatureDef signature_def = 8; + // store to external and use (Buffer) offset + optional bool ext_offset = 9 [default = false]; } diff --git a/compiler/tflchef/tests/ext_offset/test.recipe b/compiler/tflchef/tests/ext_offset/test.recipe new file mode 100644 index 00000000000..fb94e9c7b26 --- /dev/null +++ b/compiler/tflchef/tests/ext_offset/test.recipe @@ -0,0 +1,44 @@ +operand { + name: "ifm" + type: FLOAT32 + shape { dim: 1 dim: 3 dim: 3 dim: 2 } +} +operand { + name: "ker" + type: FLOAT32 + shape { dim: 1 dim: 1 dim: 1 dim: 2 } + filler { + tag: "explicit" + arg: "1.1" + arg: "2.2" + } +} +operand { + name: "bias" + type: FLOAT32 + shape { dim: 1 } + filler { + tag: "constant" + arg: "3.3" + } +} +operand { + name: "ofm" + type: FLOAT32 + shape { dim: 1 dim: 3 dim: 3 dim: 1 } +} +operation { + type: "Conv2D" + conv2d_options { + padding: VALID + stride_w: 1 + stride_h: 1 + } + input: "ifm" + input: "ker" + input: "bias" + output: "ofm" +} +input: "ifm" +output: "ofm" +ext_offset: true diff --git a/compiler/tfldump/driver/Driver.cpp b/compiler/tfldump/driver/Driver.cpp index a3e748be1d1..1fb5725a6de 100644 --- a/compiler/tfldump/driver/Driver.cpp +++ b/compiler/tfldump/driver/Driver.cpp @@ -49,7 +49,11 @@ int entry(int argc, char **argv) std::cout << "Dump: " << tflite_path << std::endl << std::endl; - std::cout << tflmodel << std::endl; + tfldump::ModelEx modelex; + modelex.model = tflmodel; + modelex.rawdata = &modelData; + + std::cout << modelex << std::endl; return 0; } diff --git a/compiler/tfldump/include/tfldump/Dump.h b/compiler/tfldump/include/tfldump/Dump.h index af04bb1327d..c352967887b 100644 --- a/compiler/tfldump/include/tfldump/Dump.h +++ b/compiler/tfldump/include/tfldump/Dump.h @@ -24,9 +24,16 @@ namespace tfldump { -void dump_model(std::ostream &os, const tflite::Model *model); -} +struct ModelEx +{ + const tflite::Model *model; + const std::vector *rawdata; +}; + +void dump_model(std::ostream &os, const ModelEx &model); + +} // namespace tfldump -std::ostream &operator<<(std::ostream &os, const tflite::Model *model); +std::ostream &operator<<(std::ostream &os, const tfldump::ModelEx &model); #endif // __TFLDUMP_DUMP_H__ diff --git a/compiler/tfldump/src/Dump.cpp b/compiler/tfldump/src/Dump.cpp index 7139f9ca46c..34e0293ff01 100644 --- a/compiler/tfldump/src/Dump.cpp +++ b/compiler/tfldump/src/Dump.cpp @@ -340,9 +340,9 @@ void dump_sub_graph(std::ostream &os, tflread::Reader &reader) os << std::endl; } -void dump_model(std::ostream &os, const tflite::Model *model) +void dump_model(std::ostream &os, const tflite::Model *model, const std::vector *rawdata) { - tflread::Reader reader(model); + tflread::Reader reader(model, rawdata); uint32_t num_subgraph = reader.num_subgraph(); @@ -376,13 +376,17 @@ void dump_model(std::ostream &os, const tflite::Model *model) os << std::endl; // dump buffer - os << "Buffers: B(index) (length) values, if any" << std::endl; + os << "Buffers: B(index) (length) values, if any; (length *) for large" << std::endl; for 
(uint32_t i = 0; i < buffers->size(); ++i) { + bool is_large; const uint8_t *buff_data; - size_t size = reader.buffer_info(i, &buff_data); + size_t size = reader.buffer_info(i, &buff_data, is_large); - os << "B(" << i << ") (" << size << ") "; + os << "B(" << i << ") (" << size; + if (is_large) + os << " *"; + os << ") "; if (buff_data != nullptr) { dump_buffer(os, buff_data, size, 16); @@ -450,8 +454,8 @@ void dump_model(std::ostream &os, const tflite::Model *model) } // namespace tfldump -std::ostream &operator<<(std::ostream &os, const tflite::Model *model) +std::ostream &operator<<(std::ostream &os, const tfldump::ModelEx &modelex) { - tfldump::dump_model(os, model); + tfldump::dump_model(os, modelex.model, modelex.rawdata); return os; } diff --git a/compiler/tfldump/src/Read.cpp b/compiler/tfldump/src/Read.cpp index f55d86dda5b..d1b0899ae19 100644 --- a/compiler/tfldump/src/Read.cpp +++ b/compiler/tfldump/src/Read.cpp @@ -18,14 +18,17 @@ #include +#include #include #include namespace tflread { -Reader::Reader(const tflite::Model *model) +Reader::Reader(const tflite::Model *model, const std::vector *rawdata) { + _rawdata = rawdata; + _version = model->version(); _subgraphs = model->subgraphs(); _buffers = model->buffers(); @@ -39,16 +42,24 @@ Reader::Reader(const tflite::Model *model) } } -size_t Reader::buffer_info(uint32_t buf_idx, const uint8_t **buff_data) +size_t Reader::buffer_info(uint32_t buf_idx, const uint8_t **buff_data, bool &is_large) { *buff_data = nullptr; + is_large = false; if (buf_idx == 0) return 0; if (auto *buffer = (*_buffers)[buf_idx]) { - if (auto *array = buffer->data()) + auto buffer_offset = buffer->offset(); + if (buffer->offset() > 1) + { + is_large = true; + *buff_data = reinterpret_cast(&_rawdata->at(buffer_offset)); + return buffer->size(); + } + else if (auto *array = buffer->data()) { if (size_t size = array->size()) { @@ -56,6 +67,13 @@ size_t Reader::buffer_info(uint32_t buf_idx, const uint8_t **buff_data) return size; } } + else + { + if (buffer->offset() == 1 && buffer->size() == 1) + { + std::cerr << "Buffer " << buf_idx << " is invalid large buffer." 
<< std::endl; + } + } } return 0; diff --git a/compiler/tfldump/src/Read.h b/compiler/tfldump/src/Read.h index fb4d330e70d..22be0b2424a 100644 --- a/compiler/tfldump/src/Read.h +++ b/compiler/tfldump/src/Read.h @@ -50,7 +50,7 @@ class Reader using TFliteSignatureDef_t = flatbuffers::Vector>; public: - Reader(const tflite::Model *model); + Reader(const tflite::Model *model, const std::vector *rawdata); Reader() = delete; @@ -68,7 +68,7 @@ class Reader uint32_t num_subgraph() const { return _subgraphs->size(); } - size_t buffer_info(uint32_t buf_idx, const uint8_t **buff_data); + size_t buffer_info(uint32_t buf_idx, const uint8_t **buff_data, bool &is_large); tflite::BuiltinOperator builtin_code(const tflite::Operator *op) const; std::string opcode_name(const tflite::Operator *op) const; @@ -80,6 +80,8 @@ class Reader private: uint32_t _version; + const std::vector *_rawdata{nullptr}; + const TFliteSubGraphs_t *_subgraphs{nullptr}; const TFliteBuffers_t *_buffers{nullptr}; const TFliteTensors_t *_tensors{nullptr}; diff --git a/compiler/tflite2circle/driver/Driver.cpp b/compiler/tflite2circle/driver/Driver.cpp index 6afe1b0f272..ab4b9566342 100644 --- a/compiler/tflite2circle/driver/Driver.cpp +++ b/compiler/tflite2circle/driver/Driver.cpp @@ -67,10 +67,12 @@ int entry(int argc, char **argv) auto flatbuffer_builder = std::make_unique(1024); // convert tflite to circle - tflite2circle::CircleModel circle_model{flatbuffer_builder}; + const std::vector &raw_data = tfl_model.raw_data(); + tflite2circle::CircleModel circle_model{flatbuffer_builder, raw_data}; circle_model.load_offsets(tfl_model.get_model()); circle_model.model_build(); + circle_model.finalize(); std::ofstream outfile{circle_path, std::ios::binary}; diff --git a/compiler/tflite2circle/include/CircleModel.h b/compiler/tflite2circle/include/CircleModel.h index 189cfaff23f..5e9f0335bed 100644 --- a/compiler/tflite2circle/include/CircleModel.h +++ b/compiler/tflite2circle/include/CircleModel.h @@ -55,6 +55,9 @@ struct MetaDataBufferLink using CIR = int32_t; }; +using BufferData = std::vector; +using MapBufferData = std::map; + template class Offset { private: @@ -68,6 +71,8 @@ template class Offset public: void set_signature_defs(const SignatureDefs *offset) { _tfl_signature_def_offsets = offset; } + void set_buffer_data_map(MapBufferData *map) { _buffer_data_map = map; } + void set_file_raw(const std::vector *raw) { _file_raw = raw; } public: void build(const TFLFlatBufVec *tflite_flatbuffer_vec); @@ -80,6 +85,9 @@ template class Offset CIRFlatBufVecOffset _circle_flatbuffer_vec_offset; // TODO revise this when Circle supports SignatureDef const SignatureDefs *_tfl_signature_def_offsets = nullptr; + // for extended buffer for size > 2G + const std::vector *_file_raw = nullptr; + MapBufferData *_buffer_data_map = nullptr; }; class CircleModel @@ -89,11 +97,12 @@ class CircleModel public: CircleModel(void) = delete; - CircleModel(FlatBufBuilder &fb); + CircleModel(FlatBufBuilder &fb, const std::vector &fr); public: void load_offsets(const tflite::Model *tfl_model); void model_build(void) const; + void finalize(void); const char *base(void) const; size_t size(void) const; @@ -101,10 +110,14 @@ class CircleModel uint32_t _version; Description _description; FlatBufBuilder &_fb; + const std::vector &_file_raw; std::unique_ptr> _operator_codes_offset; std::unique_ptr> _subGraphs_offset; std::unique_ptr> _buffers_offset; std::unique_ptr> _metadata_buffer_offset; + + MapBufferData _buffer_data_map; + std::string _fb_data_with_ext; }; } // 
namespace tflite2circle diff --git a/compiler/tflite2circle/include/TFLModel.h b/compiler/tflite2circle/include/TFLModel.h index 507667bb903..111a5b9fefc 100644 --- a/compiler/tflite2circle/include/TFLModel.h +++ b/compiler/tflite2circle/include/TFLModel.h @@ -38,6 +38,8 @@ class TFLModel public: const tflite::Model *get_model(void); + // NOTE TFLModel lifetime should be longer than users + const std::vector &raw_data(void) const { return _data; } public: bool verify_data(void); diff --git a/compiler/tflite2circle/src/CircleModel.cpp b/compiler/tflite2circle/src/CircleModel.cpp index c9465a0c615..5195f1cc291 100644 --- a/compiler/tflite2circle/src/CircleModel.cpp +++ b/compiler/tflite2circle/src/CircleModel.cpp @@ -43,15 +43,36 @@ template <> void Offset::build(const TFLFlatBufVec *tflite_flatbuffe for (auto it : *tflite_flatbuffer_vec) { flatbuffers::Offset> buffer_data; - if (it->data()) + const auto tflbuff_data = it->data(); + const auto tflbuff_offset = it->offset(); + const auto tflbuff_size = it->size(); + if (tflbuff_offset > 1) { - std::vector data_vec{it->data()->begin(), it->data()->end()}; - buffer_data = _fb->CreateVector(data_vec); + assert(_buffer_data_map && _file_raw); + if (_buffer_data_map && _file_raw) + { + int32_t buffer_index = buffers_vec.size(); + + auto *file_data_ptr = reinterpret_cast(_file_raw->data()) + tflbuff_offset; + std::vector buffer_data(file_data_ptr, file_data_ptr + tflbuff_size); + _buffer_data_map->emplace(buffer_index, buffer_data); + + auto buffer = circle::CreateBuffer(*_fb.get(), 0, 1, 1); + buffers_vec.emplace_back(buffer); + } + } + else + { + if (tflbuff_data) + { + std::vector data_vec{tflbuff_data->begin(), tflbuff_data->end()}; + buffer_data = _fb->CreateVector(data_vec); + } + circle::BufferBuilder circle_buffer_builder{*_fb}; + circle_buffer_builder.add_data(buffer_data); + auto circle_buffers = circle_buffer_builder.Finish(); + buffers_vec.emplace_back(circle_buffers); } - circle::BufferBuilder circle_buffer_builder{*_fb}; - circle_buffer_builder.add_data(buffer_data); - auto circle_buffers = circle_buffer_builder.Finish(); - buffers_vec.emplace_back(circle_buffers); } _circle_flatbuffer_vec_offset = _fb->CreateVector(buffers_vec); } @@ -376,8 +397,8 @@ template <> void Offset::build(const TFLFlatBufVec *tflite_fla _circle_flatbuffer_vec_offset = _fb->CreateVector(operator_code_vec); } -CircleModel::CircleModel(FlatBufBuilder &fb) - : _version{0}, _description{fb->CreateString("ONE-tflite2circle")}, _fb{fb} +CircleModel::CircleModel(FlatBufBuilder &fb, const std::vector &fr) + : _version{0}, _description{fb->CreateString("ONE-tflite2circle")}, _fb{fb}, _file_raw{fr} { // NOTHING TODO } @@ -390,6 +411,8 @@ void CircleModel::load_offsets(const tflite::Model *tfl_model) _metadata_buffer_offset = std::make_unique>(_fb); _subGraphs_offset->set_signature_defs(tfl_model->signature_defs()); + _buffers_offset->set_buffer_data_map(&_buffer_data_map); + _buffers_offset->set_file_raw(&_file_raw); _operator_codes_offset->build(tfl_model->operator_codes()); _subGraphs_offset->build(tfl_model->subgraphs()); @@ -412,11 +435,79 @@ void CircleModel::model_build(void) const circle::FinishModelBuffer(*_fb, model); } +void CircleModel::finalize(void) +{ + if (_buffer_data_map.empty()) + return; + + auto align16 = [](size_t &v) { + while (v % 16 != 0) + v++; + }; + + // get total memory for flatbuffer + all buffer_data + size_t result_size = _fb->GetSize(); + align16(result_size); + for (auto &it : _buffer_data_map) + { + BufferData &buffer_data = it.second; 
+ result_size += buffer_data.size(); + align16(result_size); + } + align16(result_size); + result_size += 16; // for safety + + std::string result; + const char *buff_ptr = reinterpret_cast(_fb->GetBufferPointer()); + + auto padalign16 = [](std::string &str) { + while (str.size() % 16 != 0) + str += '\0'; + }; + + result.reserve(result_size); + result.append(buff_ptr, _fb->GetSize()); + + if (_buffer_data_map.size() > 0) + { + auto mutable_model = circle::GetMutableModel(result.data()); + auto mutable_buffers = mutable_model->mutable_buffers(); + + // pad to be 16 bytes aligned + padalign16(result); + for (auto &it : _buffer_data_map) + { + int32_t buffer_index = it.first; + BufferData &buffer_data = it.second; + uint64_t offset = result.size(); + uint64_t size = buffer_data.size(); + + circle::Buffer *mutable_buffer = mutable_buffers->GetMutableObject(buffer_index); + mutable_buffer->mutate_offset(offset); + mutable_buffer->mutate_size(size); + + result.append(buffer_data.begin(), buffer_data.end()); + padalign16(result); + } + padalign16(result); + } + + // use final result + _fb_data_with_ext = result; +} + const char *CircleModel::base(void) const { - return reinterpret_cast(_fb->GetBufferPointer()); + if (_buffer_data_map.empty()) + return reinterpret_cast(_fb->GetBufferPointer()); + return reinterpret_cast(_fb_data_with_ext.data()); } -size_t CircleModel::size(void) const { return _fb->GetSize(); } +size_t CircleModel::size(void) const +{ + if (_buffer_data_map.empty()) + return _fb->GetSize(); + return _fb_data_with_ext.size(); +} } // namespace tflite2circle diff --git a/infra/cmake/packages/FlatBuffers-23.5.26/FlatBuffersConfig.cmake b/infra/cmake/packages/FlatBuffers-23.5.26/FlatBuffersConfig.cmake index d98882a32cc..cc77b844d1d 100644 --- a/infra/cmake/packages/FlatBuffers-23.5.26/FlatBuffersConfig.cmake +++ b/infra/cmake/packages/FlatBuffers-23.5.26/FlatBuffersConfig.cmake @@ -132,4 +132,53 @@ if(FlatBuffers_FOUND) target_include_directories(${TGT} PUBLIC "${ARG_INCLUDE_DIR}") target_link_libraries(${TGT} PUBLIC flatbuffers-23.5.26) endfunction(FlatBuffers_Target) + + function(FlatBuffersMuteable_Target TGT) + set(oneValueArgs OUTPUT_DIR SCHEMA_DIR INCLUDE_DIR) + set(multiValueArgs SCHEMA_FILES) + cmake_parse_arguments(ARG "" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) + + # Use OUTPUT_DIR as INCLUDE_DIR if INCLUDE_DIR is not specified + if(NOT ARG_INCLUDE_DIR) + set(ARG_INCLUDE_DIR ${ARG_OUTPUT_DIR}) + endif(NOT ARG_INCLUDE_DIR) + + get_filename_component(abs_output_dir ${ARG_OUTPUT_DIR} ABSOLUTE) + get_filename_component(abs_include_dir ${ARG_INCLUDE_DIR} ABSOLUTE) + get_filename_component(abs_schema_dir ${ARG_SCHEMA_DIR} ABSOLUTE) + + # Let's reset list variables before using them + # NOTE THIS DOES NOT AFFECT parent scope + unset(SCHEMA_FILES) + unset(OUTPUT_FILES) + + foreach(schema ${ARG_SCHEMA_FILES}) + get_filename_component(schema_fn "${schema}" NAME) + get_filename_component(dir "${schema}" DIRECTORY) + + get_filename_component(schema_fn_we "${schema_fn}" NAME_WE) + + list(APPEND SCHEMA_FILES "${abs_schema_dir}/${schema}") + list(APPEND OUTPUT_FILES "${abs_output_dir}/${schema_fn_we}_generated.h") + endforeach() + + # Generate headers + add_custom_command(OUTPUT ${OUTPUT_FILES} + COMMAND ${CMAKE_COMMAND} -E make_directory "${abs_output_dir}" + COMMAND "${FLATC_PATH}" -c --no-includes + --no-union-value-namespacing + --gen-object-api + --gen-mutable + -o "${abs_output_dir}" + ${SCHEMA_FILES} + DEPENDS ${SCHEMA_FILES} + COMMENT "Generate '${TGT}' headers") + + # NOTE 
This header-only library is deliberately declared as STATIC library + # to avoid possible scope issues related with generated files + add_library(${TGT} STATIC ${OUTPUT_FILES}) + set_target_properties(${TGT} PROPERTIES LINKER_LANGUAGE CXX) + target_include_directories(${TGT} PUBLIC "${ARG_INCLUDE_DIR}") + target_link_libraries(${TGT} PUBLIC flatbuffers-23.5.26) + endfunction(FlatBuffersMuteable_Target) endif(FlatBuffers_FOUND) diff --git a/res/TensorFlowLiteRecipes/Conv2D_006/test.recipe b/res/TensorFlowLiteRecipes/Conv2D_006/test.recipe new file mode 100644 index 00000000000..d4da00b9026 --- /dev/null +++ b/res/TensorFlowLiteRecipes/Conv2D_006/test.recipe @@ -0,0 +1,52 @@ +# test to store as buffer data to outside of flatbuffer + +operand { + name: "ifm" + type: FLOAT32 + shape { dim: 1 dim: 4 dim: 3 dim: 2 } +} +operand { + name: "ker" + type: FLOAT32 + shape { dim: 2 dim: 2 dim: 2 dim: 2 } + filler { + tag: "explicit" + arg: "1" arg: "2" arg: "-3" arg: "-4" + arg: "-5" arg: "6" arg: "-7" arg: "8" + arg: "4" arg: "-2" arg: "3" arg: "-1" + arg: "-8" arg: "-6" arg: "7" arg: "5" + } +} +operand { + name: "bias" + type: FLOAT32 + shape { dim: 2 } + filler { + tag: "explicit" + arg: "1" + arg: "2" + } +} +operand { + name: "ofm" + type: FLOAT32 + shape { dim: 1 dim: 2 dim: 2 dim: 2 } +} +operation { + type: "Conv2D" + conv2d_options { + padding: VALID + stride_w: 1 + stride_h: 2 + dilation_w_factor: 1 + dilation_h_factor: 1 + activation: RELU + } + input: "ifm" + input: "ker" + input: "bias" + output: "ofm" +} +input: "ifm" +output: "ofm" +ext_offset: true
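
Note on the read-side convention used by the new `buffer_info()` overloads and the `ModelEx` dump path: a `Buffer` whose `offset` field is greater than 1 keeps its payload outside the FlatBuffer, `offset` bytes from the start of the model file, while offset values 0 and 1 are reserved (no extended data, and the placeholder written before relocation). The sketch below is a minimal, self-contained illustration of that rule; the `BufferView` and `resolve_buffer` names are hypothetical and not part of this patch.

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

struct BufferView
{
  const uint8_t *data = nullptr;
  size_t size = 0;
  bool is_extended = false; // payload lives outside the FlatBuffer
};

// `offset`/`size` come from the schema's Buffer table; `file` is the raw model file,
// `inline_data`/`inline_size` describe the Buffer's ordinary in-FlatBuffer data vector.
BufferView resolve_buffer(uint64_t offset, uint64_t size, const std::vector<char> &file,
                          const uint8_t *inline_data, size_t inline_size)
{
  BufferView view;
  if (offset > 1) // 0 = no extended data, 1 = placeholder written before relocation
  {
    view.is_extended = true;
    view.data = reinterpret_cast<const uint8_t *>(file.data()) + offset;
    view.size = size;
  }
  else if (inline_data != nullptr)
  {
    view.data = inline_data;
    view.size = inline_size;
  }
  return view;
}
```

This is also why the dump output now prints `(length *)` for such buffers, and why the drivers must hand the raw file bytes to the dumper alongside the parsed model.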
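On the write side, `ModelChef::finalize_ext_buffer()` and `CircleModel::finalize()` follow the same recipe: serialize the model with placeholder buffers (`CreateBuffer(fbb, 0, 1, 1)`, i.e. empty data, offset = 1, size = 1), then append each payload after the finished FlatBuffer with 16-byte padding and patch the real offset/size back in through the mutable accessors generated with `--gen-mutable` (the reason for the new `FlatBuffersMuteable_Target` helper). Below is a rough, schema-independent sketch of the relocation step; the names are hypothetical and the `mutate_offset()`/`mutate_size()` calls are left to the caller.

```cpp
#include <cstddef>
#include <cstdint>
#include <map>
#include <string>
#include <utility>
#include <vector>

// buffer index -> (offset, size) to be written back via mutate_offset()/mutate_size()
using Patches = std::map<int32_t, std::pair<uint64_t, uint64_t>>;

// Append each pending payload after the serialized FlatBuffer, 16-byte aligned,
// and report where it landed so the caller can patch the placeholder Buffers.
std::string relocate_buffers(const char *fb_data, size_t fb_size,
                             const std::map<int32_t, std::vector<uint8_t>> &pending,
                             Patches &patches)
{
  auto pad16 = [](std::string &s) {
    while (s.size() % 16 != 0)
      s += '\0';
  };

  std::string out;
  out.reserve(fb_size + 16); // the actual code reserves room for every payload up front
  out.append(fb_data, fb_size);
  pad16(out);

  for (const auto &it : pending)
  {
    const uint64_t offset = out.size(); // payload position relative to the start of the file
    patches[it.first] = {offset, it.second.size()};
    out.append(it.second.begin(), it.second.end());
    pad16(out);
  }
  return out;
}
```

Reserving the full result size up front matters in the real code because it takes a `GetMutableModel()` pointer into the growing `std::string` and keeps appending afterwards; without the reservation an append could reallocate the string and invalidate that pointer, which is what the `std::string::reserve()` NOTE in `finalize_ext_buffer()` guards against.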
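Because extended buffers are resolved against the file bytes, the dump entry points now take a `ModelEx` pair instead of a bare `Model *`, and the raw byte vector has to outlive the dump (the same lifetime note added next to `TFLModel::raw_data()`). A hedged usage sketch for the tfldump side, with the file loading and the generated-header include path assumed rather than taken from this patch:

```cpp
#include <fstream>
#include <iostream>
#include <iterator>
#include <string>
#include <vector>

#include <tfldump/Dump.h>                // declares tfldump::ModelEx and operator<<
#include <mio/tflite/schema_generated.h> // assumed location of the generated schema header

int dump_file(const std::string &path)
{
  std::ifstream file(path, std::ios::binary);
  std::vector<char> raw((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());

  // The raw bytes are both the FlatBuffer and the backing store for extended buffers.
  const tflite::Model *model = tflite::GetModel(raw.data());

  tfldump::ModelEx modelex;
  modelex.model = model;
  modelex.rawdata = &raw; // must outlive the dump: extended buffers point into `raw`
  std::cout << modelex << std::endl;

  return 0;
}
```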