Skip to content

Commit

Permalink
[application] add onnx example
Browse files Browse the repository at this point in the history
Added a basic ONNX application example.

It loads the `add_example.onnx` file and creates an nntrainer network graph.
(network structure is "input + bias = output")

Output of this example is as below:

================================================================================
          Layer name          Layer type    Output dimension         Input layer
================================================================================
               input               input             1:1:1:2
--------------------------------------------------------------------------------
                bias              weight             1:1:1:2
--------------------------------------------------------------------------------
                 add                 add             1:1:1:2               input
                                                                            bias
================================================================================

**Self evaluation:**
Build test: [x]Passed [ ]Failed [ ]Skipped
Run test: [x]Passed [ ]Failed [ ]Skipped

Signed-off-by: Seungbaek Hong <[email protected]>
  • Loading branch information
baek2sm committed Feb 26, 2025
1 parent 780f02d commit 97d6e12
Show file tree
Hide file tree
Showing 7 changed files with 145 additions and 47 deletions.
60 changes: 60 additions & 0 deletions Applications/ONNX/jni/Android.mk
Original file line number Diff line number Diff line change
@@ -0,0 +1,60 @@
# Android NDK build script for the nntrainer ONNX example executable.
# Imports the prebuilt nntrainer shared libraries, then builds main.cpp
# against the nntrainer include tree.
LOCAL_PATH := $(call my-dir)

include $(CLEAR_VARS)

# ndk path — the NDK location must be provided by the environment.
ifndef ANDROID_NDK
$(error ANDROID_NDK is not defined!)
endif

# Default to the repository root three levels up (Applications/ONNX/jni -> repo root).
ifndef NNTRAINER_ROOT
NNTRAINER_ROOT := $(LOCAL_PATH)/../../..
endif

# Header search paths for the nntrainer core, its submodules, and the C/C++ APIs.
ML_API_COMMON_INCLUDES := ${NNTRAINER_ROOT}/ml_api_common/include
NNTRAINER_INCLUDES := $(NNTRAINER_ROOT)/nntrainer \
$(NNTRAINER_ROOT)/nntrainer/dataset \
$(NNTRAINER_ROOT)/nntrainer/models \
$(NNTRAINER_ROOT)/nntrainer/layers \
$(NNTRAINER_ROOT)/nntrainer/compiler \
$(NNTRAINER_ROOT)/nntrainer/graph \
$(NNTRAINER_ROOT)/nntrainer/optimizers \
$(NNTRAINER_ROOT)/nntrainer/tensor \
$(NNTRAINER_ROOT)/nntrainer/utils \
$(NNTRAINER_ROOT)/nntrainer/converter \
$(NNTRAINER_ROOT)/api \
$(NNTRAINER_ROOT)/api/ccapi/include \
${ML_API_COMMON_INCLUDES}

# Import the prebuilt core nntrainer shared library for the target ABI.
LOCAL_MODULE := nntrainer
LOCAL_SRC_FILES := $(NNTRAINER_ROOT)/libs/$(TARGET_ARCH_ABI)/libnntrainer.so

include $(PREBUILT_SHARED_LIBRARY)

include $(CLEAR_VARS)

# Import the prebuilt C++ API shared library.
LOCAL_MODULE := ccapi-nntrainer
LOCAL_SRC_FILES := $(NNTRAINER_ROOT)/libs/$(TARGET_ARCH_ABI)/libccapi-nntrainer.so

include $(PREBUILT_SHARED_LIBRARY)

include $(CLEAR_VARS)

# Build the example executable itself.
# NOTE(review): -mcpu=cortex-a53 pins codegen to one core; presumably intentional
# for the reference device — confirm it matches the arm64-v8a targets in use.
LOCAL_ARM_NEON := true
LOCAL_CFLAGS += -std=c++17 -Ofast -mcpu=cortex-a53 -Ilz4-nougat/lib
LOCAL_LDFLAGS += -Llz4-nougat/lib/obj/local/$(TARGET_ARCH_ABI)/
LOCAL_CXXFLAGS += -std=c++17 -frtti
LOCAL_CFLAGS += -pthread -fexceptions -fopenmp
LOCAL_LDFLAGS += -fexceptions
LOCAL_MODULE_TAGS := optional
LOCAL_ARM_MODE := arm
LOCAL_MODULE := nntrainer_onnx_example
LOCAL_LDLIBS := -llog -landroid -fopenmp

LOCAL_SRC_FILES := main.cpp

# Link against the two prebuilt modules imported above.
LOCAL_SHARED_LIBRARIES := nntrainer ccapi-nntrainer

LOCAL_C_INCLUDES += $(NNTRAINER_INCLUDES)

include $(BUILD_EXECUTABLE)
4 changes: 4 additions & 0 deletions Applications/ONNX/jni/Application.mk
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
# NDK application-wide settings for the ONNX example.
# Target only 64-bit ARM devices.
APP_ABI := arm64-v8a
# Use the shared C++ runtime (libc++_shared.so must ship with the app).
APP_STL := c++_shared
# Minimum platform: Android 10 (API level 29).
APP_PLATFORM := android-29
# Opt in to 16 KB page-size compatibility for newer devices.
APP_SUPPORT_FLEXIBLE_PAGE_SIZES := true
14 changes: 14 additions & 0 deletions Applications/ONNX/jni/add_example.onnx
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
pytorch2.3.1:{

input
biasoutput/Add"Add
main_graph*BbiasJ�B�?ɉ.�Z
input


b
output



B
35 changes: 35 additions & 0 deletions Applications/ONNX/jni/main.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
#include <iostream>
#include <layer.h>
#include <model.h>
#include <nntrainer-api-common.h>
#include <onnx.h>
#include <optimizer.h>
#include <sstream>
#include <util_func.h>

using ModelHandle = std::unique_ptr<ml::train::Model>;

int main() {
ModelHandle model = ml::train::loadONNX("../../../../Applications/ONNX/"
"jni/add_example.onnx");

model->setProperty({nntrainer::withKey("batch_size", 1)});

try {
model->compile();
} catch (const std::exception &e) {
std::cerr << "Error during compile: " << e.what() << "\n";
return 1;
}

try {
model->initialize();
} catch (const std::exception &e) {
std::cerr << "Error during initialize: " << e.what() << "\n";
return 1;
}

model->summarize(std::cout, ML_TRAIN_SUMMARY_MODEL);

return 0;
}
21 changes: 21 additions & 0 deletions Applications/ONNX/jni/meson.build
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
# Meson build definition for the nntrainer ONNX example application.
onnx_example_sources = [
'main.cpp',
]

# Core dependencies: app utilities, ini parsing, and the nntrainer C/C++ APIs.
onnx_example_dependencies = [app_utils_dep,
iniparser_dep,
nntrainer_dep,
nntrainer_ccapi_dep
]

# Pull in gtest only when the project is configured with tests enabled.
if get_option('enable-test')
onnx_example_dependencies += [gtest_dep]
endif

# Build (and optionally install) the example executable.
e = executable('nntrainer_onnx_example',
onnx_example_sources,
include_directories: [include_directories('.')],
dependencies: onnx_example_dependencies,
install: get_option('install-app'),
install_dir: application_install_dir
)
2 changes: 1 addition & 1 deletion Applications/meson.build
Original file line number Diff line number Diff line change
Expand Up @@ -29,5 +29,5 @@ if get_option('enable-tflite-backbone')
subdir('SimpleShot')
endif
subdir('PicoGPT/jni')

subdir('ONNX/jni')
subdir('SimpleFC/jni')
56 changes: 10 additions & 46 deletions nntrainer/schema/onnx_interpreter.h
Original file line number Diff line number Diff line change
Expand Up @@ -16,51 +16,14 @@

#include <app_context.h>
#include <interpreter.h>
#include <iostream>
#include <layer.h>
#include <layer_node.h>
#include <model.h>
#include <nntrainer-api-common.h>
#include <onnx.pb.h>
#include <string>

/**
* @brief make "key=value" from key and value
*
* @tparam T type of a value
* @param key key
* @param value value
* @return std::string with "key=value"
*/
template <typename T>
static std::string withKey(const std::string &key, const T &value) {
std::stringstream ss;
ss << key << "=" << value;
return ss.str();
}

/**
* @brief make "key=value1,value2,...valueN" from key and multiple values
*
* @tparam T type of a value
* @param key key
* @param value list of values
* @return std::string with "key=value1,value2,...valueN"
*/
template <typename T>
static std::string withKey(const std::string &key,
std::initializer_list<T> value) {
if (std::empty(value)) {
throw std::invalid_argument("empty data cannot be converted");
}
std::stringstream ss;
ss << key << "=";
auto iter = value.begin();
for (; iter != value.end() - 1; ++iter) {
ss << *iter << ',';
}
ss << *iter;
return ss.str();
}
#include <util_func.h>

namespace nntrainer {
/**
Expand Down Expand Up @@ -103,8 +66,9 @@ class ONNXInterpreter {

// weight layer should be modified not to use input_shape as a parameter
layers.push_back(ml::train::createLayer(
"weight", {withKey("name", cleanName(initializer.name())),
withKey("dim", dim), withKey("input_shape", dim)}));
"weight", {nntrainer::withKey("name", cleanName(initializer.name())),
nntrainer::withKey("dim", dim),
nntrainer::withKey("input_shape", dim)}));
}

// Create input & constant tensor layer
Expand All @@ -120,8 +84,8 @@ class ONNXInterpreter {
if (input.name().find("input") !=
std::string::npos) { // Create input layer
layers.push_back(ml::train::createLayer(
"input", {withKey("name", cleanName(input.name())),
withKey("input_shape", dim)}));
"input", {nntrainer::withKey("name", cleanName(input.name())),
nntrainer::withKey("input_shape", dim)}));
} else { // Create constant tensor layer
throw std::runtime_error("Constant tensors are not supported yet.");
}
Expand Down Expand Up @@ -152,9 +116,9 @@ class ONNXInterpreter {
{cleanName(node.output()[0]), cleanName(node.name())});

layers.push_back(ml::train::createLayer(
"add",
{"name=" + cleanName(node.name()),
withKey("input_layers", inputNames[0] + "," + inputNames[1])}));
"add", {"name=" + cleanName(node.name()),
nntrainer::withKey("input_layers",
inputNames[0] + "," + inputNames[1])}));
} else {
throw std::runtime_error("Unsupported operation type: " +
node.op_type());
Expand Down

0 comments on commit 97d6e12

Please sign in to comment.