From d03d16431ea93021d077d5553468d7cae149a875 Mon Sep 17 00:00:00 2001 From: SaeHie Park Date: Mon, 10 Feb 2025 18:24:55 +0900 Subject: [PATCH] DRAFT initial circle-mlir project on-going draft to introduce initial circle-mlir project. Signed-off-by: SaeHie Park --- .github/workflows/run-circle-mlir-build.yml | 13 +- circle-mlir/Makefile.aa | 22 + circle-mlir/Makefile.sample | 72 ++ circle-mlir/README.md | 172 +++ circle-mlir/circle-mlir/CMakeLists.txt | 3 + circle-mlir/circle-mlir/lib/CMakeLists.txt | 1 + .../circle-mlir/lib/dialect/CMakeLists.txt | 1 + .../circle-mlir/lib/dialect/mlir/CircleOps.td | 38 + .../lib/dialect/src/CircleDialect.cpp | 2 +- .../lib/dialect/src/ShapeInference.cpp | 17 +- .../circle-mlir/lib/dialect/src/ops/AddOp.h | 66 + .../circle-mlir/lib/import/CMakeLists.txt | 1 + .../include/circle-mlir/import/CircleImport.h | 47 + .../lib/import/src/CircleImport.cpp | 1080 +++++++++++++++++ .../circle-mlir/lib/pass/CMakeLists.txt | 21 + .../include/circle-mlir/pass/CirclePass.h | 41 + .../circle-mlir/lib/pass/src/CirclePass.cpp | 207 ++++ .../lib/pass/src/ConvertHelper.cpp | 390 ++++++ .../circle-mlir/lib/pass/src/ConvertHelper.h | 162 +++ .../lib/pass/src/ConvertONNXToCirclePass.cpp | 158 +++ .../lib/pass/src/ConvertONNXToCirclePass.h | 32 + .../lib/pass/src/DumpCircleOpsPass.cpp | 48 + .../lib/pass/src/DumpCircleOpsPass.h | 65 + .../lib/pass/src/RewriteCirclePass.cpp | 106 ++ .../lib/pass/src/RewriteCirclePass.h | 32 + .../lib/pass/src/RewriteONNXPass.cpp | 77 ++ .../lib/pass/src/RewriteONNXPass.h | 32 + .../lib/pass/src/RuntimeVerifyPass.cpp | 62 + .../lib/pass/src/RuntimeVerifyPass.h | 33 + .../lib/pass/src/ShapeInferencePass.cpp | 272 +++++ .../lib/pass/src/ShapeInferencePass.h | 35 + .../pass/src/onnx/CompactReshapeConvReshape.h | 173 +++ circle-mlir/circle-mlir/tools/CMakeLists.txt | 1 + .../tools/onnx2circle/CMakeLists.txt | 45 + .../tools/onnx2circle/TestModels.cmake | 113 ++ .../tools/onnx2circle/src/cmdOptions.h | 34 + 
.../tools/onnx2circle/src/driverDebug.cpp | 170 +++ .../tools/onnx2circle/src/driverRelease.cpp | 135 +++ .../tools/onnx2circle/src/onnx2circle.cpp | 207 ++++ .../tools/onnx2circle/src/onnx2circle.h | 40 + .../onnx2circle/src/onnx2circle.test.cpp | 56 + .../circle-mlir/tools/onnx2circle/test.lst | 4 + circle-mlir/externals/CMakeLists.txt | 55 +- 43 files changed, 4336 insertions(+), 5 deletions(-) create mode 100644 circle-mlir/Makefile.aa create mode 100644 circle-mlir/circle-mlir/lib/dialect/src/ops/AddOp.h create mode 100644 circle-mlir/circle-mlir/lib/import/include/circle-mlir/import/CircleImport.h create mode 100644 circle-mlir/circle-mlir/lib/import/src/CircleImport.cpp create mode 100644 circle-mlir/circle-mlir/lib/pass/CMakeLists.txt create mode 100644 circle-mlir/circle-mlir/lib/pass/include/circle-mlir/pass/CirclePass.h create mode 100644 circle-mlir/circle-mlir/lib/pass/src/CirclePass.cpp create mode 100644 circle-mlir/circle-mlir/lib/pass/src/ConvertHelper.cpp create mode 100644 circle-mlir/circle-mlir/lib/pass/src/ConvertHelper.h create mode 100644 circle-mlir/circle-mlir/lib/pass/src/ConvertONNXToCirclePass.cpp create mode 100644 circle-mlir/circle-mlir/lib/pass/src/ConvertONNXToCirclePass.h create mode 100644 circle-mlir/circle-mlir/lib/pass/src/DumpCircleOpsPass.cpp create mode 100644 circle-mlir/circle-mlir/lib/pass/src/DumpCircleOpsPass.h create mode 100644 circle-mlir/circle-mlir/lib/pass/src/RewriteCirclePass.cpp create mode 100644 circle-mlir/circle-mlir/lib/pass/src/RewriteCirclePass.h create mode 100644 circle-mlir/circle-mlir/lib/pass/src/RewriteONNXPass.cpp create mode 100644 circle-mlir/circle-mlir/lib/pass/src/RewriteONNXPass.h create mode 100644 circle-mlir/circle-mlir/lib/pass/src/RuntimeVerifyPass.cpp create mode 100644 circle-mlir/circle-mlir/lib/pass/src/RuntimeVerifyPass.h create mode 100644 circle-mlir/circle-mlir/lib/pass/src/ShapeInferencePass.cpp create mode 100644 circle-mlir/circle-mlir/lib/pass/src/ShapeInferencePass.h 
create mode 100644 circle-mlir/circle-mlir/lib/pass/src/onnx/CompactReshapeConvReshape.h create mode 100644 circle-mlir/circle-mlir/tools/CMakeLists.txt create mode 100644 circle-mlir/circle-mlir/tools/onnx2circle/CMakeLists.txt create mode 100644 circle-mlir/circle-mlir/tools/onnx2circle/TestModels.cmake create mode 100644 circle-mlir/circle-mlir/tools/onnx2circle/src/cmdOptions.h create mode 100644 circle-mlir/circle-mlir/tools/onnx2circle/src/driverDebug.cpp create mode 100644 circle-mlir/circle-mlir/tools/onnx2circle/src/driverRelease.cpp create mode 100644 circle-mlir/circle-mlir/tools/onnx2circle/src/onnx2circle.cpp create mode 100644 circle-mlir/circle-mlir/tools/onnx2circle/src/onnx2circle.h create mode 100644 circle-mlir/circle-mlir/tools/onnx2circle/src/onnx2circle.test.cpp create mode 100644 circle-mlir/circle-mlir/tools/onnx2circle/test.lst diff --git a/.github/workflows/run-circle-mlir-build.yml b/.github/workflows/run-circle-mlir-build.yml index fdeb27709d7..5585a895ede 100644 --- a/.github/workflows/run-circle-mlir-build.yml +++ b/.github/workflows/run-circle-mlir-build.yml @@ -35,6 +35,7 @@ jobs: include: - ubuntu_code: jammy ubuntu_vstr: u2204 + one_comp_ver: 1.29.0 runs-on: ubuntu-latest @@ -45,11 +46,19 @@ jobs: name: circle-mlir ${{ matrix.ubuntu_vstr }} ${{ matrix.type }} test steps: + # TODO prepare circle-interpreter Debian package and install + - name: Install one-compiler + run: | + cd /var/tmp + ONE_COMPILER=one-compiler-${{ matrix.ubuntu_code }}_${{ matrix.one_comp_ver }}_amd64.deb + wget https://github.com/Samsung/ONE/releases/download/${{ matrix.one_comp_ver }}/${ONE_COMPILER} + ls -al . 
+ dpkg -i ${ONE_COMPILER} + ls -al /usr/share/one/bin + - name: Checkout uses: actions/checkout@v4 - # TODO download circle-interpreter - # NOTE Docker image has pre-installed submodules in /workdir # NOTE Docker image has pre-installed python packages - name: Configure diff --git a/circle-mlir/Makefile.aa b/circle-mlir/Makefile.aa new file mode 100644 index 00000000000..f1768e015b9 --- /dev/null +++ b/circle-mlir/Makefile.aa @@ -0,0 +1,22 @@ +all: cfg debug test install + +cfg: + Python3_ROOT_DIR=/usr/bin cmake -B build/debug -S ./ \ + -DCMAKE_INSTALL_PREFIX=build/debug.install \ + -DCMAKE_BUILD_TYPE=Debug \ + -DCIRCLE_MLIR_WORKDIR=/workdir + +cc: + Python3_ROOT_DIR=/usr/bin cmake -B build/debug -S ./ \ + -DCMAKE_INSTALL_PREFIX=build/debug.install \ + -DCMAKE_BUILD_TYPE=Debug + +debug: + cmake --build build/debug -j4 + +test: + CTEST_OUTPUT_ON_FAILURE=1 cmake --build build/debug --verbose -- test + +install: + cmake --build build/debug -j4 -- install + diff --git a/circle-mlir/Makefile.sample b/circle-mlir/Makefile.sample index 4b12f2aee34..0c77cc7b856 100644 --- a/circle-mlir/Makefile.sample +++ b/circle-mlir/Makefile.sample @@ -15,8 +15,12 @@ endif # TODO error handle if not found PYTHON3_PATH=$(shell dirname $(PYTHON3_CMD)) +# NOTE CIRCLEM_LIR_XXX is used for CMakeLists +# CIRCLEMLIR_XXX is used in this Makefile + CIRCLEMLIR_BUILD_DEBUG?=build/debug CIRCLEMLIR_BUILD_REL?=build/release +CIRCLEMLIR_BUILD_COV?=build/coverage CIRCLEMLIR_EXTS_DEBUG?=build/externals/debug CIRCLEMLIR_EXTS_REL?=build/externals/release @@ -40,7 +44,16 @@ help: @echo "make prepr : prepare externals for release (needed only once)" @echo "make cfgr : configure circle-mlir for release build" @echo "make rel : build for release" + @echo "make prepcov : prepare submodules for coverage test (needed only once)" + @echo "make cfgcov : configure circle-mlir for debug build with coverage test" + @echo "make debugcov : build for test coverage" + @echo "make testcov : run coverage test" + @echo 
"make gencov : generate test coverage report" + @echo "make cleancov : clean test coverage build" @echo "make testr : test for release" + @echo "make cfgdi : configure circle-mlir for debug build in Docker image" + @echo "make cfgcovdi : configure circle-mlir for debug build with coverage test in Docker image" + @echo "make cfgri : configure circle-mlir for release build in Docker image" @echo "make cleanr : clean release build" @echo "make cleanall : clean all build including overlay, externals" @@ -86,6 +99,38 @@ clean: rm -f $(CIRCLEMLIR_BUILD_DEBUG)/CMakeCache.txt rm -rf $(CIRCLEMLIR_BUILD_DEBUG)/circle-mlir/ +#------------------------------------------------------------------------------- +# for debug test coverage + +prepcov: _mkbuildcov + Python3_ROOT_DIR=$(CIRCLEMLIR_PY3_ROOT) \ + cmake -B $(CIRCLEMLIR_EXTS_DEBUG) -S ./externals -DCMAKE_BUILD_TYPE=Release + cmake --build $(CIRCLEMLIR_EXTS_DEBUG) -j$(CIRCLEMLIR_BUILD_JOBS) + +cfgcov: _mkbuildcov + cmake -B $(CIRCLEMLIR_BUILD_COV) -S ./ \ + -DCIRCLE_MLIR_EXTERNALS=$(CIRCLEMLIR_EXTS_DEBUG) \ + -DONNX2CIRCLE_TEST_MODELS_SINGLE=ON \ + -DENABLE_COVERAGE=ON + +debugcov: + CM_PASS_DUMP=2 \ + cmake --build $(CIRCLEMLIR_BUILD_COV) -j$(CIRCLEMLIR_BUILD_JOBS) + +# NOTE to configure in Docker, use "make cfgcovdi" + +testcov: + CM_PASS_DUMP=2 \ + CTEST_OUTPUT_ON_FAILURE=1 \ + cmake --build $(CIRCLEMLIR_BUILD_COV) --verbose -- test + +gencov: + bash infra/tools/gen-coverage-report circle-mlir + +cleancov: + rm -f $(CIRCLEMLIR_BUILD_COV)/CMakeCache.txt + rm -rf $(CIRCLEMLIR_BUILD_COV)/circle-mlir/ + #------------------------------------------------------------------------------- # for release @@ -109,6 +154,33 @@ cleanr: rm -f $(CIRCLEMLIR_BUILD_REL)/CMakeCache.txt rm -rf $(CIRCLEMLIR_BUILD_REL)/circle-mlir/ +#------------------------------------------------------------------------------- +# for debug build in Docker +# +# no need to make for overlay, prep as prepared in Docker image +# run make for 'cfgdi' +# then make 
for 'debug', 'test' + +cfgdi: _mkbuild + cmake -B $(CIRCLEMLIR_BUILD_DEBUG) -S ./ \ + -DONNX2CIRCLE_TEST_MODELS_SINGLE=ON \ + -DCMAKE_BUILD_TYPE=Debug \ + -DCIRCLE_MLIR_WORKDIR=/workdir + +# for test converage build in Docker +cfgcovdi: _mkbuildcov + cmake -B $(CIRCLEMLIR_BUILD_COV) -S ./ \ + -DONNX2CIRCLE_TEST_MODELS_SINGLE=ON \ + -DCIRCLE_MLIR_WORKDIR=/workdir \ + -DENABLE_COVERAGE=ON + +# for release build in Docker + +cfgri: _mkbuild + cmake -B $(CIRCLEMLIR_BUILD_REL) -S ./ \ + -DONNX2CIRCLE_TEST_MODELS_SINGLE=ON \ + -DCMAKE_BUILD_TYPE=Release \ + -DCIRCLE_MLIR_WORKDIR=/workdir #------------------------------------------------------------------------------- diff --git a/circle-mlir/README.md b/circle-mlir/README.md index 6fd75e5dda5..e5671fa04ac 100644 --- a/circle-mlir/README.md +++ b/circle-mlir/README.md @@ -1,3 +1,175 @@ # circle-mlir Circle MLIR dialect and tools + +## Tools provided + +_onnx2circle_ +- conversion tool of ONNX to Circle model for `compiler` +- to replace not-maintained-anymore onnx-tensorflow package + +## How to build + +Use provided `Makefile.sample` or create your own `Makefile` +``` +ln -s Makefile.sample Makefile +``` +- `Makefile` is in `.gitignore` to let developers use own Makefile. + +### Prerequisite + +``` +sudo apt-get install build-essential cmake git fakeroot +sudo apt-get install autoconf automake libtool unzip wget +sudo apt-get install devscripts debmake debhelper lcov +sudo apt-get install python3 python3-pip python3-venv python3-dev python3-all dh-python + +python3 -m pip install --upgrade pip setuptools +python3 -m pip install yapf==0.43.0 numpy==1.26.4 h5py==3.8.0 einops +``` + +### Prepare externals + +### Debug build + +Prepare overlay +``` +make overlay +``` + +Build submodules in venv +``` +source infra/overlay/venv/bin/activate +make prep +``` +NOTE `llvm-project` is built as `Debug` which may require 32G or more RAM. 
+- if build fails for some reason, please change back to + `-DCMAKE_BUILD_TYPE=Release` in `prep:` target in `Makefile.sample` file. +- build and test needs venv python packages. + +NOTE `overlay` and `submodules` builds are needed only once. + +Configure and build +``` +make cfg +make debug +``` + +Test build +``` +make test +``` +- optionally, set `ONE_COMPILER_ROOT` to alternate PATH for local ONE build + ``` + ONE_COMPILER_ROOT=/home/user/one/build/install make test + ``` + +To clean up existing build results +``` +make clean +``` + +To clean up also `overlay` and `submodules` +``` +make cleanall +``` +- NOTE when using `CIRCLE_MLIR_LOCALINST`, need to manually clean up this folder + +### Release build + +Release build is available as follows. +Others not mentioned are same as above Debug build. + +Build submodules in venv +``` +source infra/overlay/venv/bin/activate +make prepr +deactivate +``` + +Configure and build +``` +make cfgr +make rel +``` + +Test build +``` +make testr +``` + +### Test coverage + +To get test coverage report, run as following commands. +- assume you already have done `make overlay` and `make prepcov` +- you can skip `make prepcov` step if you are using local installation with `CIRCLE_MLIR_LOCALINST` +- or you can reuse `CIRCLE_MLIR_LOCALINST` for existing debug or release build submodules with +`cfgcov` target such as `CIRCLE_MLIR_LOCALINST=$(pwd)/build/debug/submodules make cfgcov` +``` +source infra/overlay/venv/bin/activate +make cfgcov +deactivate + +make debugcov +make testcov +make gencov +``` + +Open `converage/html/index.html` file in web browser to see the reports. + +To generate from second run and so on in your local machine, you will have to +remove existing files before running `gencov` +``` +rm -rf coverage +make gencov +``` + +To run this with Docker image, use `cfgcovdi` target instead of `cfgcov`. 
+``` +make cfgcovdi +make debugcov +make testcov +make gencov +``` + + +## Local format check + +Install prerequiste package. +``` +sudo apt-get install clang-format-12 python3 python3-pip +python3 -m pip install yapf==0.32.0 +``` + +Run format checker. +``` +bash ./infra/tools/format +``` +or with `Makefile` from `Makefile.sample` +``` +make format +``` + +## Dump debug logs + +To see logs during conversion with `onnx2circle` tool, set `CM_PASS_DUMP=1` for +preprocessing ONNX and ONNX to circle conversion, or set `CM_PASS_DUMP=2` to see +additional logs for circle rewrite. + +``` +CM_PASS_DUMP=2 onnx2circle input.onnx output.circle +``` + +You can give `-debug` option to see general MLIR logs or `-debug-only=o2c` +option to see only logs from onnx2circle. + +``` +onnx2circle -debug-only=o2c input.onnx output.circle +``` + +## TensorFlow source code + +Some source codes are referenced from TensorFlow and the file path is added to +inside our source. + +Current codes are from `v2.12.1` tag. 
diff --git a/circle-mlir/circle-mlir/CMakeLists.txt b/circle-mlir/circle-mlir/CMakeLists.txt index 17da97eaeec..4091ef8746b 100644 --- a/circle-mlir/circle-mlir/CMakeLists.txt +++ b/circle-mlir/circle-mlir/CMakeLists.txt @@ -1,3 +1,6 @@ include(UseMLIR) +include(UseAbseil) + add_subdirectory(lib) +add_subdirectory(tools) add_subdirectory(tools-test) diff --git a/circle-mlir/circle-mlir/lib/CMakeLists.txt b/circle-mlir/circle-mlir/lib/CMakeLists.txt index 66765821a81..5154e5499ac 100644 --- a/circle-mlir/circle-mlir/lib/CMakeLists.txt +++ b/circle-mlir/circle-mlir/lib/CMakeLists.txt @@ -4,5 +4,6 @@ add_subdirectory(arser) add_subdirectory(schema) add_subdirectory(dialect) add_subdirectory(utils) +add_subdirectory(pass) add_subdirectory(import) add_subdirectory(export) diff --git a/circle-mlir/circle-mlir/lib/dialect/CMakeLists.txt b/circle-mlir/circle-mlir/lib/dialect/CMakeLists.txt index a5c3ae90acf..c8fcf048469 100644 --- a/circle-mlir/circle-mlir/lib/dialect/CMakeLists.txt +++ b/circle-mlir/circle-mlir/lib/dialect/CMakeLists.txt @@ -18,6 +18,7 @@ target_include_directories(cirmlir_dialect PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/in # use generated files add_dependencies(cirmlir_dialect circle_mlir_gen_inc) target_include_directories(cirmlir_dialect PUBLIC ${CMAKE_CURRENT_BINARY_DIR}) +target_link_libraries(cirmlir_dialect PUBLIC abseil_cpp) target_link_libraries(cirmlir_dialect PUBLIC circle_schema) target_link_libraries(cirmlir_dialect PUBLIC cirmlir_coverage) diff --git a/circle-mlir/circle-mlir/lib/dialect/mlir/CircleOps.td b/circle-mlir/circle-mlir/lib/dialect/mlir/CircleOps.td index 1c0989feec7..d2f07f977ca 100644 --- a/circle-mlir/circle-mlir/lib/dialect/mlir/CircleOps.td +++ b/circle-mlir/circle-mlir/lib/dialect/mlir/CircleOps.td @@ -296,6 +296,44 @@ class CIR_ConvOp($_op))">>, + ResultsBroadcastableShape, + DeclareOpInterfaceMethods, + Pure, + Commutative, + // TODO enable QuantizableResult, + ]> { + let summary = "Addition operator"; + + let description = [{ + 
Element-wise addition operation. + }]; + + let arguments = ( + // TODO add more dtypes + ins CIR_TensorOf<[F32, I32, I64]>:$lhs, + CIR_TensorOf<[F32, I32, I64]>:$rhs, + CIR_AFAttr:$fused_activation_function); + + let results = (outs CIR_TensorOf<[F32, I32, I64]>:$output); + + let hasFolder = 1; + + let hasCustomAssemblyFormat = 1; + + let extraClassDefinition = [{ + ParseResult $cppClass::parse(OpAsmParser &parser, OperationState &result) { + return parseOneResultSameOperandTypeOp(parser, result); + } + void $cppClass::print(OpAsmPrinter &p) { + return printOneResultOp(getOperation(), p); + } + }]; + + let hasOptions = 1; +} def CIR_ConstOp : Op bool inferBinShapes(BINOP &op, SmallVector } // namespace -// TODO add AddOp +//===----------------------------------------------------------------------===// +// AddOp +//===----------------------------------------------------------------------===// + +void AddOp::inferShapes() +{ + AddOp op = *this; + SmallVector inferred; + if (!inferBinShapes(op, inferred)) + return; + + auto input0_op = getOperand(0); + auto input0_type = input0_op.getType().cast(); + RankedTensorType inferred_type = RankedTensorType::get(inferred, input0_type.getElementType()); + getResult().setType(inferred_type); +} } // namespace Circle } // namespace mlir diff --git a/circle-mlir/circle-mlir/lib/dialect/src/ops/AddOp.h b/circle-mlir/circle-mlir/lib/dialect/src/ops/AddOp.h new file mode 100644 index 00000000000..89ea40fe1c9 --- /dev/null +++ b/circle-mlir/circle-mlir/lib/dialect/src/ops/AddOp.h @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2025 Samsung Electronics Co., Ltd. All Rights Reserved + * Copyright 2019 The TensorFlow Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// from tensorflow/compiler/mlir/lite/ir/tfl_ops.cc + +#ifndef __CIRCLE_MLIR_DIALECT_OPS_ADD_OP_H__ +#define __CIRCLE_MLIR_DIALECT_OPS_ADD_OP_H__ + +#include "circle-mlir/dialect/CircleDialect.h" + +namespace mlir +{ +namespace Circle +{ + +// Return true if the given Add operation has the CPU kernel supported shapes. +bool VerifyAddOpShapeConstraints(AddOp op) +{ + auto element_type = getElementTypeOrSelf(op.getOutput().getType()); + + // Allows F32 and I32 outputs when the operands have valid shapes, + // which are broadcastable shapes up to four dimensions or have same shapes. + // TODO support Quantized Type + if (element_type.isF32() || IsI32Type(element_type) || IsI64Type(element_type)) + { + return VerifyOperandsHaveSameShapesOrBroadcastableShape( + /*op=*/op.getOperation(), /*indices=*/ArrayRef{0, 1}, + /*max_bcast_rank=*/4); + } + + return false; +} + +//===----------------------------------------------------------------------===// +// AddOp +//===----------------------------------------------------------------------===// + +OpFoldResult AddOp::fold(FoldAdaptor adaptor) +{ + auto operands = adaptor.getOperands(); + // TODO(b/142478136): Handle fused ops. 
+ if (getFusedActivationFunction() != "NONE") + return {}; + return ConstFoldBinaryOp( + getType(), operands, [](APFloat a, APFloat b) { return a + b; }, + [](APInt a, APInt b) { return a + b; }); +} + +} // namespace Circle +} // namespace mlir + +#endif // __CIRCLE_MLIR_DIALECT_OPS_ADD_OP_H__ diff --git a/circle-mlir/circle-mlir/lib/import/CMakeLists.txt b/circle-mlir/circle-mlir/lib/import/CMakeLists.txt index e0a3f1ae3a0..bbd1615061c 100644 --- a/circle-mlir/circle-mlir/lib/import/CMakeLists.txt +++ b/circle-mlir/circle-mlir/lib/import/CMakeLists.txt @@ -1,4 +1,5 @@ set(SRC + src/CircleImport.cpp src/CircleOperator.cpp ) diff --git a/circle-mlir/circle-mlir/lib/import/include/circle-mlir/import/CircleImport.h b/circle-mlir/circle-mlir/lib/import/include/circle-mlir/import/CircleImport.h new file mode 100644 index 00000000000..173d655a0d8 --- /dev/null +++ b/circle-mlir/circle-mlir/lib/import/include/circle-mlir/import/CircleImport.h @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2025 Samsung Electronics Co., Ltd. All Rights Reserved + * Copyright 2019 The TensorFlow Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +// from tensorflow/compiler/mlir/lite/flatbuffer_import.h + +#ifndef __CIRCLE_MLIR_IMPORT_CIRCLE_IMPORT_H__ +#define __CIRCLE_MLIR_IMPORT_CIRCLE_IMPORT_H__ + +#include +#include + +#include +#include +#include +#include +#include + +namespace mlir +{ +namespace Circle +{ + +mlir::OwningOpRef +FlatBufferToMlir(absl::string_view buffer, mlir::MLIRContext *context, mlir::Location base_loc, + bool use_external_constant = false, + const std::vector &ordered_input_arrays = {}, + const std::vector &ordered_output_arrays = {}, + bool experimental_prune_unreachable_nodes_unconditionally = false); + +} // namespace Circle +} // namespace mlir + +#endif // __CIRCLE_MLIR_IMPORT_CIRCLE_IMPORT_H__ diff --git a/circle-mlir/circle-mlir/lib/import/src/CircleImport.cpp b/circle-mlir/circle-mlir/lib/import/src/CircleImport.cpp new file mode 100644 index 00000000000..b40af2fa5e1 --- /dev/null +++ b/circle-mlir/circle-mlir/lib/import/src/CircleImport.cpp @@ -0,0 +1,1080 @@ +/* + * Copyright (c) 2025 Samsung Electronics Co., Ltd. All Rights Reserved + * Copyright 2019 The TensorFlow Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +// from tensorflow/lite/core/macros.h +// maximum size of a valid flatbuffer +inline constexpr unsigned int flatbuffer_size_max = 2147483648; + +// from tensorflow/compiler/mlir/lite/flatbuffer_import.cc + +#include "circle-mlir/import/CircleImport.h" + +#include "CircleOperator.h" + +#include +#include +#include +#include + +#include +#include +#include +#include // llvm::join +#include // llvm::is_contained +#include +#include +#include // m_Constant +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include + +#define ASSIGN_OR_RETURN(lhs, rexpr) \ + auto optvalue = (rexpr); \ + if (!optvalue.has_value()) \ + { \ + llvm::errs() << "Invalid value\n"; \ + return {}; \ + } \ + lhs = std::move(optvalue).value() + +namespace circle +{ + +// Node edge.second depends on node edge.first. +using ControlEdge = std::pair; +using ControlEdges = std::vector; + +} // namespace circle + +// from tensorflow/lite/experimental/remat/metadata_util.h +namespace circle +{ + +/// Control dependencies for the model is the collection of control dependencies +/// for its subgraphs. 
+using ModelControlDependencies = std::vector; + +} // namespace circle + +namespace mlir +{ +namespace Circle +{ + +namespace +{ + +// from tensorflow/compiler/mlir/lite/offset_buffer.h +inline bool IsValidBufferOffset(const int64_t offset) { return offset > 1; } + +// from tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.h +constexpr StringRef kModelIndexPathAttr = "model.index_path"; +constexpr StringRef kModelExportedNamesAttr = "model.exported_names"; + +bool IsQuantized(const circle::TensorT &tensor) +{ + return (tensor.quantization != nullptr) && !tensor.quantization->zero_point.empty(); +} + +// Create the MLIR NamedLoc location corresponding to a given tensor +mlir::Location TensorLoc(const circle::TensorT &tensor, mlir::Builder builder, mlir::Location base) +{ + if (tensor.name.empty()) + { + return base; + } + return mlir::NameLoc::get(builder.getStringAttr(tensor.name), base); +} + +// Create the MLIR Location corresponding to a given op. This is an +// experimental/debugging feature and production code should not rely on names +// of intermediate tensors since importer doesn't guarantee to preserve tensor +// names except output tensors. 
+Location OpLoc(const circle::OperatorT &op, + const std::vector> &tensors, mlir::Builder builder, + mlir::Location base) +{ + if (op.outputs.empty()) + return base; + + llvm::SmallVector locations; + locations.reserve(op.outputs.size()); + for (auto tensor_index : op.outputs) + { + locations.push_back(TensorLoc(*tensors[tensor_index], builder, base)); + } + return mlir::FusedLoc::get(builder.getContext(), locations); +} + +std::optional GetTensorType(const circle::TensorT &tensor, Builder builder, + bool is_constant = false, + bool is_intermediate = false, + bool get_storage = false) +{ + mlir::Type elem_type = circle::ConvertElementType(tensor.type, builder); + if (tensor.type == circle::TensorType_VARIANT) + { + // TODO implement with mlir::TF::VariantType conversion + llvm::errs() << "NYI GetTensorType VARIANT.\n"; + return {}; + } + if (IsQuantized(tensor)) + { + llvm::errs() << "NYI GetTensorType Quantized.\n"; + return {}; + } + + // Intermediate tensors with calibration value (but not scale and zero points) + // should return calibrated quantized type. + if (is_intermediate && tensor.quantization != nullptr && !IsQuantized(tensor)) + { + llvm::errs() << "NYI GetTensorType Calibrated Quantized.\n"; + return {}; + } + + if (tensor.shape.empty() && (is_constant || tensor.has_rank)) + { + return RankedTensorType::get({}, elem_type); + } + + if (!tensor.shape_signature.empty()) + { + llvm::errs() << "NYI GetTensorType Tensor shape_signature.\n"; + return {}; + } + + if (!tensor.shape.empty()) + { + llvm::SmallVector shape(tensor.shape.begin(), tensor.shape.end()); + return GetTypeFromTensorShape(shape, elem_type); + } + + return UnrankedTensorType::get(elem_type); +} + +mlir::Operation *ConvertMinMaxToStatsOp(const circle::TensorT &tensor, mlir::OpBuilder b, + mlir::Value res) +{ + // TODO implement + (void)tensor; + (void)b; + (void)res; + return nullptr; +} + +// Returns true if this is a basic LSTM op. 
+bool IsBasicLSTMOp(circle::BuiltinOptionsUnion op_union) +{ + if (const auto *op = op_union.AsLSTMOptions()) + { + return op->kernel_type == circle::LSTMKernelType_BASIC; + } + else + { + return false; + } +} + +// Gets the MLIR op name with the dialect name for the flatbuffer operator. +std::string GetMlirOpName(const circle::OperatorT &op, const circle::OperatorCodeT &op_code) +{ + if (IsBasicLSTMOp(op.builtin_options)) + { + return std::string("Circle.basic_lstm"); + } + return GetMlirOpNameFromOpCode(op_code); +} + +// The buffers in Circle flatbuffers have their contents stored as a vector of +// bytes that represent host endianness values. +// The read_size parameter is present to allow reading both float16 and float32s +// without a case split. +template llvm::SmallVector ReadAsHostEndian(llvm::ArrayRef bytes) +{ + llvm::SmallVector ret; + size_t read_size = sizeof(T); + // NOTE original code used `int` for `bytes_len` + size_t bytes_len = bytes.size(); + assert(bytes_len % read_size == 0); + + size_t elem_count = bytes_len / read_size; + ret.reserve(elem_count); + + const char *data_ptr = reinterpret_cast(bytes.data()); + for (size_t i = 0; i < elem_count; i++) + { + T val = llvm::support::endian::readNext(data_ptr); + ret.push_back(mlir::APInt(sizeof(T) * 8, val)); + } + return ret; +} + +std::optional ConvertFloatBuffer(mlir::RankedTensorType shaped_type, + const std::vector &buffer) +{ + size_t bytes_len = buffer.size(); + mlir::Type elem_type = shaped_type.getElementType(); + + // The bytes of floats are stored little-endian. 
+ switch (elem_type.getIntOrFloatBitWidth()) + { + // TODO 16 + case 32: + { + assert(bytes_len % 4 == 0); + int elem_count = bytes_len / 4; + std::vector values; + values.reserve(elem_count); + + const char *data = reinterpret_cast(buffer.data()); + + for (int i = 0; i < elem_count; i++) + { + uint32_t bit_repr = + llvm::support::endian::readNext(data); + values.push_back(absl::bit_cast(bit_repr)); + } + auto num = shaped_type.getNumElements(); + return mlir::ElementsAttr(DenseElementsAttr::get(shaped_type, ArrayRef(values))); + } + } + llvm::errs() << "Unsupported bit width: " << elem_type.getIntOrFloatBitWidth() << "\n"; + return {}; +} + +std::optional ConvertIntBuffer(mlir::RankedTensorType shaped_type, + const std::vector &buffer, + bool truncate = false) +{ + mlir::Type elem_type = shaped_type.getElementType(); + unsigned bit_width; + if (auto itype = elem_type.dyn_cast()) + { + bit_width = itype.getWidth(); + } + else if (auto qtype = elem_type.dyn_cast()) + { + llvm::errs() << "NYI ConvertIntBuffer QuantizedType\n"; + return {}; + } + else + { + llvm::errs() << "Unsupported integer constant type\n"; + return {}; + } + + llvm::SmallVector values; + switch (bit_width) + { + case 8: + return mlir::ElementsAttr( + mlir::DenseElementsAttr::get(shaped_type, ArrayRef(buffer))); + case 16: + values = ReadAsHostEndian(buffer); + break; + case 32: + values = ReadAsHostEndian(buffer); + break; + case 64: + values = ReadAsHostEndian(buffer); + break; + default: + llvm::errs() << "Cannot handle bit width " << bit_width << "\n"; + return {}; + } + + if (truncate) + { + llvm::errs() << "NYI ConvertIntBuffer truncate.\n"; + return {}; + } + + return mlir::ElementsAttr(mlir::DenseElementsAttr::get(shaped_type, values)); +} + +std::optional BuildExternalConstOp(const circle::TensorT &tensor, int32_t buffer_index, + mlir::OpBuilder builder, mlir::Location loc) +{ + // TODO implement + (void)tensor; + (void)buffer_index; + (void)builder; + (void)loc; + llvm::errs() << "NYI 
BuildExternalConstOp\n"; + assert(false); // assert is used to know when this is used for some model + return {}; +} + +std::optional BuildConstOp(const circle::TensorT &tensor, + const std::vector &buffer, bool is_variable, + mlir::OpBuilder builder, mlir::Location loc, + bool use_stablehlo_constant) +{ + if (tensor.sparsity != nullptr) + { + // TODO support sparsity + llvm::errs() << "NYI BuildConstOp sparse\n"; + return {}; + } + + if (is_variable) + { + // TODO support variable + llvm::errs() << "NYI BuildConstOp variable\n"; + return {}; + } + + ASSIGN_OR_RETURN(auto type, GetTensorType(tensor, builder, + /*is_constant=*/true, + /*is_intermediate=*/false, + /*get_storage=*/true)); + auto shaped_type = type.dyn_cast(); + if (!shaped_type) + { + llvm::errs() << "Constant doesn't have a shape\n"; + return {}; + } + + mlir::ElementsAttr value; + if (IsQuantized(tensor)) + { + // TODO support quantized + llvm::errs() << "NYI BuildConstOp quantized\n"; + return {}; + } + + auto elem_type = shaped_type.getElementType(); + if (auto float_type = elem_type.dyn_cast()) + { + ASSIGN_OR_RETURN(value, ConvertFloatBuffer(shaped_type, buffer)); + } + else if (elem_type.isa()) + { + ASSIGN_OR_RETURN(value, ConvertIntBuffer(shaped_type, buffer)); + } + // TODO support StringType (TensorType_STRING) + // TODO support ComplexType + else + { + llvm::errs() << "Constant of unsupported type\n"; + return {}; + } + + if (use_stablehlo_constant) + { + // TODO support stablehlo + llvm::errs() << "NYI BuildConstOp stablehlo\n"; + return {}; + } + auto op = builder.create(loc, value); + return op.getOperation(); +} + +// TODO(krzysd) Handle function calls +std::optional +ConvertOp(const circle::OperatorT &op, const std::vector &vals_map, + const std::vector &intermediate_types, mlir::Value optional_arg_marker, + const std::vector> &op_codes, + const std::vector &func_names, + const std::vector> &tensors, mlir::Location loc, + mlir::OpBuilder builder, const circle::Model *model_ptr) +{ + 
const circle::OperatorCodeT &op_code = *op_codes.at(op.opcode_index); + + const std::string op_name = GetMlirOpName(op, op_code); + + mlir::OperationState op_state(loc, op_name); + + for (auto input_num : op.inputs) + { + if (input_num == -1) + { + assert(optional_arg_marker != nullptr); + op_state.addOperands({optional_arg_marker}); + } + else + { + op_state.addOperands({vals_map.at(input_num)}); + } + } + + for (auto output_num : op.outputs) + { + auto &tensor = *tensors.at(output_num); + auto type_or_err = GetTensorType(tensor, builder); + if (!type_or_err.has_value()) + { + return {}; + } + auto type = std::move(type_or_err).value(); + + if (op_name == "Circle.quantize") + { + // Special case for quantize: return type must also be in qtype attribute + op_state.addAttribute("qtype", mlir::TypeAttr::get(type)); + } + else if (op_name == "Circle.reshape" && op_state.operands.size() == 1) + { + // Special case for reshape: the second op is optional in the old + // converter and kernel, so we create the second operand, which is + // required by the new converter, from the reshape op's option. + auto new_shape = op.builtin_options.AsReshapeOptions()->new_shape; + auto shape_type = GetTypeFromTensorShape({static_cast(new_shape.size())}, + builder.getIntegerType(32)); + + mlir::SmallVector shape; + for (auto s : new_shape) + { + shape.push_back(builder.getI32IntegerAttr(ConvertToCircleSize(s))); + } + auto output_shape = DenseElementsAttr::get(shape_type, shape); + auto shape_op = builder.create(loc, output_shape); + op_state.addOperands({shape_op}); + } + + op_state.addTypes({type}); + } + + // While the last several tensors could be optional tensors for an circle op, the + // number of input operands could vary. Gets the min/max number of + // operands from circle op name. + // Also, since the above code special-handles the `circle.reshape` op and add an + // additional input, we put these function block here. 
+ llvm::MinMax input_min_max = mlir::OperandNumbersMinMax(op_name); + int input_max_num = input_min_max.Max; + int op_input_num = op_state.operands.size(); + if (input_max_num != 0 && input_max_num > op_input_num) + { + // If the number of current inputs is less than the op definition, fill in + // with `none` value, + llvm::SmallVector none_operands( + input_max_num - op_input_num, + builder.create(loc, builder.getNoneType(), builder.getUnitAttr())); + op_state.addOperands(llvm::ArrayRef(none_operands)); + } + + // TODO support lstm + // TODO support while + // TODO support unidirectional_sequence_lstm + if (op_name == "Circle.reshape") + { + // Flattern reshape ops when more than one dimension shape operand is given. + mlir::DenseIntElementsAttr shape_attr; + if (matchPattern(op_state.operands[1], m_Constant(&shape_attr))) + { + auto shape_ty = op_state.operands[1].getType().dyn_cast(); + if (shape_ty != nullptr && shape_ty.hasRank() && shape_ty.getRank() > 1) + { + llvm::SmallVector shape; + int32_t dim_size = 0; + for (const auto &dim : llvm::enumerate(shape_attr.getValues())) + { + shape.push_back( + builder.getI32IntegerAttr(ConvertToCircleSize(dim.value().getSExtValue()))); + ++dim_size; + } + auto shape_type = + GetTypeFromTensorShape({static_cast(dim_size)}, builder.getIntegerType(32)); + auto output_shape = mlir::DenseElementsAttr::get(shape_type, shape); + auto shape_op = builder.create(loc, output_shape); + op_state.operands[1] = shape_op; + } + } + } + // TODO check why stablehlo is used + /* + if (op_name == "stablehlo.reduce" || op_name == "stablehlo.reduce_window" || + op_name == "stablehlo.sort" || op_name == "stablehlo.scatter") { + op_state.addRegion(); + } + if (op_name == "stablehlo.while") { + op_state.addRegion(); + op_state.addRegion(); + } + */ + + llvm::SmallVector attrs; + auto builtin_code = circle::GetBuiltinCode(&op_code); + if (builtin_code == circle::BuiltinOperator_CUSTOM) + { + auto status = true; + + std::vector custom_options; + 
+ // TODO enable to support large custom options + assert(not IsValidBufferOffset(op.large_custom_options_offset)); + /* + if (IsValidBufferOffset(op.large_custom_options_offset)) + { + custom_options.resize(op.large_custom_options_size); + memcpy(custom_options.data(), + reinterpret_cast(model_ptr) + op.large_custom_options_offset, + op.large_custom_options_size); + } + else + */ + { + custom_options = op.custom_options; + } + + status = + mlir::CustomOptionsToAttributes(op_code.custom_code, custom_options, builder, loc, &attrs); + if (!status) + { + return {}; + } + } + else + { + mlir::BuiltinOptionsToAttributes(op.builtin_options, builder, attrs); + // TODO enable BuiltinOptions2 + // mlir::BuiltinOptions2ToAttributes(op.builtin_options_2, builder, attrs); + } + op_state.addAttributes(attrs); + + // TODO handle CallOnce, If and While subgraphs + // TODO handle StableHLO + + (void)intermediate_types; + (void)func_names; + (void)model_ptr; + + return builder.create(op_state); +} + +// Returns indices of the given tensors in the subgraph. Returns error if a +// tensor name cannot be found in the subgraph. +std::optional> GetTensorIndices(const circle::SubGraphT &subgraph, + const std::vector &tensor_names) +{ + throw std::runtime_error("NYI GetTensorIndices"); + // NOTE enable codes when necessary + /* + absl::flat_hash_map name_to_index; + + for (const auto &index_and_tensor : llvm::enumerate(subgraph.tensors)) + { + name_to_index[index_and_tensor.value()->name] = index_and_tensor.index(); + } + + std::vector indices; + indices.reserve(tensor_names.size()); + + for (const auto &name : tensor_names) + { + auto found = name_to_index.find(name); + if (found != name_to_index.end()) + { + indices.push_back(found->second); + } + else + { + llvm::errs() << "Could not find tensor in subgraph: " << name << "\n"; + return {}; + } + } + + return indices; + */ +} + +// Given a list of tensor indices, returns true if any of the tensors have +// non-empty name strings. 
+bool HasNonEmptyNames(const circle::SubGraphT &subgraph, llvm::ArrayRef indices) +{ + return llvm::any_of(indices, [&](int i) { return !subgraph.tensors.at(i)->name.empty(); }); +} + +// Given a list of tensor indices, returns a array of strings of tensor names +// wrapped in a NamedAttribute. +mlir::Attribute BuildEntryFunctionAttribute(const circle::SubGraphT &subgraph, + mlir::Builder *builder, llvm::ArrayRef indices) +{ + auto tensor_names = llvm::map_range(indices, [&](int i) { return subgraph.tensors.at(i)->name; }); + // NOTE single line "argumments(tensor_names.begin(), tensor_names.end());" gives corrupted names + auto names_vect = llvm::to_vector(tensor_names); + llvm::SmallVector argumments; + for (auto &item : names_vect) + argumments.push_back(item); + return builder->getStrArrayAttr(argumments); +} + +// We want to adjust the func op according to some cross ops information. +std::optional PostProcessFuncOp(mlir::func::FuncOp func) +{ + OpBuilder builder(func); + // TODO walk with QConstOp when ready + return func; +} + +// There are control nodes at each end of each control edge. For each of them, +// we store the source vertices of the incoming edges (if any) and the control +// node's output token. To improve testability, we use an ordered set for the +// source vertices. +struct ControlNodeDesc +{ + std::set incoming; + std::optional outgoing; +}; + +using ControlNodes = llvm::DenseMap; + +// Helper function: After op has been emitted as the MLIR representation of +// a subgraph's operators[op_index], check *control_nodes whether it needs to be +// wrapped in a ControlNode because it's at either end of a control edge from +// the metadata. If it is, wrap it in a ControlNode, store the resulting +// ControlType token in *control_nodes, and return the non-ControlType (i.e., +// tensor) results. If it isn't, just return the original operator's results. 
+mlir::ResultRange MaybeWrapInControlNode(mlir::Operation *op, OpBuilder op_builder, int op_index, + Location op_loc, ControlNodes *control_nodes) +{ + const ControlNodes::iterator maybe_control_node = control_nodes->find(op_index); + if (maybe_control_node == control_nodes->end()) + { + return op->getResults(); + } + // TODO enable control node + (void)op_builder; + (void)op_loc; + llvm::errs() << "NYI MaybeWrapInControlNode\n"; + assert(false); // assert is used to know when this is used for some model + return op->getResults(); +} + +// Build a FuncOp from a circle SubGraph +// The buffers are directly taken +// from the deserialized flatbuffer as we do not have the type information to +// interpret them until this point. The base_loc parameter is the location of +// the flatbuffer as a whole (usually a file). If ordered_output_arrays is not +// empty, then the imported mlir function will only return nodes in +// ordered_output_arrays in the same order. If signature is not null, then the +// inputs/outputs in signature will be attached to the FuncOp. +std::optional +ConvertSubgraph(const circle::SubGraphT &subgraph, llvm::StringRef name, + const std::vector> &op_codes, + const std::vector &func_names, + const std::vector> &buffers, Location base_loc, + mlir::Builder builder, bool is_entry_point, bool use_external_constant, + const std::vector &ordered_input_arrays, + const std::vector &ordered_output_arrays, + bool experimental_prune_unreachable_nodes_unconditionally, + const circle::SignatureDefT *signature, const circle::ControlEdges &control_edges, + const circle::Model *model_ptr, bool use_stablehlo_constant) +{ + // Populate from metadata. 
+ ControlNodes control_nodes; + for (const auto [from, to] : control_edges) + { + control_nodes.try_emplace(from); + control_nodes[to].incoming.insert(from); + } + + llvm::SmallVector ret_types; + llvm::SmallVector input_types; + + auto func_loc = mlir::NameLoc::get(builder.getStringAttr(name), base_loc); + std::vector func_inputs = subgraph.inputs; + if (is_entry_point && !ordered_input_arrays.empty()) + { + if (!experimental_prune_unreachable_nodes_unconditionally) + { + // TODO(b/149922113): Resolve input-arrays/pruning flags interaction. + llvm::errs() << "input-arrays should be used with experimental pruning flag\n"; + return {}; + } + ASSIGN_OR_RETURN(func_inputs, GetTensorIndices(subgraph, ordered_input_arrays)); + } + + for (int input : func_inputs) + { + auto &tensor = *subgraph.tensors.at(input); + auto type_or_err = GetTensorType(tensor, builder); + if (!type_or_err.has_value()) + { + llvm::errs() << "Error reading argument types\n"; + return {}; + } + auto type = std::move(type_or_err).value(); + input_types.push_back(type); + } + + llvm::SmallVector is_op_output(subgraph.tensors.size(), false); + for (auto &op : subgraph.operators) + { + for (auto output : op->outputs) + { + is_op_output[output] = true; + } + } + + std::vector func_outputs = subgraph.outputs; + if (is_entry_point && !ordered_output_arrays.empty()) + { + ASSIGN_OR_RETURN(func_outputs, GetTensorIndices(subgraph, ordered_output_arrays)); + } + + for (auto output : func_outputs) + { + const bool is_func_input = + std::find(func_inputs.begin(), func_inputs.end(), output) != func_inputs.end(); + bool is_constant = !is_op_output[output] && !is_func_input; + + auto type_or_err = GetTensorType(*subgraph.tensors.at(output), builder, is_constant); + if (!type_or_err.has_value()) + { + llvm::errs() << "Error reading return types\n"; + return {}; + } + auto type = std::move(type_or_err).value(); + ret_types.push_back(type); + } + auto func_type = builder.getFunctionType(input_types, ret_types); + + 
// Construct function object + auto func = mlir::func::FuncOp::create(func_loc, name, func_type, /* attrs= */ {}); + func.addEntryBlock(); + auto &body = func.getBody(); + mlir::OpBuilder op_builder{body}; + + std::vector vals_map(subgraph.tensors.size(), nullptr); + Value maybe_optional_arg_marker = nullptr; + + // Get or construct MLIR values for each input + for (int i = 0, e = func_inputs.size(); i < e; i++) + { + auto input_tensor = func_inputs[i]; + const auto &tensor = *subgraph.tensors.at(input_tensor); + auto loc = TensorLoc(tensor, builder, base_loc); + if (vals_map[input_tensor]) + { + llvm::errs() << "Duplicate input arguments\n"; + return {}; + } + mlir::Value input_value = func.getArgument(i); + + // If the `tensor` has min/max and doesn't have scale/zero_point + // information, a stats op is created to use the input_value, then the + // `tensor` should be mapped to the result of this new stats op. + if (auto stats_op = ConvertMinMaxToStatsOp(tensor, op_builder, input_value)) + { + vals_map[input_tensor] = stats_op->getResult(0); + } + else + { + vals_map[input_tensor] = input_value; + } + } + + // Set entry_function attribute + if (is_entry_point) + { + // NOTE we need attribute something like this, in MLIR + // attributes {input_names = ["input_1", "input_2"], output_names = ["output_1"]} + if (HasNonEmptyNames(subgraph, func_inputs)) + { + auto names = BuildEntryFunctionAttribute(subgraph, &builder, func_inputs); + func->setAttr("input_names", names); + } + if (HasNonEmptyNames(subgraph, func_outputs)) + { + auto names = BuildEntryFunctionAttribute(subgraph, &builder, func_outputs); + func->setAttr("output_names", names); + } + } + else + { + func.setPrivate(); + } + + // Set signature on function. 
+ if (signature) + { + throw std::runtime_error("'signature' is expected to be nullptr"); + // TODO revive SetSignature + // SetSignature(func, signature, subgraph.tensors); + } + + absl::flat_hash_set pruned_subgraph_ops; + if (experimental_prune_unreachable_nodes_unconditionally) + { + throw std::runtime_error("'experimental_prune...' is expected to be false"); + // TODO prune subgraph for experimental_prune_unreachable_nodes_unconditionally + // ASSIGN_OR_RETURN(pruned_subgraph_ops, PruneSubgraph(subgraph, func_inputs, func_outputs)); + } + + // Construct MLIR operators from Circle operators + for (const auto &it : llvm::enumerate(subgraph.operators)) + { + auto &op = it.value(); + + if (experimental_prune_unreachable_nodes_unconditionally && !pruned_subgraph_ops.contains(op)) + { + continue; + } + + for (auto input_num : op->inputs) + { + // The operators in a graph are topologically sorted + // and so if no previous operation has produced a tensor + // it must be a constant. + if (input_num == -1) + { + if (maybe_optional_arg_marker == nullptr) + { + maybe_optional_arg_marker = op_builder + .create( + base_loc, builder.getNoneType(), builder.getUnitAttr()) + .getResult(); + } + } + else if (!vals_map.at(input_num)) + { + auto &const_tensor = *subgraph.tensors[input_num]; + auto const_loc = TensorLoc(const_tensor, builder, base_loc); + std::optional op_or_err; + std::vector buffer; + // TODO enable to support external tensor files + /* + // Check if constant tensor is stored outside of the flatbuffers. 
+ if (IsValidBufferOffset(buffers[const_tensor.buffer]->offset)) + { + const uint8_t *file_begin_ptr = + reinterpret_cast(model_ptr->allocation()->base()); + buffer = std::vector(file_begin_ptr + buffers[const_tensor.buffer]->offset, + file_begin_ptr + buffers[const_tensor.buffer]->offset + + buffers[const_tensor.buffer]->size); + + auto shape = const_tensor.shape; + } + else + */ + { + buffer = buffers[const_tensor.buffer]->data; + } + op_or_err = + use_external_constant + ? BuildExternalConstOp(const_tensor, const_tensor.buffer, op_builder, const_loc) + : BuildConstOp(const_tensor, buffer, const_tensor.is_variable, op_builder, const_loc, + use_stablehlo_constant); + if (!op_or_err.has_value()) + { + llvm::errs() << "Failed to create ConstOp\n"; + return {}; + } + vals_map[input_num] = op_or_err.value()->getResult(0); + } + } + + // Intermediate tensors for LSTMs are used to carry quantization range + // in their types, so we only need and extract their types. + std::vector intermediate_types; + intermediate_types.reserve(5); + for (auto intermediate : op->intermediates) + { + ASSIGN_OR_RETURN(auto type, GetTensorType(*subgraph.tensors[intermediate], builder, + /*is_constant=*/false, /*is_intermediate=*/true)); + intermediate_types.emplace_back(type); + } + + auto op_loc = OpLoc(*op, subgraph.tensors, builder, base_loc); + + // If there's an optional argument, maybe_optional_arg_marker has been set + // to a valid Value + ASSIGN_OR_RETURN(auto *mlir_op, ConvertOp(*op, vals_map, intermediate_types, + maybe_optional_arg_marker, op_codes, func_names, + subgraph.tensors, op_loc, op_builder, model_ptr)); + + // Add the results to the value maps. There are two cases: 1. the result + // tensor does not have min/max values, the original op result is used + // directly; 2. the result tensor has some min/max values, a stats op is + // created, then the result of the stats op is used. 
+ for (const auto &pair : llvm::enumerate( + MaybeWrapInControlNode(mlir_op, op_builder, it.index(), op_loc, &control_nodes))) + { + int output_tensor_index = op->outputs[pair.index()]; + auto &tensor = *subgraph.tensors[output_tensor_index]; + if (auto stats_op = ConvertMinMaxToStatsOp(tensor, op_builder, pair.value())) + { + vals_map[output_tensor_index] = stats_op->getResult(0); + } + else + { + vals_map[output_tensor_index] = pair.value(); + } + } + } + + // Construct return values + llvm::SmallVector return_operands; + for (auto index : func_outputs) + { + if (!vals_map.at(index)) + { + auto &const_tensor = *subgraph.tensors[index]; + auto const_loc = TensorLoc(const_tensor, builder, base_loc); + std::optional op_or_err; + std::vector buffer; + // TODO enable to support external tensor files + /* + // Check if constant tensor is stored outside of the flatbuffers. + if (IsValidBufferOffset(buffers[const_tensor.buffer]->offset)) + { + const uint8_t *file_begin_ptr = reinterpret_cast(model_ptr); + + buffer = std::vector(file_begin_ptr + buffers[const_tensor.buffer]->offset, + file_begin_ptr + buffers[const_tensor.buffer]->offset + + buffers[const_tensor.buffer]->size); + + auto shape = const_tensor.shape; + } + else + */ + { + buffer = buffers[const_tensor.buffer]->data; + } + op_or_err = use_external_constant + ? 
BuildExternalConstOp(const_tensor, const_tensor.buffer, op_builder, const_loc) + : BuildConstOp(const_tensor, buffer, const_tensor.is_variable, op_builder, + const_loc, use_stablehlo_constant); + if (!op_or_err.has_value()) + { + llvm::errs() << "Failed to create ConstOp\n"; + return {}; + } + vals_map[index] = op_or_err.value()->getResult(0); + } + return_operands.push_back(vals_map[index]); + } + + op_builder.create(base_loc, return_operands); + + return PostProcessFuncOp(func); +} + +std::string SubgraphName(bool set_implicit_main_func, unsigned index, + const circle::SubGraphT &subgraph) +{ + if (index == 0 && set_implicit_main_func) + { + return "main_graph"; + } + if (subgraph.name.empty()) + { + return llvm::formatv("fn_{0}", index).str(); + } + return subgraph.name; +} + +} // namespace + +mlir::OwningOpRef +FlatBufferToMlir(absl::string_view buffer, mlir::MLIRContext *context, mlir::Location base_loc, + bool use_external_constant, const std::vector &ordered_input_arrays, + const std::vector &ordered_output_arrays, + bool experimental_prune_unreachable_nodes_unconditionally) +{ + // Only run validator on models less than 2GB + if (buffer.length() < flatbuffer_size_max) + { + flatbuffers::Verifier base_verifier(reinterpret_cast(buffer.data()), + buffer.size()); + if (!circle::VerifyModelBuffer(base_verifier)) + { + llvm::errs() << "The model is not a valid Flatbuffer buffer.\n"; + return nullptr; + } + } + + auto circle_model = circle::GetModel(buffer.data()); + std::unique_ptr model(circle_model->UnPack()); + + auto builder = Builder(context); + + circle::ModelControlDependencies model_control_dependencies(model->subgraphs.size()); + + bool use_stablehlo_constant = false; + + // TODO iterate model->metadata + + std::vector func_names; + for (auto &subgraph : model->subgraphs) + { + func_names.push_back(subgraph->name); + } + + auto module = mlir::ModuleOp::create(base_loc); + + // We currently don't use this to make decisions, but we could + // use it in 
exports or if there are breaking changes + module->setAttr("circle.schema_version", builder.getI32IntegerAttr(model->version)); + if (!model->description.empty()) + { + module->setAttr("circle.description", builder.getStringAttr(model->description)); + } + + absl::flat_hash_map subgraph_to_signature_map; + for (int i = 0; i < model->signature_defs.size(); i++) + { + auto *signature_def = model->signature_defs[i].get(); + const uint32_t subgraph_index = signature_def->subgraph_index; + subgraph_to_signature_map[subgraph_index] = signature_def; + } + + const bool set_implicit_main_func = subgraph_to_signature_map.size() <= 1; + for (const auto &e : llvm::enumerate(model->subgraphs)) + { + auto &subgraph = e.value(); + std::string name = SubgraphName(set_implicit_main_func, e.index(), *subgraph); + uint32_t subgraph_index = static_cast(e.index()); + bool is_entry_point = + set_implicit_main_func ? e.index() == 0 : subgraph_to_signature_map.contains(subgraph_index); + circle::SignatureDefT *signature_def = subgraph_to_signature_map.contains(subgraph_index) + ? 
subgraph_to_signature_map.at(subgraph_index) + : nullptr; + + auto func_or_error = ConvertSubgraph( + *subgraph, name, model->operator_codes, func_names, model->buffers, base_loc, builder, + is_entry_point, use_external_constant, ordered_input_arrays, ordered_output_arrays, + experimental_prune_unreachable_nodes_unconditionally, signature_def, + model_control_dependencies[subgraph_index], circle_model, use_stablehlo_constant); + + if (!func_or_error.has_value()) + { + llvm::errs() << "Could not translate function '" << subgraph->name << "'\n"; + return nullptr; + } + // NOTE std::move is from TF for 'StatusOr' but here we use 'std::optional' + // TODO revise this for any issues that may happen + module.push_back(std::move(func_or_error).value()); + } + + return mlir::OwningOpRef(module); +} + +} // namespace Circle +} // namespace mlir diff --git a/circle-mlir/circle-mlir/lib/pass/CMakeLists.txt b/circle-mlir/circle-mlir/lib/pass/CMakeLists.txt new file mode 100644 index 00000000000..030d6f22ef4 --- /dev/null +++ b/circle-mlir/circle-mlir/lib/pass/CMakeLists.txt @@ -0,0 +1,21 @@ +set(SRC + src/CirclePass.cpp + src/ConvertONNXToCirclePass.cpp + src/ConvertHelper.cpp + src/RewriteCirclePass.cpp + src/RewriteONNXPass.cpp + src/DumpCircleOpsPass.cpp + src/RuntimeVerifyPass.cpp + src/ShapeInferencePass.cpp +) + +add_library(cirmlir_pass STATIC ${SRC}) +cir_mlir_static_flags(cirmlir_pass) +cir_onnx_static_flags(cirmlir_pass) +target_include_directories(cirmlir_pass PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/src) +target_include_directories(cirmlir_pass PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include) +target_link_libraries(cirmlir_pass PUBLIC cirmlir_dialect) +target_link_libraries(cirmlir_pass PUBLIC cirmlir_coverage) + +add_dependencies(cirmlir_pass circle_mlir_gen_inc) +target_include_directories(cirmlir_pass PUBLIC ${CMAKE_CURRENT_BINARY_DIR}) diff --git a/circle-mlir/circle-mlir/lib/pass/include/circle-mlir/pass/CirclePass.h 
b/circle-mlir/circle-mlir/lib/pass/include/circle-mlir/pass/CirclePass.h new file mode 100644 index 00000000000..264d846411b --- /dev/null +++ b/circle-mlir/circle-mlir/lib/pass/include/circle-mlir/pass/CirclePass.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2025 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __CIRCLE_MLIR_PASS_CIRCLE_PASS_H__ +#define __CIRCLE_MLIR_PASS_CIRCLE_PASS_H__ + +#include +#include +#include + +namespace mlir +{ +namespace Circle +{ + +int preprocessONNX(mlir::MLIRContext &context, mlir::OwningOpRef &module); +int shapeInferenceONNX(mlir::MLIRContext &context, mlir::OwningOpRef &module); +int convertToCircle(mlir::MLIRContext &context, mlir::OwningOpRef &module); +int postProcessCircle(mlir::MLIRContext &context, mlir::OwningOpRef &module); +int shapeValidateCircle(mlir::MLIRContext &context, mlir::OwningOpRef &module); +int dynaShapeValidateCircle(mlir::MLIRContext &context, mlir::OwningOpRef &module); +int dumpCircleOps(llvm::raw_fd_ostream &os, mlir::MLIRContext &context, + mlir::OwningOpRef &module); + +} // namespace Circle +} // namespace mlir + +#endif // __CIRCLE_MLIR_PASS_CIRCLE_PASS_H__ diff --git a/circle-mlir/circle-mlir/lib/pass/src/CirclePass.cpp b/circle-mlir/circle-mlir/lib/pass/src/CirclePass.cpp new file mode 100644 index 00000000000..2595453e3fb --- /dev/null +++ b/circle-mlir/circle-mlir/lib/pass/src/CirclePass.cpp @@ -0,0 +1,207 @@ +/* + * 
Copyright (c) 2025 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "circle-mlir/pass/CirclePass.h" + +#include "ConvertONNXToCirclePass.h" +#include "RewriteCirclePass.h" +#include "RewriteONNXPass.h" +#include "DumpCircleOpsPass.h" +#include "RuntimeVerifyPass.h" +#include "ShapeInferencePass.h" + +#include +#include + +#include + +namespace mlir +{ +namespace Circle +{ + +namespace +{ + +// TODO move to somewhere common +template T safecast(const char *, const T &); + +template <> int safecast(const char *s, const int &value) +{ + return (s == nullptr) ? value : std::stoi(s); +} + +} // namespace + +int preprocessONNX(mlir::MLIRContext &context, mlir::OwningOpRef &module) +{ + mlir::PassManager pm(module.get()->getName(), mlir::OpPassManager::Nesting::Implicit); + + int dump = safecast(std::getenv("CM_ONNX_DUMP"), 0); + std::function shouldPrintBeforePass; + std::function shouldPrintAfterPass; + shouldPrintBeforePass = [&](mlir::Pass *, mlir::Operation *) { return dump ? true : false; }; + shouldPrintAfterPass = [&](mlir::Pass *, mlir::Operation *) { return dump ? true : false; }; + pm.enableIRPrinting(shouldPrintBeforePass, shouldPrintAfterPass, false, false, false, + llvm::errs()); + + int result = 0; + pm.addNestedPass(onnx_mlir::createDecomposeONNXToONNXPass()); + // Replace ONNXReturnOp with func::ReturnOp. 
+ pm.addPass(onnx_mlir::createStandardFuncReturnPass()); + pm.addPass(createRewriteONNXPass()); + auto runres = pm.run(*module); + if (mlir::failed(runres)) + { + // TODO show error message if needed + result = -1; + } + + return result; +} + +int shapeInferenceONNX(mlir::MLIRContext &context, mlir::OwningOpRef &module) +{ + mlir::PassManager pm(module.get()->getName(), mlir::OpPassManager::Nesting::Implicit); + + int result = 0; + pm.addPass(onnx_mlir::createShapeInferencePass()); + auto runres = pm.run(*module); + if (mlir::failed(runres)) + { + // TODO show error message if needed + result = -1; + } + + return result; +} + +int convertToCircle(mlir::MLIRContext &context, mlir::OwningOpRef &module) +{ + mlir::PassManager pm(module.get()->getName(), mlir::OpPassManager::Nesting::Implicit); + + int dump = safecast(std::getenv("CM_PASS_DUMP"), 0); + std::function shouldPrintBeforePass; + std::function shouldPrintAfterPass; + shouldPrintBeforePass = [&](mlir::Pass *, mlir::Operation *) { return dump ? true : false; }; + shouldPrintAfterPass = [&](mlir::Pass *, mlir::Operation *) { return dump ? true : false; }; + pm.enableIRPrinting(shouldPrintBeforePass, shouldPrintAfterPass, false, false, false, + llvm::errs()); + + int result = 0; + pm.addPass(CreateRuntimeVerifyPass()); + pm.addPass(mlir::createCanonicalizerPass()); + auto runres = pm.run(*module); + if (mlir::failed(runres)) + result = -1; + + return result; +} + +int postProcessCircle(mlir::MLIRContext &context, mlir::OwningOpRef &module) +{ + mlir::PassManager pm(module.get()->getName(), mlir::OpPassManager::Nesting::Implicit); + + int dump = safecast(std::getenv("CM_PASS_DUMP"), 0); + std::function shouldPrintBeforePass; + std::function shouldPrintAfterPass; + shouldPrintBeforePass = [&](mlir::Pass *, mlir::Operation *) { return dump ? true : false; }; + shouldPrintAfterPass = [&](mlir::Pass *, mlir::Operation *) { return dump ? 
true : false; }; + pm.enableIRPrinting(shouldPrintBeforePass, shouldPrintAfterPass, false, false, false, + llvm::errs()); + + int result = 0; + int64_t dyna_count = 0; + int64_t prev_count = 0; + int64_t same_count = 0; + pm.addPass(CreateShapeInferencePass(dyna_count)); + pm.addPass(mlir::createCanonicalizerPass()); // to make Op::fold() call + pm.addPass(createRewriteCirclePass()); + pm.addPass(CreateRuntimeVerifyPass()); + // NOTE as couldn't find how to make pm.run() continue till there is nothing to shape infer, + // this loop is to emulate that kind of feature. + // 1/ loop until all has become static shape + // 2/ continue loop while dynamic shape node count has changed + // 3/ exit loop if count has not changed for 20 times, to prevent infinite loops + // TODO find a correct way to continue shape infer. + while (same_count < 20) + { + dyna_count = 0; + auto runres = pm.run(*module); + if (mlir::failed(runres)) + { + result = -1; + break; + } + if (dyna_count == 0) + break; + if (dyna_count == prev_count) + same_count++; + else if (dyna_count < prev_count) + same_count = 0; + + prev_count = dyna_count; + } + + return result; +} + +int shapeValidateCircle(mlir::MLIRContext &context, mlir::OwningOpRef &module) +{ + mlir::PassManager pm(module.get()->getName(), mlir::OpPassManager::Nesting::Implicit); + + int result = 0; + pm.addPass(CreateShapeValidatePass()); + auto runres = pm.run(*module); + if (mlir::failed(runres)) + result = -1; + + return result; +} + +int dynaShapeValidateCircle(mlir::MLIRContext &context, mlir::OwningOpRef &module) +{ + mlir::PassManager pm(module.get()->getName(), mlir::OpPassManager::Nesting::Implicit); + + int result = 0; + pm.addPass(CreateDynaShapeValidatePass()); + auto runres = pm.run(*module); + if (mlir::failed(runres)) + result = -1; + + return result; +} + +int dumpCircleOps(llvm::raw_fd_ostream &os, mlir::MLIRContext &context, + mlir::OwningOpRef &module) +{ + mlir::PassManager pm(module.get()->getName(), 
mlir::OpPassManager::Nesting::Implicit); + + DumpCircleOpsPass::GetOStream_t gos = [&](void) -> llvm::raw_fd_ostream & { return os; }; + + int result = 0; + auto pass = std::make_unique(); + pass->ostream(gos); + pm.addPass(std::move(pass)); + auto runres = pm.run(*module); + if (mlir::failed(runres)) + result = -1; + + return result; +} + +} // namespace Circle +} // namespace mlir diff --git a/circle-mlir/circle-mlir/lib/pass/src/ConvertHelper.cpp b/circle-mlir/circle-mlir/lib/pass/src/ConvertHelper.cpp new file mode 100644 index 00000000000..6dfce7efbd7 --- /dev/null +++ b/circle-mlir/circle-mlir/lib/pass/src/ConvertHelper.cpp @@ -0,0 +1,390 @@ +/* + * Copyright (c) 2025 Samsung Electronics Co., Ltd. All Rights Reserved + * Copyright 2019 The TensorFlow Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "ConvertHelper.h" + +#include "circle-mlir/dialect/NameUtils.h" + +#include // from @llvm-project +#include // from @llvm-project +#include +#include + +#include +#include + +namespace mlir +{ +namespace Circle +{ + +std::string GetOperationName(mlir::Operation *op) +{ + assert(op != nullptr); + + mlir::Location opLoc = op->getLoc(); + auto name = mlir::GetNameFromLoc(opLoc); + if (!name.empty()) + return name; + + // TOO remove this when not used anymore + auto strattr = op->getAttrOfType("onnx_node_name"); + if (strattr) + return strattr.str(); + + // Use operator type as name if there is no name + // TODO revise this to better implementation + static uint64_t sequence = 1; + auto seqstr = std::to_string(sequence); + sequence++; + return op->getName().getStringRef().str() + seqstr; +} + +DenseIntElementsAttr GetI1ElementsAttr(ArrayRef values, Builder *builder) +{ + mlir::RankedTensorType ty = + GetTypeFromTensorShape({static_cast(values.size())}, builder->getI1Type(), {}); + return DenseIntElementsAttr::get(ty, values); +} + +DenseIntElementsAttr GetI32ElementsAttr(ArrayRef values, Builder *builder) +{ + mlir::RankedTensorType ty = + GetTypeFromTensorShape({static_cast(values.size())}, builder->getI32Type(), {}); + return DenseIntElementsAttr::get(ty, values); +} + +namespace +{ + +template bool ExtractConstantIntValues(mlir::Value &input, std::vector &values) +{ + mlir::DenseElementsAttr dataAttr; + + if (auto constOp = dyn_cast_or_null(input.getDefiningOp())) + { + dataAttr = constOp.getValueAttr().dyn_cast_or_null(); + if (dataAttr == nullptr) + { + auto disValueAttr = constOp.getValueAttr().dyn_cast_or_null(); + if (disValueAttr) + dataAttr = disValueAttr.toDenseElementsAttr(); + } + } + else if (auto constOp2 = dyn_cast_or_null(input.getDefiningOp())) + dataAttr = constOp2.getValueAttr().dyn_cast_or_null(); + else + return false; + + if (dataAttr == nullptr) + return false; + + auto valueIt = dataAttr.getValues().begin(); + auto valueEd = 
dataAttr.getValues().end(); + for (; valueIt != valueEd; ++valueIt) + { + T value = static_cast((*valueIt).getSExtValue()); + values.push_back(value); + } + return true; +} + +template bool ExtractConstantFloatValues(mlir::Value &input, std::vector &values) +{ + mlir::DenseElementsAttr dataAttr; + + if (auto constOp = dyn_cast_or_null(input.getDefiningOp())) + { + dataAttr = constOp.getValueAttr().dyn_cast_or_null(); + if (dataAttr == nullptr) + { + auto disValueAttr = constOp.getValueAttr().dyn_cast_or_null(); + if (disValueAttr) + dataAttr = disValueAttr.toDenseElementsAttr(); + } + } + else if (auto constOp2 = dyn_cast_or_null(input.getDefiningOp())) + dataAttr = constOp2.getValueAttr().dyn_cast_or_null(); + else + return false; + + if (dataAttr == nullptr) + return false; + + auto valueIt = dataAttr.getValues().begin(); + auto valueEd = dataAttr.getValues().end(); + for (; valueIt != valueEd; ++valueIt) + { + T value = static_cast((*valueIt).convertToFloat()); + values.push_back(value); + } + return true; +} + +} // namespace + +bool ExtractConstantValues(mlir::Value &input, std::vector &values) +{ + return ExtractConstantIntValues(input, values); +} + +bool ExtractConstantValues(mlir::Value &input, std::vector &values) +{ + return ExtractConstantIntValues(input, values); +} + +bool ExtractConstantValues(mlir::Value &input, std::vector &values) +{ + return ExtractConstantFloatValues(input, values); +} + +namespace +{ + +template void ExtractArrayAttrIntValues(mlir::ArrayAttr &array, std::vector &values) +{ + for (int i = 0; i < array.size(); ++i) + { + auto v = GetIntValue(array, i); + values.push_back(v); + } +} + +} // namespace + +void ExtractArrayAttrValues(mlir::ArrayAttr &array, std::vector &values) +{ + ExtractArrayAttrIntValues(array, values); +} + +mlir::Value CreateNoValue(mlir::ConversionPatternRewriter &rewriter) +{ + return rewriter.create(rewriter.getUnknownLoc(), rewriter.getNoneType(), + rewriter.getUnitAttr()); +} + +mlir::RankedTensorType 
GetChnLastType(mlir::RankedTensorType tensor_type) +{ + auto tensor_shape = tensor_type.getShape(); + // NCHW to NHWC + auto to_nhwc = {tensor_shape[0], tensor_shape[2], tensor_shape[3], tensor_shape[1]}; + return mlir::RankedTensorType::get(to_nhwc, tensor_type.getElementType()); +} + +mlir::Value CreateConst(mlir::ConversionPatternRewriter &rewriter, float value, + const std::string &name) +{ + mlir::Type f32 = rewriter.getF32Type(); + mlir::RankedTensorType f32type = mlir::RankedTensorType::get({}, f32); + llvm::SmallVector values; + values.push_back(value); + mlir::Location constLoc = mlir::NameLoc::get(rewriter.getStringAttr(name)); + return rewriter.create(constLoc, mlir::DenseFPElementsAttr::get(f32type, values)); +} + +mlir::Value CreateConst(mlir::ConversionPatternRewriter &rewriter, mlir::Location &opLoc, + mlir::Value &reference, float value) +{ + auto rtype = reference.getType().dyn_cast_or_null(); + if (not rtype) + return {}; + if (not rtype.getElementType().isF32()) + return {}; + auto shape = rtype.getShape(); + if (shape.size() == 0) + return {}; + + // TODO revise to better value filling + int64_t numElements = 1; + for (size_t dim = 0; dim < shape.size(); ++dim) + numElements = numElements * shape[dim]; + + llvm::SmallVector values; + for (int64_t c = 0; c < numElements; ++c) + values.push_back(value); + + return rewriter.create(opLoc, mlir::DenseFPElementsAttr::get(rtype, values)); +} + +mlir::Value CreateConst(mlir::ConversionPatternRewriter &rewriter, mlir::Value &reference, + float value, const std::string &name) +{ + mlir::Location constLoc = mlir::NameLoc::get(rewriter.getStringAttr(name)); + return CreateConst(rewriter, constLoc, reference, value); +} + +mlir::Value CreateI32Const(mlir::ConversionPatternRewriter &rewriter, mlir::Location &opLoc, + int64_t value) +{ + mlir::Type i32 = rewriter.getI32Type(); + mlir::RankedTensorType scalar_type = RankedTensorType::get({}, i32); + auto avalue = static_cast(value); + auto attr = 
mlir::DenseElementsAttr::get(scalar_type, {avalue}); + return rewriter.create(opLoc, attr); +} + +mlir::Value CreateI32Const(mlir::ConversionPatternRewriter &rewriter, int64_t value, + const std::string &name) +{ + mlir::Location constLoc = mlir::NameLoc::get(rewriter.getStringAttr(name)); + auto const_op = CreateI32Const(rewriter, constLoc, value); + return const_op; +} + +mlir::Value CreateI32Const(mlir::ConversionPatternRewriter &rewriter, mlir::Location &opLoc, + llvm::ArrayRef source) +{ + auto ssize = static_cast(source.size()); + std::vector values; + for (int32_t i = 0; i < ssize; ++i) + values.push_back(source[i]); + mlir::Type i32 = rewriter.getI32Type(); + mlir::RankedTensorType ptype = RankedTensorType::get({ssize}, i32); + return rewriter.create(opLoc, DenseIntElementsAttr::get(ptype, values)); +} + +mlir::Value CreateI32Const(mlir::ConversionPatternRewriter &rewriter, + llvm::ArrayRef source, const std::string &name) +{ + mlir::Location constLoc = mlir::NameLoc::get(rewriter.getStringAttr(name)); + return CreateI32Const(rewriter, constLoc, source); +} + +mlir::Value CreateI32Const(mlir::ConversionPatternRewriter &rewriter, mlir::Location &opLoc, + std::vector &source) +{ + auto num = static_cast(source.size()); + mlir::Type i32 = rewriter.getI32Type(); + mlir::RankedTensorType ptype = RankedTensorType::get({num}, i32); + return rewriter.create(opLoc, DenseIntElementsAttr::get(ptype, source)); +} + +mlir::Value CreateI32Const(mlir::ConversionPatternRewriter &rewriter, std::vector &source, + const std::string &name) +{ + mlir::Location constLoc = mlir::NameLoc::get(rewriter.getStringAttr(name)); + return CreateI32Const(rewriter, constLoc, source); +} + +mlir::Value CreateI32Const(mlir::ConversionPatternRewriter &rewriter, mlir::Location &opLoc, + mlir::Value &source) +{ + std::vector values; + if (!ExtractConstantValues(source, values)) + return {}; + + mlir::RankedTensorType stype = source.getType().dyn_cast_or_null(); + mlir::Type i32 = 
rewriter.getI32Type(); + mlir::RankedTensorType si16stype = RankedTensorType::get(stype.getShape(), i32); + return rewriter.create(opLoc, DenseIntElementsAttr::get(si16stype, values)); +} + +mlir::Value CreateI32Const(mlir::ConversionPatternRewriter &rewriter, mlir::Value &source, + const std::string &name) +{ + mlir::Location constLoc = mlir::NameLoc::get(rewriter.getStringAttr(name)); + return CreateI32Const(rewriter, constLoc, source); +} + +mlir::Value CreateConstBroadcastChn(mlir::ConversionPatternRewriter &rewriter, + mlir::Location &opLoc, mlir::Value &reference, + mlir::Value &source) +{ + // TODO support other dtypes + // TODO support more source shape like 1xN, Nx1, ... + // TODO revise to better form when known + + // check reference is rank4, F32 + auto rtype = reference.getType().dyn_cast_or_null(); + auto rshape = rtype.getShape(); + if (not(rtype.getElementType().isF32() && rshape.size() == 4)) + return source; + + // check source is rank1, F32, same number of elements + auto stype = source.getType().dyn_cast_or_null(); + auto sshape = stype.getShape(); + if (sshape.size() == rshape.size()) + return source; + if (not(stype.getElementType().isF32() && sshape.size() == 1 && rshape[1] == sshape[0])) + return source; + + int32_t C = rshape[1]; + + mlir::DenseElementsAttr dataAttr; + if (auto constOp = dyn_cast(source.getDefiningOp())) + { + dataAttr = constOp.getValueAttr().dyn_cast_or_null(); + if (dataAttr == nullptr) + { + auto disValueAttr = constOp.getValueAttr().dyn_cast_or_null(); + if (disValueAttr) + dataAttr = disValueAttr.toDenseElementsAttr(); + } + } + else if (auto constOp2 = dyn_cast(source.getDefiningOp())) + dataAttr = constOp2.getValueAttr().dyn_cast_or_null(); + else + return source; + if (dataAttr == nullptr) + return source; + + auto valueIt = dataAttr.getValues().begin(); + auto valueEd = dataAttr.getValues().end(); + llvm::SmallVector values; + for (; valueIt != valueEd; ++valueIt) + { + float val = (*valueIt).convertToFloat(); + 
values.push_back(val); + } + + mlir::Type f32 = rewriter.getF32Type(); + mlir::RankedTensorType ttype = mlir::RankedTensorType::get({1, C, 1, 1}, f32); + return rewriter.create(opLoc, mlir::DenseFPElementsAttr::get(ttype, values)); +} + +mlir::Value CreateConstBroadcastChn(mlir::ConversionPatternRewriter &rewriter, + mlir::Value &reference, mlir::Value &source, + const std::string &name) +{ + mlir::Location constLoc = mlir::NameLoc::get(rewriter.getStringAttr(name)); + return CreateConstBroadcastChn(rewriter, constLoc, reference, source); +} + +bool GetPads(std::optional<::mlir::ArrayAttr> pads, std::vector &values) +{ + bool process = false; + if (pads.has_value()) + { + auto value = pads.value(); + // NOTE assert for not rank 4: this is for debug build to break the execution + assert(value.size() == 4); + // NOTE skip processing pads if not rank 4 + if (value.size() != 4) + return process; + for (int i = 0; i < value.size(); ++i) + { + auto v = GetIntValue(value, i); + values.push_back(v); + if (v) + process = true; + } + } + return process; +} + +} // namespace Circle +} // namespace mlir diff --git a/circle-mlir/circle-mlir/lib/pass/src/ConvertHelper.h b/circle-mlir/circle-mlir/lib/pass/src/ConvertHelper.h new file mode 100644 index 00000000000..6b512ae5883 --- /dev/null +++ b/circle-mlir/circle-mlir/lib/pass/src/ConvertHelper.h @@ -0,0 +1,162 @@ +/* + * Copyright (c) 2025 Samsung Electronics Co., Ltd. All Rights Reserved + * Copyright 2019 The TensorFlow Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __CIRCLE_MLIR_PASS_CONVERT_HELPER_H__ +#define __CIRCLE_MLIR_PASS_CONVERT_HELPER_H__ + +#include + +#include + +#include + +namespace mlir +{ +namespace Circle +{ + +inline const char ACT_NONE[]{"NONE"}; +inline const char ACT_RELU[]{"RELU"}; +inline const char ACT_RELU6[]{"RELU6"}; +inline const char ACT_TANH[]{"TANH"}; + +// Get name of the Op +std::string GetOperationName(mlir::Operation *op); + +// Returns 1D 1-bit dense elements attribute with the given values. +DenseIntElementsAttr GetI1ElementsAttr(ArrayRef values, Builder *builder); + +// Returns 1D 32-bit dense elements attribute with the given values. +DenseIntElementsAttr GetI32ElementsAttr(ArrayRef values, Builder *builder); + +bool ExtractConstantValues(mlir::Value &input, std::vector &values); +bool ExtractConstantValues(mlir::Value &input, std::vector &values); +bool ExtractConstantValues(mlir::Value &input, std::vector &values); + +void ExtractArrayAttrValues(mlir::ArrayAttr &array, std::vector &values); + +// Create NoValueOp, used for No Bias +mlir::Value CreateNoValue(mlir::ConversionPatternRewriter &rewriter); + +// Get output type of op with channel last order +mlir::RankedTensorType GetChnLastType(mlir::RankedTensorType tensor_type); + +// Create ConstOp with scalar type and float value +mlir::Value CreateConst(mlir::ConversionPatternRewriter &rewriter, float value, + const std::string &name); +// Create ConstOp with type 'reference' and 'value' values +mlir::Value CreateConst(mlir::ConversionPatternRewriter &rewriter, mlir::Location &opLoc, + mlir::Value &reference, float value); +mlir::Value CreateConst(mlir::ConversionPatternRewriter &rewriter, mlir::Value &reference, + float value, const std::string &name); + +// Create scalar ConstOp with value +mlir::Value CreateI32Const(mlir::ConversionPatternRewriter &rewriter, mlir::Location &opLoc, + int64_t value); +mlir::Value 
CreateI32Const(mlir::ConversionPatternRewriter &rewriter, int64_t value, + const std::string &name); + +// Create int32_t 1D ConstOp with ArrayRef +mlir::Value CreateI32Const(mlir::ConversionPatternRewriter &rewriter, mlir::Location &opLoc, + llvm::ArrayRef values); +mlir::Value CreateI32Const(mlir::ConversionPatternRewriter &rewriter, + llvm::ArrayRef source, const std::string &name); + +// Create int32_t 1D ConstOp with std:vector +mlir::Value CreateI32Const(mlir::ConversionPatternRewriter &rewriter, mlir::Location &opLoc, + std::vector &source); +mlir::Value CreateI32Const(mlir::ConversionPatternRewriter &rewriter, std::vector &source, + const std::string &name); + +// Create int32_t ConstOp from int32_t/int64_t +mlir::Value CreateI32Const(mlir::ConversionPatternRewriter &rewriter, mlir::Location &opLoc, + mlir::Value &source); +mlir::Value CreateI32Const(mlir::ConversionPatternRewriter &rewriter, mlir::Value &source, + const std::string &name); + +// Create ConstOp with broadcast applied to rank of 'reference' with values from 'source' +// - 'reference' is rank-4, NCHW format +// - 'source' is vector with C elemnts +// - return rank-4 with shape 1xCx1x1 +// - only F32 is supported for now +// - return source if any condition does not match +mlir::Value CreateConstBroadcastChn(mlir::ConversionPatternRewriter &rewriter, + mlir::Location &opLoc, mlir::Value &reference, + mlir::Value &source); +mlir::Value CreateConstBroadcastChn(mlir::ConversionPatternRewriter &rewriter, + mlir::Value &reference, mlir::Value &source, + const std::string &name); + +// Get integer value of array[index] +template TYPE GetIntValue(mlir::ArrayAttr array, int index) +{ + return static_cast(array.getValue()[index].cast().getInt()); +} + +// Get value from pads if available as return true +// if false, we do not need to process pads value +bool GetPads(std::optional<::mlir::ArrayAttr> pads, std::vector &values); + +#define CHECK_VALID_RANK_2(VALUE) \ + do \ + { \ + if (not VALUE) \ + 
return mlir::failure(); \ + if (VALUE.getRank() != 2) \ + return mlir::failure(); \ + } while (0) + +#define CHECK_VALID_RANK_4(VALUE) \ + do \ + { \ + if (not VALUE) \ + return mlir::failure(); \ + if (VALUE.getRank() != 4) \ + return mlir::failure(); \ + } while (0) + +#define CHECK_VALID_RANK_2_4(VALUE) \ + do \ + { \ + if (not VALUE) \ + return mlir::failure(); \ + if (!(VALUE.getRank() == 2 || VALUE.getRank() == 4)) \ + return mlir::failure(); \ + } while (0) + +#define CHECK_VALID_RANK_3_4(VALUE) \ + do \ + { \ + if (not VALUE) \ + return mlir::failure(); \ + if (!(VALUE.getRank() == 3 || VALUE.getRank() == 4)) \ + return mlir::failure(); \ + } while (0) + +#define CHECK_VALID_RANK_ATLEAST(VALUE, NUM) \ + do \ + { \ + if (not VALUE) \ + return mlir::failure(); \ + if (VALUE.getRank() < NUM) \ + return mlir::failure(); \ + } while (0) + +} // namespace Circle +} // namespace mlir + +#endif // __CIRCLE_MLIR_PASS_CONVERT_HELPER_H__ diff --git a/circle-mlir/circle-mlir/lib/pass/src/ConvertONNXToCirclePass.cpp b/circle-mlir/circle-mlir/lib/pass/src/ConvertONNXToCirclePass.cpp new file mode 100644 index 00000000000..d77c0068bef --- /dev/null +++ b/circle-mlir/circle-mlir/lib/pass/src/ConvertONNXToCirclePass.cpp @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2025 Samsung Electronics Co., Ltd. All Rights Reserved + * Copyright 2019-2022 The IBM Research Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "ConvertONNXToCirclePass.h" + +#define DEBUG_TYPE "o2c" +#include + +// NOTE lets use names from ONNX Op for the conversion class and the file name. +// ONNX: ONNXAbcdOp +// class: ConvAbcd +// file: AbcdOp.h + +#include + +#include +#include +#include +#include + +// from onnx-mlir source +#include + +#include + +namespace mlir +{ +namespace Circle +{ + +namespace +{ + +// Convert for binary input with Activation; such as Add, Sub, Mul, Div, ... +template +class ConvBinaryT : public mlir::OpConversionPattern +{ +public: + using mlir::OpConversionPattern::OpConversionPattern; + using OpAdaptor = typename ONNXOpT::Adaptor; + + mlir::LogicalResult matchAndRewrite(ONNXOpT op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override + { + mlir::Value lhs = adaptor.getA(); + mlir::Value rhs = adaptor.getB(); + + rewriter.replaceOpWithNewOp(op, op.getType(), lhs, rhs, "NONE"); + + return mlir::success(); + } +}; + +} // namespace + +namespace +{ + +inline bool isCircleFloat(mlir::Type type) +{ + return type.isa(); +} + +inline bool isCircleInt(mlir::Type type) +{ + mlir::IntegerType intType = type.dyn_cast(); + if (intType) + { + std::set intWidth{1, 8, 16, 32, 64}; + auto w = intType.getWidth(); + if (intWidth.find(w) != intWidth.end()) + { + return intType.isSignless() || (w == 16 && intType.isSigned()) || + (w == 8 && intType.isUnsigned()); + } + } + return false; +} + +} // namespace + +struct ConvertONNXToCirclePass + : public mlir::PassWrapper> +{ + ConvertONNXToCirclePass() = default; + ConvertONNXToCirclePass(const ConvertONNXToCirclePass &pass) + : mlir::PassWrapper>() + { + // Do nothing + } + + llvm::StringRef getArgument() const override { return "onnx-to-circle"; } + + llvm::StringRef getDescription() const override { return "ONNX to Circle"; } + + Option target{*this, "target", ::llvm::cl::desc("ONNX dialect to Circle dialect"), + ::llvm::cl::init("")}; + + void runOnOperation() final; +}; + +void 
ConvertONNXToCirclePass::runOnOperation() +{ + mlir::func::FuncOp function = getOperation(); + mlir::MLIRContext *context = &getContext(); + mlir::ConversionTarget target(getContext()); + + TypeConverter typeConverter; + typeConverter.addConversion([](Type type) -> std::optional { + // TODO support mode dtypes + // NOTE Conv2D without bias is NoneType + if (isCircleFloat(type) || isCircleInt(type) || type.isa()) + return type; + LLVM_DEBUG({ llvm::dbgs() << "TypeConverter Type None\n"; }); + return std::nullopt; + }); + typeConverter.addConversion([&](TensorType type) -> std::optional { + if (typeConverter.isLegal(type.getElementType())) + return type; + LLVM_DEBUG({ llvm::dbgs() << "TypeConverter TensorType None\n"; }); + return std::nullopt; + }); + + target.addLegalDialect(); + target.addLegalDialect(); + + mlir::RewritePatternSet patterns(context); + // NOTE use name from ONNX Op, suffix T for templates + patterns.insert>(typeConverter, context); + + auto res = mlir::applyFullConversion(function, target, std::move(patterns)); + if (mlir::failed(res)) + { + return signalPassFailure(); + } +} + +std::unique_ptr createConvertONNXToCirclePass(void) +{ + return std::make_unique(); +} + +} // namespace Circle +} // namespace mlir diff --git a/circle-mlir/circle-mlir/lib/pass/src/ConvertONNXToCirclePass.h b/circle-mlir/circle-mlir/lib/pass/src/ConvertONNXToCirclePass.h new file mode 100644 index 00000000000..c7a22ae7251 --- /dev/null +++ b/circle-mlir/circle-mlir/lib/pass/src/ConvertONNXToCirclePass.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2025 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __CIRCLE_MLIR_PASS_CONVERT_ONNX2CIRCLE_PASS_H__ +#define __CIRCLE_MLIR_PASS_CONVERT_ONNX2CIRCLE_PASS_H__ + +#include + +namespace mlir +{ +namespace Circle +{ + +std::unique_ptr createConvertONNXToCirclePass(void); + +} // namespace Circle +} // namespace mlir + +#endif // __CIRCLE_MLIR_PASS_CONVERT_ONNX2CIRCLE_PASS_H__ diff --git a/circle-mlir/circle-mlir/lib/pass/src/DumpCircleOpsPass.cpp b/circle-mlir/circle-mlir/lib/pass/src/DumpCircleOpsPass.cpp new file mode 100644 index 00000000000..5b2206d8118 --- /dev/null +++ b/circle-mlir/circle-mlir/lib/pass/src/DumpCircleOpsPass.cpp @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2025 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "DumpCircleOpsPass.h" + +#include + +#include +#include + +namespace mlir +{ +namespace Circle +{ + +void DumpCircleOpsPass::runOnOperation() +{ + mlir::func::FuncOp func = getOperation(); + + for (auto ®ion : func->getRegions()) + dumpRegion(region); +} + +void DumpCircleOpsPass::dumpRegion(mlir::Region ®ion) +{ + region.walk([&](mlir::Operation *op) { ostream() << op->getName() << "\n"; }); + + region.walk([&](mlir::Operation *op) { + for (auto ®ion : op->getRegions()) + dumpRegion(region); + }); +} + +} // namespace Circle +} // namespace mlir diff --git a/circle-mlir/circle-mlir/lib/pass/src/DumpCircleOpsPass.h b/circle-mlir/circle-mlir/lib/pass/src/DumpCircleOpsPass.h new file mode 100644 index 00000000000..a93451bc417 --- /dev/null +++ b/circle-mlir/circle-mlir/lib/pass/src/DumpCircleOpsPass.h @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2025 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __CIRCLE_MLIR_PASS_DUMP_CIRCLE_OPS_PASS_H__ +#define __CIRCLE_MLIR_PASS_DUMP_CIRCLE_OPS_PASS_H__ + +#include +#include + +#include + +namespace mlir +{ +namespace Circle +{ + +struct DumpCircleOpsPass + : public mlir::PassWrapper> +{ + DumpCircleOpsPass() = default; + DumpCircleOpsPass(const DumpCircleOpsPass &pass) + : mlir::PassWrapper>() + { + _getOStream = pass._getOStream; + } + + llvm::StringRef getArgument() const override { return "circle-dump-ops"; } + + llvm::StringRef getDescription() const override { return "Dump Circle ops"; } + + Option target{*this, "target", ::llvm::cl::desc("Dump Circle operators"), + ::llvm::cl::init("")}; + + void runOnOperation() final; + +protected: + void dumpRegion(mlir::Region ®ion); + +public: + using GetOStream_t = std::function; + + void ostream(GetOStream_t os) { _getOStream = os; } + llvm::raw_fd_ostream &ostream(void) { return _getOStream(); } + +protected: + GetOStream_t _getOStream = nullptr; +}; + +} // namespace Circle +} // namespace mlir + +#endif // __CIRCLE_MLIR_PASS_DUMP_CIRCLE_OPS_PASS_H__ diff --git a/circle-mlir/circle-mlir/lib/pass/src/RewriteCirclePass.cpp b/circle-mlir/circle-mlir/lib/pass/src/RewriteCirclePass.cpp new file mode 100644 index 00000000000..2281f4a3e1e --- /dev/null +++ b/circle-mlir/circle-mlir/lib/pass/src/RewriteCirclePass.cpp @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2025 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#define DEBUG_TYPE "o2c" +#include + +#include "RewriteCirclePass.h" +#include "ConvertHelper.h" + +#include + +#include +#include +#include +#include +#include + +namespace mlir +{ +namespace Circle +{ + +struct RewriteCirclePass + : public mlir::PassWrapper> +{ + RewriteCirclePass() = default; + RewriteCirclePass(const RewriteCirclePass &pass) + : mlir::PassWrapper>() + { + // Do nothing + } + + llvm::StringRef getArgument() const override { return "circle-rewrite"; } + + llvm::StringRef getDescription() const override { return "Rewrite Circle ops"; } + + Option target{*this, "target", + ::llvm::cl::desc("Rewrite Circle dialect to Circle dialect"), + ::llvm::cl::init("")}; + + void runOnOperation() final; + +private: + // Apply canonicalization, mainly constant folding, on the function. + void applyCanonicalization(); + // Apply activation fusion + void applyActivationFusion(); +}; + +void RewriteCirclePass::runOnOperation() +{ + // canonicalization + applyCanonicalization(); + // activation fusion + applyActivationFusion(); +} + +void RewriteCirclePass::applyCanonicalization() +{ + mlir::func::FuncOp func = getOperation(); + mlir::MLIRContext *context = &getContext(); + mlir::RewritePatternSet patterns(context); + + func->walk([&](Operation *op) { + op->getRegisteredInfo()->getCanonicalizationPatterns(patterns, context); + }); +} + +void RewriteCirclePass::applyActivationFusion() +{ + mlir::func::FuncOp func = getOperation(); + mlir::MLIRContext *context = &getContext(); + mlir::RewritePatternSet patterns(context); + + // TODO enable Tanh after circle-interpreter works + // patterns.add>(context); + + // TODO enable this after https://github.com/Samsung/ONE/pull/10921 lands + // and NPU_Compiler is ready + // patterns.add>(context); + + // TODO add more patterns + + (void)applyPatternsAndFoldGreedily(func, std::move(patterns)); +} + +std::unique_ptr 
createRewriteCirclePass(void) +{ + return std::make_unique(); +} + +} // namespace Circle +} // namespace mlir diff --git a/circle-mlir/circle-mlir/lib/pass/src/RewriteCirclePass.h b/circle-mlir/circle-mlir/lib/pass/src/RewriteCirclePass.h new file mode 100644 index 00000000000..d238353fe6a --- /dev/null +++ b/circle-mlir/circle-mlir/lib/pass/src/RewriteCirclePass.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2025 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __CIRCLE_MLIR_PASS_REWRITE_CIRCLE_PASS_H__ +#define __CIRCLE_MLIR_PASS_REWRITE_CIRCLE_PASS_H__ + +#include + +namespace mlir +{ +namespace Circle +{ + +std::unique_ptr createRewriteCirclePass(void); + +} // namespace Circle +} // namespace mlir + +#endif // __CIRCLE_MLIR_PASS_REWRITE_CIRCLE_PASS_H__ diff --git a/circle-mlir/circle-mlir/lib/pass/src/RewriteONNXPass.cpp b/circle-mlir/circle-mlir/lib/pass/src/RewriteONNXPass.cpp new file mode 100644 index 00000000000..7303bb7e2bb --- /dev/null +++ b/circle-mlir/circle-mlir/lib/pass/src/RewriteONNXPass.cpp @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2025 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#define DEBUG_TYPE "o2c" +#include + +#include "RewriteONNXPass.h" + +#include +#include +#include +#include +#include + +// from onnx-mlir source +#include + +#include "onnx/CompactReshapeConvReshape.h" + +namespace mlir +{ +namespace Circle +{ + +struct RewriteONNXPass + : public mlir::PassWrapper> +{ + RewriteONNXPass() = default; + RewriteONNXPass(const RewriteONNXPass &pass) + : mlir::PassWrapper>() + { + // Do nothing + } + + llvm::StringRef getArgument() const override { return "circle-onnx-rewrite"; } + + llvm::StringRef getDescription() const override { return "Rewrite ONNX ops"; } + + Option target{*this, "target", + ::llvm::cl::desc("Rewrite ONNX dialect to ONNX dialect"), + ::llvm::cl::init("")}; + + void runOnOperation() final; +}; + +void RewriteONNXPass::runOnOperation() +{ + mlir::func::FuncOp func = getOperation(); + mlir::MLIRContext *context = &getContext(); + mlir::RewritePatternSet patterns(context); + + patterns.add(context); // remove unnecessary reshapes + // TODO add more patterns + + (void)applyPatternsAndFoldGreedily(func, std::move(patterns)); +} + +std::unique_ptr createRewriteONNXPass(void) +{ + return std::make_unique(); +} + +} // namespace Circle +} // namespace mlir diff --git a/circle-mlir/circle-mlir/lib/pass/src/RewriteONNXPass.h b/circle-mlir/circle-mlir/lib/pass/src/RewriteONNXPass.h new file mode 100644 index 00000000000..5c905c97be8 --- /dev/null +++ b/circle-mlir/circle-mlir/lib/pass/src/RewriteONNXPass.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2025 Samsung Electronics Co., Ltd. 
All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __CIRCLE_MLIR_PASS_REWRITE_ONNX_PASS_H__ +#define __CIRCLE_MLIR_PASS_REWRITE_ONNX_PASS_H__ + +#include + +namespace mlir +{ +namespace Circle +{ + +std::unique_ptr createRewriteONNXPass(void); + +} // namespace Circle +} // namespace mlir + +#endif // __CIRCLE_MLIR_PASS_REWRITE_ONNX_PASS_H__ diff --git a/circle-mlir/circle-mlir/lib/pass/src/RuntimeVerifyPass.cpp b/circle-mlir/circle-mlir/lib/pass/src/RuntimeVerifyPass.cpp new file mode 100644 index 00000000000..2f61ff0c555 --- /dev/null +++ b/circle-mlir/circle-mlir/lib/pass/src/RuntimeVerifyPass.cpp @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2025 Samsung Electronics Co., Ltd. All Rights Reserved + * Copyright 2020 The TensorFlow Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "RuntimeVerifyPass.h" + +#include +#include +#include + +#include + +namespace mlir +{ +namespace Circle +{ + +struct RuntimeVerifyPass + : public mlir::PassWrapper> +{ + RuntimeVerifyPass() = default; + RuntimeVerifyPass(const RuntimeVerifyPass &pass) + : mlir::PassWrapper>() + { + // Do nothing + } + + llvm::StringRef getArgument() const override { return "circle-runtime-verify"; } + llvm::StringRef getDescription() const override { return "Circle Runtime Verify"; } + + void runOnOperation(void) final; +}; + +void RuntimeVerifyPass::runOnOperation(void) +{ + getOperation().walk([&](CirRuntimeVerifyOpInterface op) { + if (mlir::failed(op.VerifyCirRuntimeConstraints(op.getOperation(), true))) + signalPassFailure(); + }); +} + +// Verifies circle runtime constraints. +std::unique_ptr CreateRuntimeVerifyPass(void) +{ + return std::make_unique(); +} + +} // namespace Circle +} // namespace mlir diff --git a/circle-mlir/circle-mlir/lib/pass/src/RuntimeVerifyPass.h b/circle-mlir/circle-mlir/lib/pass/src/RuntimeVerifyPass.h new file mode 100644 index 00000000000..7f67d9b3b24 --- /dev/null +++ b/circle-mlir/circle-mlir/lib/pass/src/RuntimeVerifyPass.h @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2025 Samsung Electronics Co., Ltd. All Rights Reserved + * Copyright 2020 The TensorFlow Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __CIRCLE_MLIR_PASS_RUNTIME_VERIFY_PASS_H__ +#define __CIRCLE_MLIR_PASS_RUNTIME_VERIFY_PASS_H__ + +#include + +namespace mlir +{ +namespace Circle +{ + +std::unique_ptr CreateRuntimeVerifyPass(void); + +} // namespace Circle +} // namespace mlir + +#endif // __CIRCLE_MLIR_PASS_RUNTIME_VERIFY_PASS_H__ diff --git a/circle-mlir/circle-mlir/lib/pass/src/ShapeInferencePass.cpp b/circle-mlir/circle-mlir/lib/pass/src/ShapeInferencePass.cpp new file mode 100644 index 00000000000..4eaf015705c --- /dev/null +++ b/circle-mlir/circle-mlir/lib/pass/src/ShapeInferencePass.cpp @@ -0,0 +1,272 @@ +/* + * Copyright (c) 2025 Samsung Electronics Co., Ltd. All Rights Reserved + * Copyright 2020 The TensorFlow Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#define DEBUG_TYPE "o2c" +#include + +#include "ShapeInferencePass.h" + +#include +#include +#include + +#include + +namespace mlir +{ +namespace Circle +{ + +/// The ShapeInferencePass is a pass that performs intra-procedural +/// shape inference. +/// +/// Algorithm: +/// +/// 1) Build a worklist containing all the operations that return a +/// dynamically shaped tensor: these are the operations that need shape +/// inference. 
+/// 2) Iterate on the worklist: +/// a) find an operation to process: the next ready operation in the +/// worklist has all of its arguments non-generic, +/// b) if no operation is found, break out of the loop, +/// c) remove the operation from the worklist, +/// d) infer the shape of its output from the argument types. +/// 3) If the worklist is empty, the algorithm succeeded. +/// +struct ShapeInferencePass + : public mlir::PassWrapper> +{ + MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(ShapeInferencePass) + void runOnOperation() override + { + auto f = getOperation(); + + // Populate the worklist with the operations that need shape inference: + // these are operations that return a dynamic shape. + llvm::SmallPtrSet opWorklist; + int64_t op_count = 0; + f.walk([&](mlir::Operation *op) { + if (returnsDynamicShape(op)) + { + opWorklist.insert(op); + op_count++; + } + }); + + // TODO remove this when this pass runs again if there is any change in the graph + if (_dynacount) + *_dynacount = op_count; + + // Iterate on the operations in the worklist until all operations have been + // inferred or no change happened (fix point). + while (!opWorklist.empty()) + { + // Find the next operation ready for inference, that is an operation + // with all operands already resolved (non-generic). + auto nextop = llvm::find_if(opWorklist, allOperandsInferred); + if (nextop == opWorklist.end()) + break; + + Operation *op = *nextop; + opWorklist.erase(op); + + // Ask the operation to infer its output shapes. 
+ LLVM_DEBUG(llvm::dbgs() << "Inferring shape for: " << *op << "\n"); + if (auto shapeOp = dyn_cast(op)) + { + shapeOp.inferShapes(); + if (returnsDynamicShape(op)) + { + LLVM_DEBUG({ + mlir::Location opLoc = op->getLoc(); + llvm::dbgs() << "-- " << opLoc << " still has dynamic shape\n"; + }); + } + } + else + { + LLVM_DEBUG({ + mlir::Location opLoc = op->getLoc(); + llvm::dbgs() << "-- " << opLoc << " has dynamic shape but no CirShapeInference\n"; + }); + } + } + + // If the operation worklist isn't empty, this indicates a failure. + if (!opWorklist.empty()) + { + f.emitWarning("Shape inference still has dynamic shapes, ") + << opWorklist.size() << " operations couldn't be inferred\n"; + while (!opWorklist.empty()) + { + Operation *op = *opWorklist.begin(); + LLVM_DEBUG(llvm::dbgs() << "Shape inference left: " << *op << "\n"); + opWorklist.erase(op); + } + } + + // set function shape to that from last op. + // this is to update when function shape is unknown at beginning and then + // fixed to known with shape inference. + Operation *returnOp = f.getBody().back().getTerminator(); + assert(returnOp && "function must return"); + FunctionType fty = f.getFunctionType(); + assert(f.getNumResults() == returnOp->getNumOperands() && + "returned results count much match function type"); + f.setType(fty.clone(fty.getInputs(), returnOp->getOperandTypes())); + } + + /// A utility method that returns if the given operation has all of its + /// operands inferred. + static bool allOperandsInferred(Operation *op) + { + return llvm::all_of(op->getOperands(), [](mlir::Value operand) { + // ignore for NoValueOp + auto no_value = dyn_cast_or_null(operand.getDefiningOp()); + if (no_value) + return true; + auto resType = operand.getType(); + return llvm::isa(resType); + }); + } + + /// A utility method that returns if the given operation has a dynamically + /// shaped result. 
+ static bool returnsDynamicShape(Operation *op) + { + // return false to ignore for NoValueOp, as it doesn't have ShapedType + auto no_value = dyn_cast_or_null(*op); + if (no_value) + return false; + + return llvm::any_of(op->getResultTypes(), [](Type resultType) { + // Checks whether each dimension is all dynamic if it is ShapedType. + if (ShapedType shapedType = resultType.dyn_cast()) + { + if (not shapedType.hasRank()) + return true; + int rank = shapedType.getRank(); + for (int i = 0; i < rank; ++i) + if (shapedType.isDynamicDim(i)) + return true; + return false; + } + // Non-shaped types are considered dynamic + return true; + }); + } + + int64_t *_dynacount = nullptr; +}; + +struct ShapeValidatePass + : public mlir::PassWrapper> +{ + MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(ShapeValidatePass) + void runOnOperation() override + { + auto f = getOperation(); + + llvm::SmallPtrSet opWorklist; + f.walk([&](mlir::Operation *op) { + if (returnsDynamicShape(op)) + opWorklist.insert(op); + }); + + if (!opWorklist.empty()) + { + f.emitError("Shape validation found node with unknown shape.\n"); + // TODO dump ops when necessary + signalPassFailure(); + } + } + + static bool returnsDynamicShape(Operation *op) + { + return llvm::any_of(op->getResultTypes(), [](Type resultType) { + if (ShapedType shapedType = resultType.dyn_cast()) + { + int rank = shapedType.getRank(); + for (int i = 0; i < rank; ++i) + if (shapedType.isDynamicDim(i)) + return true; + } + return false; + }); + } +}; + +struct AnyShapeValidatePass + : public mlir::PassWrapper> +{ + MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(AnyShapeValidatePass) + void runOnOperation() override + { + auto f = getOperation(); + + llvm::SmallPtrSet opWorklist; + f.walk([&](mlir::Operation *op) { + if (returnsFullDynamicShape(op)) + opWorklist.insert(op); + }); + + if (!opWorklist.empty()) + { + f.emitError("Shape validation found node with full dynamic shape.\n"); + // TODO dump ops when necessary + 
signalPassFailure(); + } + } + + static bool returnsFullDynamicShape(Operation *op) + { + return llvm::any_of(op->getResultTypes(), [](Type resultType) { + if (ShapedType shapedType = resultType.dyn_cast()) + { + int rank = shapedType.getRank(); + for (int i = 0; i < rank; ++i) + if (not shapedType.isDynamicDim(i)) + return false; + return true; + } + return false; + }); + } +}; + +// Create a Shape Inference pass. +std::unique_ptr CreateShapeInferencePass(int64_t &dynaCount) +{ + auto inst = std::make_unique(); + inst->_dynacount = &dynaCount; + return inst; +} + +std::unique_ptr CreateShapeValidatePass(void) +{ + return std::make_unique(); +} + +// test helper to check input model having output with all dynamic dim +// if output has any static dim, it is success +std::unique_ptr CreateDynaShapeValidatePass(void) +{ + return std::make_unique(); +} + +} // namespace Circle +} // namespace mlir diff --git a/circle-mlir/circle-mlir/lib/pass/src/ShapeInferencePass.h b/circle-mlir/circle-mlir/lib/pass/src/ShapeInferencePass.h new file mode 100644 index 00000000000..e833e1d0430 --- /dev/null +++ b/circle-mlir/circle-mlir/lib/pass/src/ShapeInferencePass.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2025 Samsung Electronics Co., Ltd. All Rights Reserved + * Copyright 2020 The TensorFlow Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __CIRCLE_MLIR_PASS_SHAPE_INFERENCE_PASS_H__ +#define __CIRCLE_MLIR_PASS_SHAPE_INFERENCE_PASS_H__ + +#include + +namespace mlir +{ +namespace Circle +{ + +std::unique_ptr CreateShapeInferencePass(int64_t &dynaCount); +std::unique_ptr CreateShapeValidatePass(void); +std::unique_ptr CreateDynaShapeValidatePass(void); + +} // namespace Circle +} // namespace mlir + +#endif // __CIRCLE_MLIR_PASS_SHAPE_INFERENCE_PASS_H__ diff --git a/circle-mlir/circle-mlir/lib/pass/src/onnx/CompactReshapeConvReshape.h b/circle-mlir/circle-mlir/lib/pass/src/onnx/CompactReshapeConvReshape.h new file mode 100644 index 00000000000..90914a2ac26 --- /dev/null +++ b/circle-mlir/circle-mlir/lib/pass/src/onnx/CompactReshapeConvReshape.h @@ -0,0 +1,173 @@ +/* + * Copyright (c) 2025 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __CIRCLE_MLIR_PASS_ONNX_COMPACT_RESHAPE_CONV_RESHAPE_H__ +#define __CIRCLE_MLIR_PASS_ONNX_COMPACT_RESHAPE_CONV_RESHAPE_H__ + +#include "ConvertHelper.h" + +namespace mlir +{ +namespace Circle +{ + +namespace +{ + +// check if values are rank 3 1N1 +int64_t check_R3_1N1(std::vector &values) +{ + if (values.size() == 3) + { + if (values[0] == 1 && values[2] == 1) + return values[1]; + } + return 0; +} + +// check if values are rank 4 1N11 +int64_t check_R4_1N11(std::vector &values) +{ + if (values.size() == 4) + { + if (values[0] == 1 && values[2] == 1 && values[3] == 1) + return values[1]; + } + return 0; +} + +mlir::ArrayAttr duplicate(mlir::PatternRewriter &rewriter, mlir::ArrayAttr input) +{ + mlir::SmallVector temp_v; + size_t size = input.size(); + for (size_t i = 0; i < size; i++) + { + auto val = input[i].dyn_cast().getInt(); + temp_v.push_back(val); + temp_v.push_back(val); + } + return rewriter.getI64ArrayAttr(temp_v); +} + +} // namespace + +// Find sequence with I/O shape of 1N11 -> 1N1 -> 1M1 -> 1M11 +// (1N11)- ONNXReshape -(1N1)- ONNXConv -(1M1)- ONNXReshape -(1M11) +// Relace with with I/O shape of 1N11 -> 1M11 +// (1N11)- ONNXConv -(1M11) +// NOTE +// ShuffleFaceNet end part has this sequence. +// onnx-tf does like this. 
+// don't know why ShuffleFaceNet creates with torch.nn.Conv1d(); +struct CompactReshapeConvReshape : public OpRewritePattern +{ + using OpRewritePattern::OpRewritePattern; + + mlir::LogicalResult matchAndRewrite(mlir::ONNXReshapeOp reshape2_op, + mlir::PatternRewriter &rewriter) const override + { + // check Conv-Reshape sequence with shape + mlir::Operation *is_conv = reshape2_op.getOperand(0).getDefiningOp(); + mlir::Operation *is_const_r2 = reshape2_op.getOperand(1).getDefiningOp(); + bool bis_conv = mlir::isa_and_nonnull(is_conv); + bool bis_const = mlir::isa_and_nonnull(is_const_r2); + if (!bis_conv || !bis_const) + return mlir::failure(); + + // check if 'shape' value is 1N11 + mlir::Value const_r2_op = cast(is_const_r2); + std::vector shape_values_r2; + if (!ExtractConstantValues(const_r2_op, shape_values_r2)) + return mlir::failure(); + if (check_R4_1N11(shape_values_r2) == 0) + return mlir::failure(); + + auto conv_op = cast(is_conv); + + // check Reshape-Conv sequence + mlir::Operation *is_reshape = conv_op.getOperand(0).getDefiningOp(); + bool bis_reshape = mlir::isa_and_nonnull(is_reshape); + if (!bis_reshape) + return mlir::failure(); + + // check Reshape shape is constant and 1N1 + auto reshape1_op = cast(is_reshape); + mlir::Operation *is_const_r1 = reshape1_op.getOperand(1).getDefiningOp(); + bis_const = mlir::isa_and_nonnull(is_const_r1); + if (!bis_const) + return mlir::failure(); + mlir::Value const_r1_op = cast(is_const_r1); + std::vector shape_values_r1; + if (!ExtractConstantValues(const_r1_op, shape_values_r1)) + return mlir::failure(); + if (check_R3_1N1(shape_values_r1) == 0) + return mlir::failure(); + + // Get Conv-weight, check shape is OI1, and create new weight with OI11 + mlir::Operation *is_conv_w = conv_op.getOperand(1).getDefiningOp(); + bis_const = mlir::isa_and_nonnull(is_conv_w); + if (!bis_const) + return mlir::failure(); + mlir::Value const_w_op = cast(is_conv_w); + auto w_type = const_w_op.getType().dyn_cast_or_null(); + if 
(!w_type.getElementType().isF32()) + return mlir::failure(); + auto w_shape = w_type.getShape(); + if (w_shape.size() != 3) + return mlir::failure(); + + // Now, op sequence and shape values match + + // Create weight with 4D OI11 + std::vector weight_values; + if (!ExtractConstantValues(const_w_op, weight_values)) + return mlir::failure(); + + int64_t w_s_O = w_shape[0]; + int64_t w_s_I = w_shape[1]; + int64_t w_s_2 = w_shape[2]; + auto w_rttype = + mlir::RankedTensorType::get({w_s_O, w_s_I, w_s_2, w_s_2}, rewriter.getF32Type()); + mlir::Location opLoc = const_w_op.getLoc(); + mlir::Attribute empty_sparse; + mlir::Attribute attr_value = + mlir::DenseElementsAttr::get(w_rttype, llvm::ArrayRef(weight_values)); + mlir::Value new_kernel = rewriter.create(opLoc, empty_sparse, attr_value); + + // Get input of first Reshape to be used for input of New Conv + mlir::Value bias = conv_op.getOperand(2); + mlir::Value input_r1_op = reshape1_op.getOperand(0); + + // some attributes needs suplicate in size to match 1D -> 2D + mlir::StringAttr auto_pad = conv_op.getAutoPadAttr(); + mlir::ArrayAttr dilations = duplicate(rewriter, conv_op.getDilationsAttr()); + mlir::IntegerAttr group = conv_op.getGroupAttr(); + mlir::ArrayAttr kernel_shape = duplicate(rewriter, conv_op.getKernelShapeAttr()); + mlir::ArrayAttr pads = duplicate(rewriter, conv_op.getPadsAttr()); + mlir::ArrayAttr strides = duplicate(rewriter, conv_op.getStridesAttr()); + + rewriter.replaceOpWithNewOp(reshape2_op, reshape2_op.getType(), input_r1_op, + new_kernel, bias, auto_pad, dilations, group, + kernel_shape, pads, strides); + + return mlir::success(); + } +}; + +} // namespace Circle +} // namespace mlir + +#endif // __CIRCLE_MLIR_PASS_ONNX_COMPACT_RESHAPE_CONV_RESHAPE_H__ diff --git a/circle-mlir/circle-mlir/tools/CMakeLists.txt b/circle-mlir/circle-mlir/tools/CMakeLists.txt new file mode 100644 index 00000000000..144cba418d4 --- /dev/null +++ b/circle-mlir/circle-mlir/tools/CMakeLists.txt @@ -0,0 +1 @@ 
+add_subdirectory(onnx2circle) diff --git a/circle-mlir/circle-mlir/tools/onnx2circle/CMakeLists.txt b/circle-mlir/circle-mlir/tools/onnx2circle/CMakeLists.txt new file mode 100644 index 00000000000..3e0e5c0de8b --- /dev/null +++ b/circle-mlir/circle-mlir/tools/onnx2circle/CMakeLists.txt @@ -0,0 +1,45 @@ +set(SRC + src/onnx2circle.cpp +) + +string(TOLOWER "${CMAKE_BUILD_TYPE}" BUILD_TYPE_LOWER) +if(BUILD_TYPE_LOWER STREQUAL "debug") + list(APPEND SRC src/driverDebug.cpp) +else() + list(APPEND SRC src/driverRelease.cpp) + set(RELEASE_BUILD ON) +endif() + +add_executable(onnx2circle ${SRC}) +cir_mlir_static_flags(onnx2circle) +cir_onnx_static_flags(onnx2circle) +cir_onnx_tools_flags(onnx2circle) +target_link_libraries(onnx2circle PUBLIC cirmlir_dialect) +target_link_libraries(onnx2circle PUBLIC cirmlir_pass) +target_link_libraries(onnx2circle PUBLIC cirmlir_export) +target_link_libraries(onnx2circle PUBLIC cirmlir_coverage) +if(RELEASE_BUILD) + target_link_libraries(onnx2circle PUBLIC arser) +endif() + +install(TARGETS onnx2circle DESTINATION bin) + +if(NOT ENABLE_TEST) + return() +endif() + +include(TestModels.cmake) + +set(SRC_TEST + src/onnx2circle.cpp + src/onnx2circle.test.cpp +) + +GTest_AddTest_Public(onnx2circle_test ${SRC_TEST}) +cir_mlir_static_flags(onnx2circle_test) +cir_onnx_static_flags(onnx2circle_test) +cir_onnx_tools_flags(onnx2circle_test) +target_link_libraries(onnx2circle_test PUBLIC cirmlir_dialect) +target_link_libraries(onnx2circle_test PUBLIC cirmlir_pass) +target_link_libraries(onnx2circle_test PUBLIC cirmlir_export) +target_link_libraries(onnx2circle_test PUBLIC cirmlir_coverage) diff --git a/circle-mlir/circle-mlir/tools/onnx2circle/TestModels.cmake b/circle-mlir/circle-mlir/tools/onnx2circle/TestModels.cmake new file mode 100644 index 00000000000..a359afd379f --- /dev/null +++ b/circle-mlir/circle-mlir/tools/onnx2circle/TestModels.cmake @@ -0,0 +1,113 @@ +set(FILE_DEPS ) + +# ConvertUnitModel used in test.lst +set(TEST_MODELS ) 
+macro(ConvertUnitModel MLIR_FNAME) + # copy to build folder + set(TEST_MLIR_MODEL_SRC "${CMAKE_SOURCE_DIR}/models/mlir/${MLIR_FNAME}") + set(TEST_MLIR_MODEL_DST "${CMAKE_CURRENT_BINARY_DIR}/models/mlir/${MLIR_FNAME}") + add_custom_command( + OUTPUT ${TEST_MLIR_MODEL_DST} + COMMAND ${CMAKE_COMMAND} -E copy "${TEST_MLIR_MODEL_SRC}" "${TEST_MLIR_MODEL_DST}" + DEPENDS ${TEST_MLIR_MODEL_SRC} + COMMENT "tools/onnx2circle: prepare mlir/${MLIR_FNAME}" + ) + list(APPEND TEST_MODELS "${MLIR_FNAME}") + list(APPEND FILE_DEPS "${TEST_MLIR_MODEL_DST}") +endmacro(ConvertUnitModel) + +set(TEST_NEG_MODELS ) +macro(ConvertUnitModelNEG MLIR_FNAME) + # copy to build folder + set(TEST_MLIR_MODEL_SRC "${CMAKE_SOURCE_DIR}/models/mlir/${MLIR_FNAME}") + set(TEST_MLIR_MODEL_DST "${CMAKE_CURRENT_BINARY_DIR}/models/mlir/${MLIR_FNAME}") + add_custom_command( + OUTPUT ${TEST_MLIR_MODEL_DST} + COMMAND ${CMAKE_COMMAND} -E copy "${TEST_MLIR_MODEL_SRC}" "${TEST_MLIR_MODEL_DST}" + DEPENDS ${TEST_MLIR_MODEL_SRC} + COMMENT "tools/onnx2circle: prepare mlir/${MLIR_FNAME}" + ) + list(APPEND TEST_NEG_MODELS "${MLIR_FNAME}") + list(APPEND FILE_DEPS "${TEST_MLIR_MODEL_DST}") +endmacro(ConvertUnitModelNEG) + +set(VALIDATE_SHAPEINF_MODELS) +macro(ValidateShapeInf MLIR_FNAME) + # copy to build folder + set(TEST_MLIR_MODEL_SRC "${CMAKE_SOURCE_DIR}/models/mlir/${MLIR_FNAME}") + set(TEST_MLIR_MODEL_DST "${CMAKE_CURRENT_BINARY_DIR}/models/mlir/${MLIR_FNAME}") + add_custom_command( + OUTPUT ${TEST_MLIR_MODEL_DST} + COMMAND ${CMAKE_COMMAND} -E copy "${TEST_MLIR_MODEL_SRC}" "${TEST_MLIR_MODEL_DST}" + DEPENDS ${TEST_MLIR_MODEL_SRC} + COMMENT "tools/onnx2circle: prepare mlir/${MLIR_FNAME}" + ) + list(APPEND VALIDATE_SHAPEINF_MODELS "${MLIR_FNAME}") + list(APPEND FILE_DEPS "${TEST_MLIR_MODEL_DST}") +endmacro(ValidateShapeInf) + +# ValidateDynaShapeInf is to test output should have dynamic shape from dynamic shape input +set(VALIDATE_DYNASHAPEINF_MODELS) +macro(ValidateDynaShapeInf MLIR_FNAME) + # copy to build folder + 
set(TEST_MLIR_MODEL_SRC "${CMAKE_SOURCE_DIR}/models/mlir/${MLIR_FNAME}") + set(TEST_MLIR_MODEL_DST "${CMAKE_CURRENT_BINARY_DIR}/models/mlir/${MLIR_FNAME}") + add_custom_command( + OUTPUT ${TEST_MLIR_MODEL_DST} + COMMAND ${CMAKE_COMMAND} -E copy "${TEST_MLIR_MODEL_SRC}" "${TEST_MLIR_MODEL_DST}" + DEPENDS ${TEST_MLIR_MODEL_SRC} + COMMENT "tools/onnx2circle: prepare mlir/${MLIR_FNAME}" + ) + list(APPEND VALIDATE_DYNASHAPEINF_MODELS "${MLIR_FNAME}") + list(APPEND FILE_DEPS "${TEST_MLIR_MODEL_DST}") +endmacro(ValidateDynaShapeInf) + +# Read "test.lst" +include("test.lst") + +add_custom_target(onnx2circle_deps ALL DEPENDS ${FILE_DEPS}) + +foreach(MODEL IN ITEMS ${TEST_MODELS}) + set(MLIR_MODEL_PATH "${CMAKE_CURRENT_BINARY_DIR}/models/mlir/${MODEL}") + set(CIRCLE_MODEL_PATH "${CMAKE_CURRENT_BINARY_DIR}/models/mlir/${MODEL}.circle") + add_test( + NAME onnx2circle_test_${MODEL} + COMMAND "$" + "${MLIR_MODEL_PATH}" + "${CIRCLE_MODEL_PATH}" + ) +endforeach() + +foreach(MODEL IN ITEMS ${TEST_NEG_MODELS}) + set(MLIR_MODEL_PATH "${CMAKE_CURRENT_BINARY_DIR}/models/mlir/${MODEL}") + set(CIRCLE_MODEL_PATH "${CMAKE_CURRENT_BINARY_DIR}/models/mlir/${MODEL}.circle") + add_test( + NAME onnx2circle_neg_test_${MODEL} + COMMAND "$" + "${MLIR_MODEL_PATH}" + "${CIRCLE_MODEL_PATH}" + ) + set_tests_properties(onnx2circle_neg_test_${MODEL} PROPERTIES WILL_FAIL TRUE) +endforeach() + +foreach(MODEL IN ITEMS ${VALIDATE_SHAPEINF_MODELS}) + set(MLIR_MODEL_PATH "${CMAKE_CURRENT_BINARY_DIR}/models/mlir/${MODEL}") + set(CIRCLE_MODEL_PATH "${CMAKE_CURRENT_BINARY_DIR}/models/mlir/${MODEL}.circle") + add_test( + NAME onnx2circle_valshapeinf_${MODEL} + COMMAND "$" "--check_shapeinf" + "${MLIR_MODEL_PATH}" + "${CIRCLE_MODEL_PATH}" + ) +endforeach() + +foreach(MODEL IN ITEMS ${VALIDATE_DYNASHAPEINF_MODELS}) + set(MLIR_MODEL_PATH "${CMAKE_CURRENT_BINARY_DIR}/models/mlir/${MODEL}") + set(CIRCLE_MODEL_PATH "${CMAKE_CURRENT_BINARY_DIR}/models/mlir/${MODEL}.circle") + add_test( + NAME 
onnx2circle_valdynshapeinf_${MODEL} + COMMAND "$" "--check_dynshapeinf" + "${MLIR_MODEL_PATH}" + "${CIRCLE_MODEL_PATH}" + ) +endforeach() diff --git a/circle-mlir/circle-mlir/tools/onnx2circle/src/cmdOptions.h b/circle-mlir/circle-mlir/tools/onnx2circle/src/cmdOptions.h new file mode 100644 index 00000000000..8df741e4583 --- /dev/null +++ b/circle-mlir/circle-mlir/tools/onnx2circle/src/cmdOptions.h @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2025 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __CMD_OPTIONS_H__ +#define __CMD_OPTIONS_H__ + +namespace opts +{ + +inline const char *__opt_save_ops = "Save operators list instead of .circle "; +inline const char *__opt_unroll_rnn_d = "Unroll RNN Op if exist"; +inline const char *__opt_unroll_lstm_d = "Unroll LSTM Op if exist"; +inline const char *__opt_edbuf_d = "Tensorflow experimental_disable_batchmatmul_unfold"; +inline const char *__opt_keep_io_order_d = "Rename I/O to match order (obsolete)"; +inline const char *__opt_save_int_d = "Save intermediate files (obsolete)"; +inline const char *__opt_check_shapeinf = "Validate shape inference"; +inline const char *__opt_check_dynshapeinf = "Validate dynamic shape inference"; + +} // namespace opts + +#endif // __CMD_OPTIONS_H__ diff --git a/circle-mlir/circle-mlir/tools/onnx2circle/src/driverDebug.cpp b/circle-mlir/circle-mlir/tools/onnx2circle/src/driverDebug.cpp new file mode 100644 index 00000000000..35c570cdbf1 --- /dev/null +++ b/circle-mlir/circle-mlir/tools/onnx2circle/src/driverDebug.cpp @@ -0,0 +1,170 @@ +/* + * Copyright (c) 2025 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "onnx2circle.h" +#include "cmdOptions.h" + +#include +#include +#include + +#define DEBUG_TYPE "o2c" +#include + +#include +#include + +#include +#include +#include + +namespace opts +{ + +llvm::cl::OptionCategory O2CirCat("onnx2circle options"); +llvm::cl::OptionCategory O2CObsol("obsolete options"); + +static llvm::cl::opt InputFilename(llvm::cl::Positional, llvm::cl::desc(""), + llvm::cl::Required, llvm::cl::cat(O2CirCat)); + +static llvm::cl::opt OutputFilename(llvm::cl::Positional, llvm::cl::desc(""), + llvm::cl::Required, llvm::cl::cat(O2CirCat)); + +// Note: If you look at the help desctription in this program, +// it is configured to accept as the third Positional argument +// which is set from the `onnx-mlir` submodule. +// This option is not used in actual work. + +static llvm::cl::opt OptSaveOPS("save_ops", llvm::cl::desc(__opt_save_ops), + llvm::cl::init(false), llvm::cl::cat(O2CirCat)); + +static llvm::cl::opt RunSingleInstance("o2c-single", llvm::cl::desc("run single instance"), + llvm::cl::init(false), llvm::cl::cat(O2CirCat)); + +static llvm::cl::opt OptUnrollRNN("unroll_rnn", llvm::cl::desc(__opt_unroll_rnn_d), + llvm::cl::init(false), llvm::cl::cat(O2CirCat)); + +static llvm::cl::opt OptUnrollLSTM("unroll_lstm", llvm::cl::desc(__opt_unroll_lstm_d), + llvm::cl::init(false), llvm::cl::cat(O2CirCat)); + +static llvm::cl::opt OptExpDisBMMUnfold("experimental_disable_batchmatmul_unfold", + llvm::cl::desc(__opt_edbuf_d), llvm::cl::init(false), + llvm::cl::cat(O2CirCat)); + +static llvm::cl::opt OptKeepIOOrder("keep_io_order", llvm::cl::desc(__opt_keep_io_order_d), + llvm::cl::init(false), llvm::cl::cat(O2CObsol)); + +static llvm::cl::opt OptSaveIntermediate("save_intermediate", + llvm::cl::desc(__opt_save_int_d), + llvm::cl::init(false), llvm::cl::cat(O2CObsol)); + +// shape inference validation +static llvm::cl::opt OptCheckShapeInf("check_shapeinf", llvm::cl::desc(__opt_check_shapeinf), + llvm::cl::init(false), 
llvm::cl::cat(O2CirCat)); +static llvm::cl::opt OptCheckDynShapeInf("check_dynshapeinf", + llvm::cl::desc(__opt_check_dynshapeinf), + llvm::cl::init(false), llvm::cl::cat(O2CirCat)); + +} // namespace opts + +class SingleRun +{ +public: + static void Ensure(void) + { + SingleRun::_lock_fd = -1; + int rc = -1; + int retry = 100; // retry for 10 seconds + + do + { + rc = -1; + SingleRun::_lock_fd = open(_lock_file, O_CREAT | O_RDWR, 0660); + if (_lock_fd > 0) + { + rc = flock(SingleRun::_lock_fd, LOCK_EX | LOCK_NB); + if (rc == 0) + break; + close(SingleRun::_lock_fd); + SingleRun::_lock_fd = -1; + } + usleep(100 * 1000); // wait for 100 msecs + if (--retry < 0) + { + std::cerr << "Failed to SingleRun::Ensure." << std::endl; + break; + } + } while (rc != 0); + } + + static void Release(void) + { + if (SingleRun::_lock_fd > 0) + { + close(_lock_fd); + _lock_fd = -1; + } + if (_lock_file) + { + unlink(_lock_file); + } + } + +private: + static int _lock_fd; + static const char *const _lock_file; +}; + +int SingleRun::_lock_fd = -1; +const char *const SingleRun::_lock_file = "/tmp/onnx2cirlce_run_single.lock"; + +void onexit() { SingleRun::Release(); } + +int main(int argc, char *argv[]) +{ + std::atexit(onexit); + + llvm::cl::ParseCommandLineOptions(argc, argv, ""); + + LLVM_DEBUG({ + llvm::dbgs() << "onnx2circle debug enter\n"; + llvm::dbgs() << "Source model: " << opts::InputFilename << "\n"; + llvm::dbgs() << "Target model: " << opts::OutputFilename << "\n"; + }); + + if (!llvm::sys::fs::exists(opts::InputFilename)) + { + std::cerr << "Source model: " << opts::InputFilename << " not found." 
<< std::endl; + return -1; + } + + if (opts::RunSingleInstance) + SingleRun::Ensure(); + + O2Cparam param; + param.sourcefile = opts::InputFilename; + param.targetfile = opts::OutputFilename; + param.save_ops = opts::OptSaveOPS; + param.unroll_rnn = opts::OptUnrollRNN; + param.unroll_lstm = opts::OptUnrollLSTM; + param.unfold_batchmatmul = !opts::OptExpDisBMMUnfold; + param.check_shapeinf = opts::OptCheckShapeInf; + param.check_dynshapeinf = opts::OptCheckDynShapeInf; + + auto result = entry(param); + LLVM_DEBUG({ llvm::dbgs() << "Conversion done: " << result << "\n"; }); + return result; +} diff --git a/circle-mlir/circle-mlir/tools/onnx2circle/src/driverRelease.cpp b/circle-mlir/circle-mlir/tools/onnx2circle/src/driverRelease.cpp new file mode 100644 index 00000000000..24cb834bba0 --- /dev/null +++ b/circle-mlir/circle-mlir/tools/onnx2circle/src/driverRelease.cpp @@ -0,0 +1,135 @@ +/* + * Copyright (c) 2025 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "onnx2circle.h" +#include "cmdOptions.h" + +#include + +#include + +using namespace opts; + +std::string get_copyright(void) +{ + std::string str; + str = "Copyright (c) 2023 Samsung Electronics Co., Ltd. 
All Rights Reserved\r\n"; + str += "Licensed under the Apache License, Version 2.0\r\n"; + str += "https://github.sec.samsung.net/one-project/circle-mlir"; + return str; +} + +void print_version(void) +{ + std::cout << "onnx2circle version " << __version << std::endl; + std::cout << get_copyright() << std::endl; +} + +void print_version_only(void) { std::cout << __version; } + +int safe_main(int argc, char *argv[]) +{ + arser::Arser arser; + + arser::Helper::add_version(arser, print_version); + + arser.add_argument("--version_only") + .nargs(0) + .required(false) + .default_value(false) + .help("Show version number only and exit") + .exit_with(print_version_only); + + arser.add_argument("--save_ops") + .nargs(0) + .required(false) + .default_value(false) + .help(__opt_save_ops); + + arser.add_argument("--unroll_rnn") + .nargs(0) + .required(false) + .default_value(false) + .help(__opt_unroll_rnn_d); + + arser.add_argument("--unroll_lstm") + .nargs(0) + .required(false) + .default_value(false) + .help(__opt_unroll_lstm_d); + + arser.add_argument("--experimental_disable_batchmatmul_unfold") + .nargs(0) + .required(false) + .default_value(false) + .help(__opt_edbuf_d); + + // ignored obsolete options + arser.add_argument("--keep_io_order") + .nargs(0) + .required(false) + .default_value(false) + .help(__opt_keep_io_order_d); + + arser.add_argument("--save_intermediate") + .nargs(0) + .required(false) + .default_value(false) + .help(__opt_save_int_d); + + arser.add_argument("--check_shapeinf") + .nargs(0) + .required(false) + .default_value(false) + .help(__opt_check_shapeinf); + + arser.add_argument("--check_dynshapeinf") + .nargs(0) + .required(false) + .default_value(false) + .help(__opt_check_dynshapeinf); + + // two positional arguments + arser.add_argument("onnx").help("Input ONNX file"); + arser.add_argument("circle").help("Output Circle file"); + + arser.parse(argc, argv); + + O2Cparam param; + param.sourcefile = arser.get("onnx"); + param.targetfile = 
arser.get("circle"); + param.save_ops = arser.get("--save_ops"); + param.unroll_rnn = arser.get("--unroll_rnn"); + param.unroll_lstm = arser.get("--unroll_lstm"); + param.unfold_batchmatmul = !arser.get("--experimental_disable_batchmatmul_unfold"); + param.check_shapeinf = arser.get("--check_shapeinf"); + param.check_dynshapeinf = arser.get("--check_dynshapeinf"); + + return entry(param); +} + +int main(int argc, char *argv[]) +{ + try + { + return safe_main(argc, argv); + } + catch (const std::exception &err) + { + std::cout << err.what() << '\n'; + } + return -1; +} diff --git a/circle-mlir/circle-mlir/tools/onnx2circle/src/onnx2circle.cpp b/circle-mlir/circle-mlir/tools/onnx2circle/src/onnx2circle.cpp new file mode 100644 index 00000000000..a2cb0ee8382 --- /dev/null +++ b/circle-mlir/circle-mlir/tools/onnx2circle/src/onnx2circle.cpp @@ -0,0 +1,207 @@ +/* + * Copyright (c) 2025 Samsung Electronics Co., Ltd. All Rights Reserved + * Copyright 2019-2022 The IBM Research Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "onnx2circle.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +// ONNX-MLIR +#include +#include +#include + +// CIRCLE-MLIR +#include +#include +#include + +#include +#include + +namespace onnx2circle +{ + +// from ONNX-MLIR src/Compiler/CompilerUtils.cpp +std::string dirName(llvm::StringRef inputFilename) +{ + llvm::SmallVector path(inputFilename.begin(), inputFilename.end()); + llvm::sys::path::remove_filename(path); + return std::string(path.data(), path.size()); +} + +// from ONNX-MLIR src/Compiler/CompilerUtils.cpp +void registerDialects(mlir::MLIRContext &context) +{ + context.getOrLoadDialect(); + + context.getOrLoadDialect(); + context.getOrLoadDialect(); +} + +int loadONNX(const std::string &onnx_path, mlir::MLIRContext &context, + mlir::OwningOpRef &module) +{ + llvm::StringRef inputFilename(onnx_path); + std::string errorMessage; + if (inputFilename.endswith(".mlir")) + { + auto input = mlir::openInputFile(inputFilename, &errorMessage); + if (!input) + { + llvm::errs() << errorMessage << "\n"; + llvm::errs().flush(); + return -1; + } + + // Parse the input mlir. 
+ llvm::SourceMgr sourceMgr; + mlir::SourceMgrDiagnosticHandler sourceMgrHandler(sourceMgr, &context); + sourceMgr.AddNewSourceBuffer(std::move(input), llvm::SMLoc()); + module = mlir::parseSourceFile(sourceMgr, &context); + if (!module) + { + llvm::errs() << "Error can't load file " << inputFilename << "\n"; + llvm::errs().flush(); + return -1; + } + } + else if (inputFilename.endswith(".onnx")) + { + onnx_mlir::ImportOptions options; + options.useOnnxModelTypes = onnx_mlir::useOnnxModelTypes; + options.invokeOnnxVersionConverter = onnx_mlir::invokeOnnxVersionConverter; + options.shapeInformation = onnx_mlir::shapeInformation; + options.allowSorting = onnx_mlir::allowSorting; + options.externalDataDir = dirName(inputFilename); + + int rc = + onnx_mlir::ImportFrontendModelFile(inputFilename, context, module, &errorMessage, options); + if (rc != onnx_mlir::CompilerSuccess) + { + llvm::errs() << "Error can't load file " << inputFilename << "\n"; + llvm::errs() << errorMessage << "\n"; + llvm::errs().flush(); + return -1; + } + } + else + { + llvm::errs() << "Unknown model file extension.\n"; + llvm::errs().flush(); + return -1; + } + + return 0; +} + +int convertToCircle(const O2Cparam ¶m) +{ + const std::string &sourcefile = param.sourcefile; + const std::string &targetfile = param.targetfile; + + mlir::MLIRContext context; + registerDialects(context); + + mlir::OwningOpRef module; + auto result = loadONNX(sourcefile, context, module); + if (result != 0) + return result; + + result = mlir::Circle::preprocessONNX(context, module); + if (result != 0) + return result; + + result = mlir::Circle::shapeInferenceONNX(context, module); + if (result != 0) + return result; + + result = mlir::Circle::convertToCircle(context, module); + if (result != 0) + return result; + + result = mlir::Circle::postProcessCircle(context, module); + if (result != 0) + return result; + + if (param.check_shapeinf) + { + result = mlir::Circle::shapeValidateCircle(context, module); + if (result != 
0) + return result; + } + if (param.check_dynshapeinf) + { + // output should have any static shape from dynamic input + result = mlir::Circle::dynaShapeValidateCircle(context, module); + if (result != 0) + return result; + } + + std::string error_msg; + if (param.save_ops) + { + std::string output_filename = targetfile + ".ops"; + auto output = mlir::openOutputFile(output_filename, &error_msg); + if (!error_msg.empty()) + { + llvm::errs() << "Failed: " << error_msg << "\n"; + return -1; + } + result = mlir::Circle::dumpCircleOps(output->os(), context, module); + if (result == 0) + output->keep(); + + return result; + } + + std::string serialized_flatbuffer; + if (!mlir::Circle::MlirToFlatBufferTranslateFunction(module.get(), &serialized_flatbuffer)) + return -1; + auto output = mlir::openOutputFile(targetfile, &error_msg); + // TODO error handle + output->os() << serialized_flatbuffer; + output->keep(); + + return 0; +} + +} // namespace onnx2circle + +// NOTE sync version number with 'infra/debian/*/changelog' for upgrade +const char *__version = "0.2.0"; + +int entry(const O2Cparam ¶m) +{ + int result = onnx2circle::convertToCircle(param); + return result; +} diff --git a/circle-mlir/circle-mlir/tools/onnx2circle/src/onnx2circle.h b/circle-mlir/circle-mlir/tools/onnx2circle/src/onnx2circle.h new file mode 100644 index 00000000000..8fc7f6f82ed --- /dev/null +++ b/circle-mlir/circle-mlir/tools/onnx2circle/src/onnx2circle.h @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2025 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __ONNX2CIRCLE_H__ +#define __ONNX2CIRCLE_H__ + +#include + +struct O2Cparam +{ + std::string sourcefile; + std::string targetfile; + + bool save_ops = false; + bool unroll_rnn = false; + bool unroll_lstm = false; + bool unfold_batchmatmul = false; + bool check_shapeinf = false; + bool check_dynshapeinf = false; + // TODO add more if necessary +}; + +extern const char *__version; + +int entry(const O2Cparam ¶m); + +#endif // __ONNX2CIRCLE_H__ diff --git a/circle-mlir/circle-mlir/tools/onnx2circle/src/onnx2circle.test.cpp b/circle-mlir/circle-mlir/tools/onnx2circle/src/onnx2circle.test.cpp new file mode 100644 index 00000000000..c5ed732207b --- /dev/null +++ b/circle-mlir/circle-mlir/tools/onnx2circle/src/onnx2circle.test.cpp @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2025 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include + +#include + +// declare methods of onnx2circle.cpp to test +namespace onnx2circle +{ + +int loadONNX(const std::string &onnx_path, mlir::MLIRContext &context, + mlir::OwningOpRef &module); + +} // namespace onnx2circle + +#include + +TEST(LoadONNXTest, NonExistFile_NEG) +{ + mlir::MLIRContext context; + mlir::OwningOpRef module; + + std::string invalid_filename = "/no_such_folder/no_such_file_in_storage.mlir"; + auto result = onnx2circle::loadONNX(invalid_filename, context, module); + ASSERT_NE(0, result); + + invalid_filename = "/no_such_folder/no_such_file_in_storage.onnx"; + result = onnx2circle::loadONNX(invalid_filename, context, module); + ASSERT_NE(0, result); +} + +TEST(LoadONNXTest, NotSupportedExtension_NEG) +{ + std::string invalid_filename = "somefile.blabla"; + + mlir::MLIRContext context; + mlir::OwningOpRef module; + + auto result = onnx2circle::loadONNX(invalid_filename, context, module); + ASSERT_NE(0, result); +} diff --git a/circle-mlir/circle-mlir/tools/onnx2circle/test.lst b/circle-mlir/circle-mlir/tools/onnx2circle/test.lst new file mode 100644 index 00000000000..627d96bd957 --- /dev/null +++ b/circle-mlir/circle-mlir/tools/onnx2circle/test.lst @@ -0,0 +1,4 @@ +## EXAMPLE +# +# ConvertUnitModel(test_mode.mlir) +# diff --git a/circle-mlir/externals/CMakeLists.txt b/circle-mlir/externals/CMakeLists.txt index 5bc5d81bfd7..ddf08ae6edb 100644 --- a/circle-mlir/externals/CMakeLists.txt +++ b/circle-mlir/externals/CMakeLists.txt @@ -15,9 +15,55 @@ if(NOT CMAKE_BUILD_TYPE) endif(NOT CMAKE_BUILD_TYPE) message(STATUS "Build type: ${CMAKE_BUILD_TYPE}") +# NOTE: ld.gold has some problem with pybind11 +option(CIRCLE_MLIR_USE_GOLD "Use ld.gold linker for llvm-project" OFF) +option(CIRCLE_MLIR_USE_CLANG "Use clang and lld for llvm-project and onnx-mlir" OFF) + include(ExternalProject) -set(EXTERNALS_BUILD_INST_DIR ${CMAKE_BINARY_DIR}) +if(DEFINED ENV{CIRCLE_MLIR_LOCALINST}) + set(CIRCLE_MLIR_LOCALINST $ENV{CIRCLE_MLIR_LOCALINST})
+endif() + +if(DEFINED ENV{CIRCLE_MLIR_USE_GOLD}) + set(CIRCLE_MLIR_USE_GOLD ON) +endif() + +if(DEFINED ENV{CIRCLE_MLIR_USE_CLANG}) + set(CIRCLE_MLIR_USE_CLANG ON) +endif() + +if(CIRCLE_MLIR_USE_GOLD AND CIRCLE_MLIR_USE_CLANG) + message(FATAL_ERROR "CIRCLE_MLIR_USE_GOLD and CIRCLE_MLIR_USE_CLANG are exclusive option.") +endif() + +# Use gcc + gold +if(CIRCLE_MLIR_USE_GOLD) + set(CIRCLE_MLIR_GOLD_OPTION -DCMAKE_EXE_LINKER_FLAGS='-Wl,-no-keep-memory,-fuse-ld=gold' -DLLVM_USE_LINKER=gold) + message(STATUS "Use linker ld.gold: ${CIRCLE_MLIR_GOLD_OPTION}") +endif() + +# Use clang + lld +if(CIRCLE_MLIR_USE_CLANG) + find_program(CLANG_COMPILER clang) + if(NOT EXISTS ${CLANG_COMPILER}) + message(FATAL_ERROR "Clang is not available.") + endif() + find_program(LLD_LINKER lld) + if(NOT EXISTS ${LLD_LINKER}) + message(FATAL_ERROR "LLD is not available.") + endif() + set(CIRCLE_MLIR_CLANG_OPTION -DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++) + set(CIRCLE_MLIR_CLANG_OPTION ${CIRCLE_MLIR_CLANG_OPTION} -DLLVM_ENABLE_LLD=ON) + message(STATUS "Use Clang and LLD linker: ${CIRCLE_MLIR_CLANG_OPTION}") +endif() + +if(CIRCLE_MLIR_LOCALINST) + message(STATUS "CIRCLE_MLIR_LOCALINST=${CIRCLE_MLIR_LOCALINST}") + set(EXTERNALS_BUILD_INST_DIR ${CIRCLE_MLIR_LOCALINST}) +else() + set(EXTERNALS_BUILD_INST_DIR ${CMAKE_BINARY_DIR}) +endif() set(FB_BUILD_DIR "${EXTERNALS_BUILD_INST_DIR}/flatbuffers-build") set(FB_INSTALL_DIR "${EXTERNALS_BUILD_INST_DIR}/flatbuffers-install") @@ -66,8 +112,14 @@ ExternalProject_Add(externals-llvm-project INSTALL_DIR "${LP_INSTALL_DIR}" CMAKE_ARGS -DLLVM_ENABLE_PROJECTS=mlir -DLLVM_TARGETS_TO_BUILD=host + -DLLVM_ENABLE_TERMINFO=OFF + -DLLVM_ENABLE_ZLIB=OFF + -DLLVM_ENABLE_ZSTD=OFF + -DLLVM_INCLUDE_TESTS=OFF -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} -DCMAKE_INSTALL_PREFIX='${LP_INSTALL_DIR}' + ${CIRCLE_MLIR_GOLD_OPTION} + ${CIRCLE_MLIR_CLANG_OPTION} -DLLVM_ENABLE_ASSERTIONS=ON -DLLVM_ENABLE_RTTI=ON # NOTE externals-flatbuffers externals-abseil-cpp are added to 
DEPENDS only @@ -105,6 +157,7 @@ ExternalProject_Add(externals-onnx-mlir CMAKE_ARGS -DCMAKE_BUILD_TYPE=Release -DCMAKE_PREFIX_PATH='${CMAKE_PREFIX_PATH}' -DCMAKE_INSTALL_PREFIX='${OM_INSTALL_DIR}' + ${CIRCLE_MLIR_CLANG_OPTION} -DMLIR_DIR='${LP_BUILD_DIR}/lib/cmake/mlir' -DPython3_ROOT_DIR='$ENV{Python3_ROOT_DIR}' -DONNX_MLIR_BUILD_TESTS=OFF