diff --git a/.github/workflows/run-circle-mlir-build.yml b/.github/workflows/run-circle-mlir-build.yml index fdeb27709d7..5585a895ede 100644 --- a/.github/workflows/run-circle-mlir-build.yml +++ b/.github/workflows/run-circle-mlir-build.yml @@ -35,6 +35,7 @@ jobs: include: - ubuntu_code: jammy ubuntu_vstr: u2204 + one_comp_ver: 1.29.0 runs-on: ubuntu-latest @@ -45,11 +46,19 @@ jobs: name: circle-mlir ${{ matrix.ubuntu_vstr }} ${{ matrix.type }} test steps: + # TODO prepare circle-interpreter Debian package and install + - name: Install one-compiler + run: | + cd /var/tmp + ONE_COMPILER=one-compiler-${{ matrix.ubuntu_code }}_${{ matrix.one_comp_ver }}_amd64.deb + wget https://github.com/Samsung/ONE/releases/download/${{ matrix.one_comp_ver }}/${ONE_COMPILER} + ls -al . + dpkg -i ${ONE_COMPILER} + ls -al /usr/share/one/bin + - name: Checkout uses: actions/checkout@v4 - # TODO download circle-interpreter - # NOTE Docker image has pre-installed submodules in /workdir # NOTE Docker image has pre-installed python packages - name: Configure diff --git a/circle-mlir/Makefile.aa b/circle-mlir/Makefile.aa new file mode 100644 index 00000000000..f1768e015b9 --- /dev/null +++ b/circle-mlir/Makefile.aa @@ -0,0 +1,22 @@ +all: cfg debug test install + +cfg: + Python3_ROOT_DIR=/usr/bin cmake -B build/debug -S ./ \ + -DCMAKE_INSTALL_PREFIX=build/debug.install \ + -DCMAKE_BUILD_TYPE=Debug \ + -DCIRCLE_MLIR_WORKDIR=/workdir + +cc: + Python3_ROOT_DIR=/usr/bin cmake -B build/debug -S ./ \ + -DCMAKE_INSTALL_PREFIX=build/debug.install \ + -DCMAKE_BUILD_TYPE=Debug + +debug: + cmake --build build/debug -j4 + +test: + CTEST_OUTPUT_ON_FAILURE=1 cmake --build build/debug --verbose -- test + +install: + cmake --build build/debug -j4 -- install + diff --git a/circle-mlir/Makefile.sample b/circle-mlir/Makefile.sample index 4b12f2aee34..0c77cc7b856 100644 --- a/circle-mlir/Makefile.sample +++ b/circle-mlir/Makefile.sample @@ -15,8 +15,12 @@ endif # TODO error handle if not found 
PYTHON3_PATH=$(shell dirname $(PYTHON3_CMD)) +# NOTE CIRCLE_MLIR_XXX is used for CMakeLists +# CIRCLEMLIR_XXX is used in this Makefile + CIRCLEMLIR_BUILD_DEBUG?=build/debug CIRCLEMLIR_BUILD_REL?=build/release +CIRCLEMLIR_BUILD_COV?=build/coverage CIRCLEMLIR_EXTS_DEBUG?=build/externals/debug CIRCLEMLIR_EXTS_REL?=build/externals/release @@ -40,7 +44,16 @@ help: @echo "make prepr : prepare externals for release (needed only once)" @echo "make cfgr : configure circle-mlir for release build" @echo "make rel : build for release" + @echo "make prepcov : prepare submodules for coverage test (needed only once)" + @echo "make cfgcov : configure circle-mlir for debug build with coverage test" + @echo "make debugcov : build for test coverage" + @echo "make testcov : run coverage test" + @echo "make gencov : generate test coverage report" + @echo "make cleancov : clean test coverage build" @echo "make testr : test for release" + @echo "make cfgdi : configure circle-mlir for debug build in Docker image" + @echo "make cfgcovdi : configure circle-mlir for debug build with coverage test in Docker image" + @echo "make cfgri : configure circle-mlir for release build in Docker image" @echo "make cleanr : clean release build" @echo "make cleanall : clean all build including overlay, externals" @@ -86,6 +99,38 @@ clean: rm -f $(CIRCLEMLIR_BUILD_DEBUG)/CMakeCache.txt rm -rf $(CIRCLEMLIR_BUILD_DEBUG)/circle-mlir/ +#------------------------------------------------------------------------------- +# for debug test coverage + +prepcov: _mkbuildcov + Python3_ROOT_DIR=$(CIRCLEMLIR_PY3_ROOT) \ + cmake -B $(CIRCLEMLIR_EXTS_DEBUG) -S ./externals -DCMAKE_BUILD_TYPE=Release + cmake --build $(CIRCLEMLIR_EXTS_DEBUG) -j$(CIRCLEMLIR_BUILD_JOBS) + +cfgcov: _mkbuildcov + cmake -B $(CIRCLEMLIR_BUILD_COV) -S ./ \ + -DCIRCLE_MLIR_EXTERNALS=$(CIRCLEMLIR_EXTS_DEBUG) \ + -DONNX2CIRCLE_TEST_MODELS_SINGLE=ON \ + -DENABLE_COVERAGE=ON + +debugcov: + CM_PASS_DUMP=2 \ + cmake --build $(CIRCLEMLIR_BUILD_COV) 
-j$(CIRCLEMLIR_BUILD_JOBS) + +# NOTE to configure in Docker, use "make cfgcovdi" + +testcov: + CM_PASS_DUMP=2 \ + CTEST_OUTPUT_ON_FAILURE=1 \ + cmake --build $(CIRCLEMLIR_BUILD_COV) --verbose -- test + +gencov: + bash infra/tools/gen-coverage-report circle-mlir + +cleancov: + rm -f $(CIRCLEMLIR_BUILD_COV)/CMakeCache.txt + rm -rf $(CIRCLEMLIR_BUILD_COV)/circle-mlir/ + +#------------------------------------------------------------------------------- # for release @@ -109,6 +154,33 @@ cleanr: rm -f $(CIRCLEMLIR_BUILD_REL)/CMakeCache.txt rm -rf $(CIRCLEMLIR_BUILD_REL)/circle-mlir/ +#------------------------------------------------------------------------------- +# for debug build in Docker +# +# no need to make for overlay, prep as prepared in Docker image +# run make for 'cfgdi' +# then make for 'debug', 'test' + +cfgdi: _mkbuild + cmake -B $(CIRCLEMLIR_BUILD_DEBUG) -S ./ \ + -DONNX2CIRCLE_TEST_MODELS_SINGLE=ON \ + -DCMAKE_BUILD_TYPE=Debug \ + -DCIRCLE_MLIR_WORKDIR=/workdir + +# for test coverage build in Docker +cfgcovdi: _mkbuildcov + cmake -B $(CIRCLEMLIR_BUILD_COV) -S ./ \ + -DONNX2CIRCLE_TEST_MODELS_SINGLE=ON \ + -DCIRCLE_MLIR_WORKDIR=/workdir \ + -DENABLE_COVERAGE=ON + +# for release build in Docker + +cfgri: _mkbuild + cmake -B $(CIRCLEMLIR_BUILD_REL) -S ./ \ + -DONNX2CIRCLE_TEST_MODELS_SINGLE=ON \ + -DCMAKE_BUILD_TYPE=Release \ + -DCIRCLE_MLIR_WORKDIR=/workdir #------------------------------------------------------------------------------- diff --git a/circle-mlir/README.md b/circle-mlir/README.md index 6fd75e5dda5..e5671fa04ac 100644 --- a/circle-mlir/README.md +++ b/circle-mlir/README.md @@ -1,3 +1,175 @@ # circle-mlir Circle MLIR dialect and tools + +## Tools provided + +_onnx2circle_ +- conversion tool of ONNX to Circle model for `compiler` +- to replace not-maintained-anymore onnx-tensorflow package + +## How to build + +Use provided `Makefile.sample` or create your own `Makefile` +``` +ln -s Makefile.sample Makefile +``` +- `Makefile` is in `.gitignore` 
to let developers use their own Makefile. + +### Prerequisite + +``` +sudo apt-get install build-essential cmake git fakeroot +sudo apt-get install autoconf automake libtool unzip wget +sudo apt-get install devscripts debmake debhelper lcov +sudo apt-get install python3 python3-pip python3-venv python3-dev python3-all dh-python + +python3 -m pip install --upgrade pip setuptools +python3 -m pip install yapf==0.43.0 numpy==1.26.4 h5py==3.8.0 einops +``` + +### Prepare externals + +### Debug build + +Prepare overlay +``` +make overlay +``` + +Build submodules in venv +``` +source infra/overlay/venv/bin/activate +make prep +``` +NOTE `llvm-project` is built as `Debug` which may require 32G or more RAM. +- if build fails for some reason, please change back to + `-DCMAKE_BUILD_TYPE=Release` in `prep:` target in `Makefile.sample` file. +- build and test need venv python packages. + +NOTE `overlay` and `submodules` builds are needed only once. + +Configure and build +``` +make cfg +make debug +``` + +Test build +``` +make test +``` +- optionally, set `ONE_COMPILER_ROOT` to alternate PATH for local ONE build + ``` + ONE_COMPILER_ROOT=/home/user/one/build/install make test + ``` + +To clean up existing build results +``` +make clean +``` + +To clean up also `overlay` and `submodules` +``` +make cleanall +``` +- NOTE when using `CIRCLE_MLIR_LOCALINST`, need to manually clean up this folder + +### Release build + +Release build is available as follows. +Others not mentioned are same as above Debug build. + +Build submodules in venv +``` +source infra/overlay/venv/bin/activate +make prepr +deactivate +``` + +Configure and build +``` +make cfgr +make rel +``` + +Test build +``` +make testr +``` + +### Test coverage + +To get test coverage report, run the following commands. 
+- assume you already have done `make overlay` and `make prepcov` +- you can skip `make prepcov` step if you are using local installation with `CIRCLE_MLIR_LOCALINST` +- or you can reuse `CIRCLE_MLIR_LOCALINST` for existing debug or release build submodules with +`cfgcov` target such as `CIRCLE_MLIR_LOCALINST=$(pwd)/build/debug/submodules make cfgcov` +``` +source infra/overlay/venv/bin/activate +make cfgcov +deactivate + +make debugcov +make testcov +make gencov +``` + +Open `coverage/html/index.html` file in web browser to see the reports. + +To generate from second run and so on in your local machine, you will have to +remove existing files before running `gencov` +``` +rm -rf coverage +make gencov +``` + +To run this with Docker image, use `cfgcovdi` target instead of `cfgcov`. +``` +make cfgcovdi +make debugcov +make testcov +make gencov +``` + + +## Local format check + +Install prerequisite packages. +``` +sudo apt-get install clang-format-12 python3 python3-pip +python3 -m pip install yapf==0.32.0 +``` + +Run format checker. +``` +bash ./infra/tools/format +``` +or with `Makefile` from `Makefile.sample` +``` +make format +``` + +## Dump debug logs + +To see logs during conversion with `onnx2circle` tool, set `CM_PASS_DUMP=1` for +preprocessing ONNX and ONNX to circle conversion, or set `CM_PASS_DUMP=2` to see +additional logs for circle rewrite. + +``` +CM_PASS_DUMP=2 onnx2circle input.onnx output.circle +``` + +You can give `-debug` option to see general MLIR logs or `-debug-only=o2c` +option to see only logs from onnx2circle. + +``` +onnx2circle -debug-only=o2c input.onnx output.circle +``` + +## TensorFlow source code + +Some source codes are referenced from TensorFlow and the file path is added to +inside our source. + +Current codes are from `v2.12.1` tag. 
diff --git a/circle-mlir/circle-mlir/CMakeLists.txt b/circle-mlir/circle-mlir/CMakeLists.txt index 53c54c9e95e..4091ef8746b 100644 --- a/circle-mlir/circle-mlir/CMakeLists.txt +++ b/circle-mlir/circle-mlir/CMakeLists.txt @@ -2,4 +2,5 @@ include(UseMLIR) include(UseAbseil) add_subdirectory(lib) +add_subdirectory(tools) add_subdirectory(tools-test) diff --git a/circle-mlir/circle-mlir/lib/dialect/mlir/CircleOps.td b/circle-mlir/circle-mlir/lib/dialect/mlir/CircleOps.td index 1c0989feec7..d2f07f977ca 100644 --- a/circle-mlir/circle-mlir/lib/dialect/mlir/CircleOps.td +++ b/circle-mlir/circle-mlir/lib/dialect/mlir/CircleOps.td @@ -296,6 +296,44 @@ class CIR_ConvOp($_op))">>, + ResultsBroadcastableShape, + DeclareOpInterfaceMethods, + Pure, + Commutative, + // TODO enable QuantizableResult, + ]> { + let summary = "Addition operator"; + + let description = [{ + Element-wise addition operation. + }]; + + let arguments = ( + // TODO add more dtypes + ins CIR_TensorOf<[F32, I32, I64]>:$lhs, + CIR_TensorOf<[F32, I32, I64]>:$rhs, + CIR_AFAttr:$fused_activation_function); + + let results = (outs CIR_TensorOf<[F32, I32, I64]>:$output); + + let hasFolder = 1; + + let hasCustomAssemblyFormat = 1; + + let extraClassDefinition = [{ + ParseResult $cppClass::parse(OpAsmParser &parser, OperationState &result) { + return parseOneResultSameOperandTypeOp(parser, result); + } + void $cppClass::print(OpAsmPrinter &p) { + return printOneResultOp(getOperation(), p); + } + }]; + + let hasOptions = 1; +} def CIR_ConstOp : Op bool inferBinShapes(BINOP &op, SmallVector } // namespace -// TODO add AddOp +//===----------------------------------------------------------------------===// +// AddOp +//===----------------------------------------------------------------------===// + +void AddOp::inferShapes() +{ + AddOp op = *this; + SmallVector inferred; + if (!inferBinShapes(op, inferred)) + return; + + auto input0_op = getOperand(0); + auto input0_type = input0_op.getType().cast(); + RankedTensorType 
inferred_type = RankedTensorType::get(inferred, input0_type.getElementType()); + getResult().setType(inferred_type); +} + +//===----------------------------------------------------------------------===// +// CustomOp +//===----------------------------------------------------------------------===// + +void CustomOp::inferShapes() +{ + CustomOp op = *this; + auto outputs = op.getOutput(); + bool all_static = true; + for (auto output : outputs) + { + auto output_type = output.getType().cast(); + if (not output_type.hasStaticShape()) + { + all_static = false; + break; + } + } + if (all_static) + return; + + if (op.getCustomCode() == "Erf") + { + assert(op.getInput().size() == 1); + assert(op.getOutput().size() == 1); + + auto input_op = getOperand(0); + auto input_type = input_op.getType().cast(); + auto input_shape = input_type.getShape(); + llvm::SmallVector inferred(input_shape.begin(), input_shape.end()); + + dumpShape(op, inferred); + + RankedTensorType inferred_type = RankedTensorType::get(inferred, input_type.getElementType()); + getResult(0).setType(inferred_type); + } +} } // namespace Circle } // namespace mlir diff --git a/circle-mlir/circle-mlir/lib/dialect/src/ops/AddOp.h b/circle-mlir/circle-mlir/lib/dialect/src/ops/AddOp.h new file mode 100644 index 00000000000..89ea40fe1c9 --- /dev/null +++ b/circle-mlir/circle-mlir/lib/dialect/src/ops/AddOp.h @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2025 Samsung Electronics Co., Ltd. All Rights Reserved + * Copyright 2019 The TensorFlow Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// from tensorflow/compiler/mlir/lite/ir/tfl_ops.cc + +#ifndef __CIRCLE_MLIR_DIALECT_OPS_ADD_OP_H__ +#define __CIRCLE_MLIR_DIALECT_OPS_ADD_OP_H__ + +#include "circle-mlir/dialect/CircleDialect.h" + +namespace mlir +{ +namespace Circle +{ + +// Return true if the given Add operation has the CPU kernel supported shapes. +bool VerifyAddOpShapeConstraints(AddOp op) +{ + auto element_type = getElementTypeOrSelf(op.getOutput().getType()); + + // Allows F32 and I32 outputs when the operands have valid shapes, + // which are broadcastable shapes up to four dimensions or have same shapes. + // TODO support Quantized Type + if (element_type.isF32() || IsI32Type(element_type) || IsI64Type(element_type)) + { + return VerifyOperandsHaveSameShapesOrBroadcastableShape( + /*op=*/op.getOperation(), /*indices=*/ArrayRef{0, 1}, + /*max_bcast_rank=*/4); + } + + return false; +} + +//===----------------------------------------------------------------------===// +// AddOp +//===----------------------------------------------------------------------===// + +OpFoldResult AddOp::fold(FoldAdaptor adaptor) +{ + auto operands = adaptor.getOperands(); + // TODO(b/142478136): Handle fused ops. 
+ if (getFusedActivationFunction() != "NONE") + return {}; + return ConstFoldBinaryOp( + getType(), operands, [](APFloat a, APFloat b) { return a + b; }, + [](APInt a, APInt b) { return a + b; }); +} + +} // namespace Circle +} // namespace mlir + +#endif // __CIRCLE_MLIR_DIALECT_OPS_ADD_OP_H__ diff --git a/circle-mlir/circle-mlir/lib/pass/CMakeLists.txt b/circle-mlir/circle-mlir/lib/pass/CMakeLists.txt index 587d9091520..030d6f22ef4 100644 --- a/circle-mlir/circle-mlir/lib/pass/CMakeLists.txt +++ b/circle-mlir/circle-mlir/lib/pass/CMakeLists.txt @@ -1,5 +1,12 @@ set(SRC src/CirclePass.cpp + src/ConvertONNXToCirclePass.cpp + src/ConvertHelper.cpp + src/RewriteCirclePass.cpp + src/RewriteONNXPass.cpp + src/DumpCircleOpsPass.cpp + src/RuntimeVerifyPass.cpp + src/ShapeInferencePass.cpp ) add_library(cirmlir_pass STATIC ${SRC}) diff --git a/circle-mlir/circle-mlir/lib/pass/include/circle-mlir/pass/CirclePass.h b/circle-mlir/circle-mlir/lib/pass/include/circle-mlir/pass/CirclePass.h index 09e6160484a..264d846411b 100644 --- a/circle-mlir/circle-mlir/lib/pass/include/circle-mlir/pass/CirclePass.h +++ b/circle-mlir/circle-mlir/lib/pass/include/circle-mlir/pass/CirclePass.h @@ -26,7 +26,14 @@ namespace mlir namespace Circle { +int preprocessONNX(mlir::MLIRContext &context, mlir::OwningOpRef &module); +int shapeInferenceONNX(mlir::MLIRContext &context, mlir::OwningOpRef &module); int convertToCircle(mlir::MLIRContext &context, mlir::OwningOpRef &module); +int postProcessCircle(mlir::MLIRContext &context, mlir::OwningOpRef &module); +int shapeValidateCircle(mlir::MLIRContext &context, mlir::OwningOpRef &module); +int dynaShapeValidateCircle(mlir::MLIRContext &context, mlir::OwningOpRef &module); +int dumpCircleOps(llvm::raw_fd_ostream &os, mlir::MLIRContext &context, + mlir::OwningOpRef &module); } // namespace Circle } // namespace mlir diff --git a/circle-mlir/circle-mlir/lib/pass/src/CirclePass.cpp b/circle-mlir/circle-mlir/lib/pass/src/CirclePass.cpp index 
63999342f9a..2595453e3fb 100644 --- a/circle-mlir/circle-mlir/lib/pass/src/CirclePass.cpp +++ b/circle-mlir/circle-mlir/lib/pass/src/CirclePass.cpp @@ -16,6 +16,13 @@ #include "circle-mlir/pass/CirclePass.h" +#include "ConvertONNXToCirclePass.h" +#include "RewriteCirclePass.h" +#include "RewriteONNXPass.h" +#include "DumpCircleOpsPass.h" +#include "RuntimeVerifyPass.h" +#include "ShapeInferencePass.h" + #include #include @@ -39,6 +46,49 @@ template <> int safecast(const char *s, const int &value) } // namespace +int preprocessONNX(mlir::MLIRContext &context, mlir::OwningOpRef &module) +{ + mlir::PassManager pm(module.get()->getName(), mlir::OpPassManager::Nesting::Implicit); + + int dump = safecast(std::getenv("CM_ONNX_DUMP"), 0); + std::function shouldPrintBeforePass; + std::function shouldPrintAfterPass; + shouldPrintBeforePass = [&](mlir::Pass *, mlir::Operation *) { return dump ? true : false; }; + shouldPrintAfterPass = [&](mlir::Pass *, mlir::Operation *) { return dump ? true : false; }; + pm.enableIRPrinting(shouldPrintBeforePass, shouldPrintAfterPass, false, false, false, + llvm::errs()); + + int result = 0; + pm.addNestedPass(onnx_mlir::createDecomposeONNXToONNXPass()); + // Replace ONNXReturnOp with func::ReturnOp. 
+ pm.addPass(onnx_mlir::createStandardFuncReturnPass()); + pm.addPass(createRewriteONNXPass()); + auto runres = pm.run(*module); + if (mlir::failed(runres)) + { + // TODO show error message if needed + result = -1; + } + + return result; +} + +int shapeInferenceONNX(mlir::MLIRContext &context, mlir::OwningOpRef &module) +{ + mlir::PassManager pm(module.get()->getName(), mlir::OpPassManager::Nesting::Implicit); + + int result = 0; + pm.addPass(onnx_mlir::createShapeInferencePass()); + auto runres = pm.run(*module); + if (mlir::failed(runres)) + { + // TODO show error message if needed + result = -1; + } + + return result; +} + int convertToCircle(mlir::MLIRContext &context, mlir::OwningOpRef &module) { mlir::PassManager pm(module.get()->getName(), mlir::OpPassManager::Nesting::Implicit); @@ -52,6 +102,7 @@ int convertToCircle(mlir::MLIRContext &context, mlir::OwningOpRef &module) +{ + mlir::PassManager pm(module.get()->getName(), mlir::OpPassManager::Nesting::Implicit); + + int dump = safecast(std::getenv("CM_PASS_DUMP"), 0); + std::function shouldPrintBeforePass; + std::function shouldPrintAfterPass; + shouldPrintBeforePass = [&](mlir::Pass *, mlir::Operation *) { return dump ? true : false; }; + shouldPrintAfterPass = [&](mlir::Pass *, mlir::Operation *) { return dump ? true : false; }; + pm.enableIRPrinting(shouldPrintBeforePass, shouldPrintAfterPass, false, false, false, + llvm::errs()); + + int result = 0; + int64_t dyna_count = 0; + int64_t prev_count = 0; + int64_t same_count = 0; + pm.addPass(CreateShapeInferencePass(dyna_count)); + pm.addPass(mlir::createCanonicalizerPass()); // to make Op::fold() call + pm.addPass(createRewriteCirclePass()); + pm.addPass(CreateRuntimeVerifyPass()); + // NOTE as couldn't find how to make pm.run() continue till there is nothing to shape infer, + // this loop is to emulate that kind of feature. 
+ // 1/ loop until all has become static shape + // 2/ continue loop while dynamic shape node count has changed + // 3/ exit loop if count has not changed for 20 times, to prevent infinite loops + // TODO find a correct way to continue shape infer. + while (same_count < 20) + { + dyna_count = 0; + auto runres = pm.run(*module); + if (mlir::failed(runres)) + { + result = -1; + break; + } + if (dyna_count == 0) + break; + if (dyna_count == prev_count) + same_count++; + else if (dyna_count < prev_count) + same_count = 0; + + prev_count = dyna_count; + } + + return result; +} + +int shapeValidateCircle(mlir::MLIRContext &context, mlir::OwningOpRef &module) +{ + mlir::PassManager pm(module.get()->getName(), mlir::OpPassManager::Nesting::Implicit); + + int result = 0; + pm.addPass(CreateShapeValidatePass()); + auto runres = pm.run(*module); + if (mlir::failed(runres)) + result = -1; + + return result; +} + +int dynaShapeValidateCircle(mlir::MLIRContext &context, mlir::OwningOpRef &module) +{ + mlir::PassManager pm(module.get()->getName(), mlir::OpPassManager::Nesting::Implicit); + + int result = 0; + pm.addPass(CreateDynaShapeValidatePass()); + auto runres = pm.run(*module); + if (mlir::failed(runres)) + result = -1; + + return result; +} + +int dumpCircleOps(llvm::raw_fd_ostream &os, mlir::MLIRContext &context, + mlir::OwningOpRef &module) +{ + mlir::PassManager pm(module.get()->getName(), mlir::OpPassManager::Nesting::Implicit); + + DumpCircleOpsPass::GetOStream_t gos = [&](void) -> llvm::raw_fd_ostream & { return os; }; + + int result = 0; + auto pass = std::make_unique(); + pass->ostream(gos); + pm.addPass(std::move(pass)); + auto runres = pm.run(*module); + if (mlir::failed(runres)) + result = -1; + + return result; +} + } // namespace Circle } // namespace mlir diff --git a/circle-mlir/circle-mlir/lib/pass/src/ConvertHelper.cpp b/circle-mlir/circle-mlir/lib/pass/src/ConvertHelper.cpp new file mode 100644 index 00000000000..6dfce7efbd7 --- /dev/null +++ 
b/circle-mlir/circle-mlir/lib/pass/src/ConvertHelper.cpp @@ -0,0 +1,390 @@ +/* + * Copyright (c) 2025 Samsung Electronics Co., Ltd. All Rights Reserved + * Copyright 2019 The TensorFlow Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ConvertHelper.h" + +#include "circle-mlir/dialect/NameUtils.h" + +#include // from @llvm-project +#include // from @llvm-project +#include +#include + +#include +#include + +namespace mlir +{ +namespace Circle +{ + +std::string GetOperationName(mlir::Operation *op) +{ + assert(op != nullptr); + + mlir::Location opLoc = op->getLoc(); + auto name = mlir::GetNameFromLoc(opLoc); + if (!name.empty()) + return name; + + // TODO remove this when not used anymore + auto strattr = op->getAttrOfType("onnx_node_name"); + if (strattr) + return strattr.str(); + + // Use operator type as name if there is no name + // TODO revise this to better implementation + static uint64_t sequence = 1; + auto seqstr = std::to_string(sequence); + sequence++; + return op->getName().getStringRef().str() + seqstr; +} + +DenseIntElementsAttr GetI1ElementsAttr(ArrayRef values, Builder *builder) +{ + mlir::RankedTensorType ty = + GetTypeFromTensorShape({static_cast(values.size())}, builder->getI1Type(), {}); + return DenseIntElementsAttr::get(ty, values); +} + +DenseIntElementsAttr GetI32ElementsAttr(ArrayRef values, Builder *builder) +{ + mlir::RankedTensorType ty = + 
GetTypeFromTensorShape({static_cast(values.size())}, builder->getI32Type(), {}); + return DenseIntElementsAttr::get(ty, values); +} + +namespace +{ + +template bool ExtractConstantIntValues(mlir::Value &input, std::vector &values) +{ + mlir::DenseElementsAttr dataAttr; + + if (auto constOp = dyn_cast_or_null(input.getDefiningOp())) + { + dataAttr = constOp.getValueAttr().dyn_cast_or_null(); + if (dataAttr == nullptr) + { + auto disValueAttr = constOp.getValueAttr().dyn_cast_or_null(); + if (disValueAttr) + dataAttr = disValueAttr.toDenseElementsAttr(); + } + } + else if (auto constOp2 = dyn_cast_or_null(input.getDefiningOp())) + dataAttr = constOp2.getValueAttr().dyn_cast_or_null(); + else + return false; + + if (dataAttr == nullptr) + return false; + + auto valueIt = dataAttr.getValues().begin(); + auto valueEd = dataAttr.getValues().end(); + for (; valueIt != valueEd; ++valueIt) + { + T value = static_cast((*valueIt).getSExtValue()); + values.push_back(value); + } + return true; +} + +template bool ExtractConstantFloatValues(mlir::Value &input, std::vector &values) +{ + mlir::DenseElementsAttr dataAttr; + + if (auto constOp = dyn_cast_or_null(input.getDefiningOp())) + { + dataAttr = constOp.getValueAttr().dyn_cast_or_null(); + if (dataAttr == nullptr) + { + auto disValueAttr = constOp.getValueAttr().dyn_cast_or_null(); + if (disValueAttr) + dataAttr = disValueAttr.toDenseElementsAttr(); + } + } + else if (auto constOp2 = dyn_cast_or_null(input.getDefiningOp())) + dataAttr = constOp2.getValueAttr().dyn_cast_or_null(); + else + return false; + + if (dataAttr == nullptr) + return false; + + auto valueIt = dataAttr.getValues().begin(); + auto valueEd = dataAttr.getValues().end(); + for (; valueIt != valueEd; ++valueIt) + { + T value = static_cast((*valueIt).convertToFloat()); + values.push_back(value); + } + return true; +} + +} // namespace + +bool ExtractConstantValues(mlir::Value &input, std::vector &values) +{ + return ExtractConstantIntValues(input, values); +} 
+ +bool ExtractConstantValues(mlir::Value &input, std::vector &values) +{ + return ExtractConstantIntValues(input, values); +} + +bool ExtractConstantValues(mlir::Value &input, std::vector &values) +{ + return ExtractConstantFloatValues(input, values); +} + +namespace +{ + +template void ExtractArrayAttrIntValues(mlir::ArrayAttr &array, std::vector &values) +{ + for (int i = 0; i < array.size(); ++i) + { + auto v = GetIntValue(array, i); + values.push_back(v); + } +} + +} // namespace + +void ExtractArrayAttrValues(mlir::ArrayAttr &array, std::vector &values) +{ + ExtractArrayAttrIntValues(array, values); +} + +mlir::Value CreateNoValue(mlir::ConversionPatternRewriter &rewriter) +{ + return rewriter.create(rewriter.getUnknownLoc(), rewriter.getNoneType(), + rewriter.getUnitAttr()); +} + +mlir::RankedTensorType GetChnLastType(mlir::RankedTensorType tensor_type) +{ + auto tensor_shape = tensor_type.getShape(); + // NCHW to NHWC + auto to_nhwc = {tensor_shape[0], tensor_shape[2], tensor_shape[3], tensor_shape[1]}; + return mlir::RankedTensorType::get(to_nhwc, tensor_type.getElementType()); +} + +mlir::Value CreateConst(mlir::ConversionPatternRewriter &rewriter, float value, + const std::string &name) +{ + mlir::Type f32 = rewriter.getF32Type(); + mlir::RankedTensorType f32type = mlir::RankedTensorType::get({}, f32); + llvm::SmallVector values; + values.push_back(value); + mlir::Location constLoc = mlir::NameLoc::get(rewriter.getStringAttr(name)); + return rewriter.create(constLoc, mlir::DenseFPElementsAttr::get(f32type, values)); +} + +mlir::Value CreateConst(mlir::ConversionPatternRewriter &rewriter, mlir::Location &opLoc, + mlir::Value &reference, float value) +{ + auto rtype = reference.getType().dyn_cast_or_null(); + if (not rtype) + return {}; + if (not rtype.getElementType().isF32()) + return {}; + auto shape = rtype.getShape(); + if (shape.size() == 0) + return {}; + + // TODO revise to better value filling + int64_t numElements = 1; + for (size_t dim = 0; dim 
< shape.size(); ++dim) + numElements = numElements * shape[dim]; + + llvm::SmallVector values; + for (int64_t c = 0; c < numElements; ++c) + values.push_back(value); + + return rewriter.create(opLoc, mlir::DenseFPElementsAttr::get(rtype, values)); +} + +mlir::Value CreateConst(mlir::ConversionPatternRewriter &rewriter, mlir::Value &reference, + float value, const std::string &name) +{ + mlir::Location constLoc = mlir::NameLoc::get(rewriter.getStringAttr(name)); + return CreateConst(rewriter, constLoc, reference, value); +} + +mlir::Value CreateI32Const(mlir::ConversionPatternRewriter &rewriter, mlir::Location &opLoc, + int64_t value) +{ + mlir::Type i32 = rewriter.getI32Type(); + mlir::RankedTensorType scalar_type = RankedTensorType::get({}, i32); + auto avalue = static_cast(value); + auto attr = mlir::DenseElementsAttr::get(scalar_type, {avalue}); + return rewriter.create(opLoc, attr); +} + +mlir::Value CreateI32Const(mlir::ConversionPatternRewriter &rewriter, int64_t value, + const std::string &name) +{ + mlir::Location constLoc = mlir::NameLoc::get(rewriter.getStringAttr(name)); + auto const_op = CreateI32Const(rewriter, constLoc, value); + return const_op; +} + +mlir::Value CreateI32Const(mlir::ConversionPatternRewriter &rewriter, mlir::Location &opLoc, + llvm::ArrayRef source) +{ + auto ssize = static_cast(source.size()); + std::vector values; + for (int32_t i = 0; i < ssize; ++i) + values.push_back(source[i]); + mlir::Type i32 = rewriter.getI32Type(); + mlir::RankedTensorType ptype = RankedTensorType::get({ssize}, i32); + return rewriter.create(opLoc, DenseIntElementsAttr::get(ptype, values)); +} + +mlir::Value CreateI32Const(mlir::ConversionPatternRewriter &rewriter, + llvm::ArrayRef source, const std::string &name) +{ + mlir::Location constLoc = mlir::NameLoc::get(rewriter.getStringAttr(name)); + return CreateI32Const(rewriter, constLoc, source); +} + +mlir::Value CreateI32Const(mlir::ConversionPatternRewriter &rewriter, mlir::Location &opLoc, + std::vector 
&source) +{ + auto num = static_cast(source.size()); + mlir::Type i32 = rewriter.getI32Type(); + mlir::RankedTensorType ptype = RankedTensorType::get({num}, i32); + return rewriter.create(opLoc, DenseIntElementsAttr::get(ptype, source)); +} + +mlir::Value CreateI32Const(mlir::ConversionPatternRewriter &rewriter, std::vector &source, + const std::string &name) +{ + mlir::Location constLoc = mlir::NameLoc::get(rewriter.getStringAttr(name)); + return CreateI32Const(rewriter, constLoc, source); +} + +mlir::Value CreateI32Const(mlir::ConversionPatternRewriter &rewriter, mlir::Location &opLoc, + mlir::Value &source) +{ + std::vector values; + if (!ExtractConstantValues(source, values)) + return {}; + + mlir::RankedTensorType stype = source.getType().dyn_cast_or_null(); + mlir::Type i32 = rewriter.getI32Type(); + mlir::RankedTensorType si16stype = RankedTensorType::get(stype.getShape(), i32); + return rewriter.create(opLoc, DenseIntElementsAttr::get(si16stype, values)); +} + +mlir::Value CreateI32Const(mlir::ConversionPatternRewriter &rewriter, mlir::Value &source, + const std::string &name) +{ + mlir::Location constLoc = mlir::NameLoc::get(rewriter.getStringAttr(name)); + return CreateI32Const(rewriter, constLoc, source); +} + +mlir::Value CreateConstBroadcastChn(mlir::ConversionPatternRewriter &rewriter, + mlir::Location &opLoc, mlir::Value &reference, + mlir::Value &source) +{ + // TODO support other dtypes + // TODO support more source shape like 1xN, Nx1, ... 
+ // TODO revise to better form when known + + // check reference is rank4, F32 + auto rtype = reference.getType().dyn_cast_or_null(); + auto rshape = rtype.getShape(); + if (not(rtype.getElementType().isF32() && rshape.size() == 4)) + return source; + + // check source is rank1, F32, same number of elements + auto stype = source.getType().dyn_cast_or_null(); + auto sshape = stype.getShape(); + if (sshape.size() == rshape.size()) + return source; + if (not(stype.getElementType().isF32() && sshape.size() == 1 && rshape[1] == sshape[0])) + return source; + + int32_t C = rshape[1]; + + mlir::DenseElementsAttr dataAttr; + if (auto constOp = dyn_cast(source.getDefiningOp())) + { + dataAttr = constOp.getValueAttr().dyn_cast_or_null(); + if (dataAttr == nullptr) + { + auto disValueAttr = constOp.getValueAttr().dyn_cast_or_null(); + if (disValueAttr) + dataAttr = disValueAttr.toDenseElementsAttr(); + } + } + else if (auto constOp2 = dyn_cast(source.getDefiningOp())) + dataAttr = constOp2.getValueAttr().dyn_cast_or_null(); + else + return source; + if (dataAttr == nullptr) + return source; + + auto valueIt = dataAttr.getValues().begin(); + auto valueEd = dataAttr.getValues().end(); + llvm::SmallVector values; + for (; valueIt != valueEd; ++valueIt) + { + float val = (*valueIt).convertToFloat(); + values.push_back(val); + } + + mlir::Type f32 = rewriter.getF32Type(); + mlir::RankedTensorType ttype = mlir::RankedTensorType::get({1, C, 1, 1}, f32); + return rewriter.create(opLoc, mlir::DenseFPElementsAttr::get(ttype, values)); +} + +mlir::Value CreateConstBroadcastChn(mlir::ConversionPatternRewriter &rewriter, + mlir::Value &reference, mlir::Value &source, + const std::string &name) +{ + mlir::Location constLoc = mlir::NameLoc::get(rewriter.getStringAttr(name)); + return CreateConstBroadcastChn(rewriter, constLoc, reference, source); +} + +bool GetPads(std::optional<::mlir::ArrayAttr> pads, std::vector &values) +{ + bool process = false; + if (pads.has_value()) + { + auto 
value = pads.value(); + // NOTE assert for not rank 4: this is for debug build to break the execution + assert(value.size() == 4); + // NOTE skip processing pads if not rank 4 + if (value.size() != 4) + return process; + for (int i = 0; i < value.size(); ++i) + { + auto v = GetIntValue(value, i); + values.push_back(v); + if (v) + process = true; + } + } + return process; +} + +} // namespace Circle +} // namespace mlir diff --git a/circle-mlir/circle-mlir/lib/pass/src/ConvertHelper.h b/circle-mlir/circle-mlir/lib/pass/src/ConvertHelper.h new file mode 100644 index 00000000000..6b512ae5883 --- /dev/null +++ b/circle-mlir/circle-mlir/lib/pass/src/ConvertHelper.h @@ -0,0 +1,162 @@ +/* + * Copyright (c) 2025 Samsung Electronics Co., Ltd. All Rights Reserved + * Copyright 2019 The TensorFlow Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __CIRCLE_MLIR_PASS_CONVERT_HELPER_H__ +#define __CIRCLE_MLIR_PASS_CONVERT_HELPER_H__ + +#include + +#include + +#include + +namespace mlir +{ +namespace Circle +{ + +inline const char ACT_NONE[]{"NONE"}; +inline const char ACT_RELU[]{"RELU"}; +inline const char ACT_RELU6[]{"RELU6"}; +inline const char ACT_TANH[]{"TANH"}; + +// Get name of the Op +std::string GetOperationName(mlir::Operation *op); + +// Returns 1D 1-bit dense elements attribute with the given values. 
+DenseIntElementsAttr GetI1ElementsAttr(ArrayRef values, Builder *builder); + +// Returns 1D 32-bit dense elements attribute with the given values. +DenseIntElementsAttr GetI32ElementsAttr(ArrayRef values, Builder *builder); + +bool ExtractConstantValues(mlir::Value &input, std::vector &values); +bool ExtractConstantValues(mlir::Value &input, std::vector &values); +bool ExtractConstantValues(mlir::Value &input, std::vector &values); + +void ExtractArrayAttrValues(mlir::ArrayAttr &array, std::vector &values); + +// Create NoValueOp, used for No Bias +mlir::Value CreateNoValue(mlir::ConversionPatternRewriter &rewriter); + +// Get output type of op with channel last order +mlir::RankedTensorType GetChnLastType(mlir::RankedTensorType tensor_type); + +// Create ConstOp with scalar type and float value +mlir::Value CreateConst(mlir::ConversionPatternRewriter &rewriter, float value, + const std::string &name); +// Create ConstOp with type 'reference' and 'value' values +mlir::Value CreateConst(mlir::ConversionPatternRewriter &rewriter, mlir::Location &opLoc, + mlir::Value &reference, float value); +mlir::Value CreateConst(mlir::ConversionPatternRewriter &rewriter, mlir::Value &reference, + float value, const std::string &name); + +// Create scalar ConstOp with value +mlir::Value CreateI32Const(mlir::ConversionPatternRewriter &rewriter, mlir::Location &opLoc, + int64_t value); +mlir::Value CreateI32Const(mlir::ConversionPatternRewriter &rewriter, int64_t value, + const std::string &name); + +// Create int32_t 1D ConstOp with ArrayRef +mlir::Value CreateI32Const(mlir::ConversionPatternRewriter &rewriter, mlir::Location &opLoc, + llvm::ArrayRef values); +mlir::Value CreateI32Const(mlir::ConversionPatternRewriter &rewriter, + llvm::ArrayRef source, const std::string &name); + +// Create int32_t 1D ConstOp with std:vector +mlir::Value CreateI32Const(mlir::ConversionPatternRewriter &rewriter, mlir::Location &opLoc, + std::vector &source); +mlir::Value 
CreateI32Const(mlir::ConversionPatternRewriter &rewriter, std::vector &source, + const std::string &name); + +// Create int32_t ConstOp from int32_t/int64_t +mlir::Value CreateI32Const(mlir::ConversionPatternRewriter &rewriter, mlir::Location &opLoc, + mlir::Value &source); +mlir::Value CreateI32Const(mlir::ConversionPatternRewriter &rewriter, mlir::Value &source, + const std::string &name); + +// Create ConstOp with broadcast applied to rank of 'reference' with values from 'source' +// - 'reference' is rank-4, NCHW format +// - 'source' is vector with C elemnts +// - return rank-4 with shape 1xCx1x1 +// - only F32 is supported for now +// - return source if any condition does not match +mlir::Value CreateConstBroadcastChn(mlir::ConversionPatternRewriter &rewriter, + mlir::Location &opLoc, mlir::Value &reference, + mlir::Value &source); +mlir::Value CreateConstBroadcastChn(mlir::ConversionPatternRewriter &rewriter, + mlir::Value &reference, mlir::Value &source, + const std::string &name); + +// Get integer value of array[index] +template TYPE GetIntValue(mlir::ArrayAttr array, int index) +{ + return static_cast(array.getValue()[index].cast().getInt()); +} + +// Get value from pads if available as return true +// if false, we do not need to process pads value +bool GetPads(std::optional<::mlir::ArrayAttr> pads, std::vector &values); + +#define CHECK_VALID_RANK_2(VALUE) \ + do \ + { \ + if (not VALUE) \ + return mlir::failure(); \ + if (VALUE.getRank() != 2) \ + return mlir::failure(); \ + } while (0) + +#define CHECK_VALID_RANK_4(VALUE) \ + do \ + { \ + if (not VALUE) \ + return mlir::failure(); \ + if (VALUE.getRank() != 4) \ + return mlir::failure(); \ + } while (0) + +#define CHECK_VALID_RANK_2_4(VALUE) \ + do \ + { \ + if (not VALUE) \ + return mlir::failure(); \ + if (!(VALUE.getRank() == 2 || VALUE.getRank() == 4)) \ + return mlir::failure(); \ + } while (0) + +#define CHECK_VALID_RANK_3_4(VALUE) \ + do \ + { \ + if (not VALUE) \ + return mlir::failure(); \ + 
if (!(VALUE.getRank() == 3 || VALUE.getRank() == 4)) \ + return mlir::failure(); \ + } while (0) + +#define CHECK_VALID_RANK_ATLEAST(VALUE, NUM) \ + do \ + { \ + if (not VALUE) \ + return mlir::failure(); \ + if (VALUE.getRank() < NUM) \ + return mlir::failure(); \ + } while (0) + +} // namespace Circle +} // namespace mlir + +#endif // __CIRCLE_MLIR_PASS_CONVERT_HELPER_H__ diff --git a/circle-mlir/circle-mlir/lib/pass/src/ConvertONNXToCirclePass.cpp b/circle-mlir/circle-mlir/lib/pass/src/ConvertONNXToCirclePass.cpp new file mode 100644 index 00000000000..d77c0068bef --- /dev/null +++ b/circle-mlir/circle-mlir/lib/pass/src/ConvertONNXToCirclePass.cpp @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2025 Samsung Electronics Co., Ltd. All Rights Reserved + * Copyright 2019-2022 The IBM Research Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "ConvertONNXToCirclePass.h" + +#define DEBUG_TYPE "o2c" +#include + +// NOTE lets use names from ONNX Op for the conversion class and the file name. +// ONNX: ONNXAbcdOp +// class: ConvAbcd +// file: AbcdOp.h + +#include + +#include +#include +#include +#include + +// from onnx-mlir source +#include + +#include + +namespace mlir +{ +namespace Circle +{ + +namespace +{ + +// Convert for binary input with Activation; such as Add, Sub, Mul, Div, ... 
+template +class ConvBinaryT : public mlir::OpConversionPattern +{ +public: + using mlir::OpConversionPattern::OpConversionPattern; + using OpAdaptor = typename ONNXOpT::Adaptor; + + mlir::LogicalResult matchAndRewrite(ONNXOpT op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override + { + mlir::Value lhs = adaptor.getA(); + mlir::Value rhs = adaptor.getB(); + + rewriter.replaceOpWithNewOp(op, op.getType(), lhs, rhs, "NONE"); + + return mlir::success(); + } +}; + +} // namespace + +namespace +{ + +inline bool isCircleFloat(mlir::Type type) +{ + return type.isa(); +} + +inline bool isCircleInt(mlir::Type type) +{ + mlir::IntegerType intType = type.dyn_cast(); + if (intType) + { + std::set intWidth{1, 8, 16, 32, 64}; + auto w = intType.getWidth(); + if (intWidth.find(w) != intWidth.end()) + { + return intType.isSignless() || (w == 16 && intType.isSigned()) || + (w == 8 && intType.isUnsigned()); + } + } + return false; +} + +} // namespace + +struct ConvertONNXToCirclePass + : public mlir::PassWrapper> +{ + ConvertONNXToCirclePass() = default; + ConvertONNXToCirclePass(const ConvertONNXToCirclePass &pass) + : mlir::PassWrapper>() + { + // Do nothing + } + + llvm::StringRef getArgument() const override { return "onnx-to-circle"; } + + llvm::StringRef getDescription() const override { return "ONNX to Circle"; } + + Option target{*this, "target", ::llvm::cl::desc("ONNX dialect to Circle dialect"), + ::llvm::cl::init("")}; + + void runOnOperation() final; +}; + +void ConvertONNXToCirclePass::runOnOperation() +{ + mlir::func::FuncOp function = getOperation(); + mlir::MLIRContext *context = &getContext(); + mlir::ConversionTarget target(getContext()); + + TypeConverter typeConverter; + typeConverter.addConversion([](Type type) -> std::optional { + // TODO support mode dtypes + // NOTE Conv2D without bias is NoneType + if (isCircleFloat(type) || isCircleInt(type) || type.isa()) + return type; + LLVM_DEBUG({ llvm::dbgs() << "TypeConverter Type 
None\n"; }); + return std::nullopt; + }); + typeConverter.addConversion([&](TensorType type) -> std::optional { + if (typeConverter.isLegal(type.getElementType())) + return type; + LLVM_DEBUG({ llvm::dbgs() << "TypeConverter TensorType None\n"; }); + return std::nullopt; + }); + + target.addLegalDialect(); + target.addLegalDialect(); + + mlir::RewritePatternSet patterns(context); + // NOTE use name from ONNX Op, suffix T for templates + patterns.insert>(typeConverter, context); + + auto res = mlir::applyFullConversion(function, target, std::move(patterns)); + if (mlir::failed(res)) + { + return signalPassFailure(); + } +} + +std::unique_ptr createConvertONNXToCirclePass(void) +{ + return std::make_unique(); +} + +} // namespace Circle +} // namespace mlir diff --git a/circle-mlir/circle-mlir/lib/pass/src/ConvertONNXToCirclePass.h b/circle-mlir/circle-mlir/lib/pass/src/ConvertONNXToCirclePass.h new file mode 100644 index 00000000000..c7a22ae7251 --- /dev/null +++ b/circle-mlir/circle-mlir/lib/pass/src/ConvertONNXToCirclePass.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2025 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __CIRCLE_MLIR_PASS_CONVERT_ONNX2CIRCLE_PASS_H__ +#define __CIRCLE_MLIR_PASS_CONVERT_ONNX2CIRCLE_PASS_H__ + +#include + +namespace mlir +{ +namespace Circle +{ + +std::unique_ptr createConvertONNXToCirclePass(void); + +} // namespace Circle +} // namespace mlir + +#endif // __CIRCLE_MLIR_PASS_CONVERT_ONNX2CIRCLE_PASS_H__ diff --git a/circle-mlir/circle-mlir/lib/pass/src/DumpCircleOpsPass.cpp b/circle-mlir/circle-mlir/lib/pass/src/DumpCircleOpsPass.cpp new file mode 100644 index 00000000000..5b2206d8118 --- /dev/null +++ b/circle-mlir/circle-mlir/lib/pass/src/DumpCircleOpsPass.cpp @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2025 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "DumpCircleOpsPass.h" + +#include + +#include +#include + +namespace mlir +{ +namespace Circle +{ + +void DumpCircleOpsPass::runOnOperation() +{ + mlir::func::FuncOp func = getOperation(); + + for (auto ®ion : func->getRegions()) + dumpRegion(region); +} + +void DumpCircleOpsPass::dumpRegion(mlir::Region ®ion) +{ + region.walk([&](mlir::Operation *op) { ostream() << op->getName() << "\n"; }); + + region.walk([&](mlir::Operation *op) { + for (auto ®ion : op->getRegions()) + dumpRegion(region); + }); +} + +} // namespace Circle +} // namespace mlir diff --git a/circle-mlir/circle-mlir/lib/pass/src/DumpCircleOpsPass.h b/circle-mlir/circle-mlir/lib/pass/src/DumpCircleOpsPass.h new file mode 100644 index 00000000000..a93451bc417 --- /dev/null +++ b/circle-mlir/circle-mlir/lib/pass/src/DumpCircleOpsPass.h @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2025 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __CIRCLE_MLIR_PASS_DUMP_CIRCLE_OPS_PASS_H__ +#define __CIRCLE_MLIR_PASS_DUMP_CIRCLE_OPS_PASS_H__ + +#include +#include + +#include + +namespace mlir +{ +namespace Circle +{ + +struct DumpCircleOpsPass + : public mlir::PassWrapper> +{ + DumpCircleOpsPass() = default; + DumpCircleOpsPass(const DumpCircleOpsPass &pass) + : mlir::PassWrapper>() + { + _getOStream = pass._getOStream; + } + + llvm::StringRef getArgument() const override { return "circle-dump-ops"; } + + llvm::StringRef getDescription() const override { return "Dump Circle ops"; } + + Option target{*this, "target", ::llvm::cl::desc("Dump Circle operators"), + ::llvm::cl::init("")}; + + void runOnOperation() final; + +protected: + void dumpRegion(mlir::Region ®ion); + +public: + using GetOStream_t = std::function; + + void ostream(GetOStream_t os) { _getOStream = os; } + llvm::raw_fd_ostream &ostream(void) { return _getOStream(); } + +protected: + GetOStream_t _getOStream = nullptr; +}; + +} // namespace Circle +} // namespace mlir + +#endif // __CIRCLE_MLIR_PASS_DUMP_CIRCLE_OPS_PASS_H__ diff --git a/circle-mlir/circle-mlir/lib/pass/src/RewriteCirclePass.cpp b/circle-mlir/circle-mlir/lib/pass/src/RewriteCirclePass.cpp new file mode 100644 index 00000000000..2281f4a3e1e --- /dev/null +++ b/circle-mlir/circle-mlir/lib/pass/src/RewriteCirclePass.cpp @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2025 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#define DEBUG_TYPE "o2c" +#include + +#include "RewriteCirclePass.h" +#include "ConvertHelper.h" + +#include + +#include +#include +#include +#include +#include + +namespace mlir +{ +namespace Circle +{ + +struct RewriteCirclePass + : public mlir::PassWrapper> +{ + RewriteCirclePass() = default; + RewriteCirclePass(const RewriteCirclePass &pass) + : mlir::PassWrapper>() + { + // Do nothing + } + + llvm::StringRef getArgument() const override { return "circle-rewrite"; } + + llvm::StringRef getDescription() const override { return "Rewrite Circle ops"; } + + Option target{*this, "target", + ::llvm::cl::desc("Rewrite Circle dialect to Circle dialect"), + ::llvm::cl::init("")}; + + void runOnOperation() final; + +private: + // Apply canonicalization, mainly constant folding, on the function. + void applyCanonicalization(); + // Apply activation fusion + void applyActivationFusion(); +}; + +void RewriteCirclePass::runOnOperation() +{ + // canonicalization + applyCanonicalization(); + // activation fusion + applyActivationFusion(); +} + +void RewriteCirclePass::applyCanonicalization() +{ + mlir::func::FuncOp func = getOperation(); + mlir::MLIRContext *context = &getContext(); + mlir::RewritePatternSet patterns(context); + + func->walk([&](Operation *op) { + op->getRegisteredInfo()->getCanonicalizationPatterns(patterns, context); + }); +} + +void RewriteCirclePass::applyActivationFusion() +{ + mlir::func::FuncOp func = getOperation(); + mlir::MLIRContext *context = &getContext(); + mlir::RewritePatternSet patterns(context); + + // TODO enable Tanh after circle-interpreter works + // patterns.add>(context); + + // TODO enable this after https://github.com/Samsung/ONE/pull/10921 lands + // and NPU_Compiler is ready + // patterns.add>(context); + + // TODO add more patterns + + (void)applyPatternsAndFoldGreedily(func, std::move(patterns)); +} + +std::unique_ptr 
createRewriteCirclePass(void) +{ + return std::make_unique(); +} + +} // namespace Circle +} // namespace mlir diff --git a/circle-mlir/circle-mlir/lib/pass/src/RewriteCirclePass.h b/circle-mlir/circle-mlir/lib/pass/src/RewriteCirclePass.h new file mode 100644 index 00000000000..d238353fe6a --- /dev/null +++ b/circle-mlir/circle-mlir/lib/pass/src/RewriteCirclePass.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2025 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __CIRCLE_MLIR_PASS_REWRITE_CIRCLE_PASS_H__ +#define __CIRCLE_MLIR_PASS_REWRITE_CIRCLE_PASS_H__ + +#include + +namespace mlir +{ +namespace Circle +{ + +std::unique_ptr createRewriteCirclePass(void); + +} // namespace Circle +} // namespace mlir + +#endif // __CIRCLE_MLIR_PASS_REWRITE_CIRCLE_PASS_H__ diff --git a/circle-mlir/circle-mlir/lib/pass/src/RewriteONNXPass.cpp b/circle-mlir/circle-mlir/lib/pass/src/RewriteONNXPass.cpp new file mode 100644 index 00000000000..7303bb7e2bb --- /dev/null +++ b/circle-mlir/circle-mlir/lib/pass/src/RewriteONNXPass.cpp @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2025 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#define DEBUG_TYPE "o2c" +#include + +#include "RewriteONNXPass.h" + +#include +#include +#include +#include +#include + +// from onnx-mlir source +#include + +#include "onnx/CompactReshapeConvReshape.h" + +namespace mlir +{ +namespace Circle +{ + +struct RewriteONNXPass + : public mlir::PassWrapper> +{ + RewriteONNXPass() = default; + RewriteONNXPass(const RewriteONNXPass &pass) + : mlir::PassWrapper>() + { + // Do nothing + } + + llvm::StringRef getArgument() const override { return "circle-onnx-rewrite"; } + + llvm::StringRef getDescription() const override { return "Rewrite ONNX ops"; } + + Option target{*this, "target", + ::llvm::cl::desc("Rewrite ONNX dialect to ONNX dialect"), + ::llvm::cl::init("")}; + + void runOnOperation() final; +}; + +void RewriteONNXPass::runOnOperation() +{ + mlir::func::FuncOp func = getOperation(); + mlir::MLIRContext *context = &getContext(); + mlir::RewritePatternSet patterns(context); + + patterns.add(context); // remove unnecessary reshapes + // TODO add more patterns + + (void)applyPatternsAndFoldGreedily(func, std::move(patterns)); +} + +std::unique_ptr createRewriteONNXPass(void) +{ + return std::make_unique(); +} + +} // namespace Circle +} // namespace mlir diff --git a/circle-mlir/circle-mlir/lib/pass/src/RewriteONNXPass.h b/circle-mlir/circle-mlir/lib/pass/src/RewriteONNXPass.h new file mode 100644 index 00000000000..5c905c97be8 --- /dev/null +++ b/circle-mlir/circle-mlir/lib/pass/src/RewriteONNXPass.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2025 Samsung Electronics Co., Ltd. 
All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __CIRCLE_MLIR_PASS_REWRITE_ONNX_PASS_H__ +#define __CIRCLE_MLIR_PASS_REWRITE_ONNX_PASS_H__ + +#include + +namespace mlir +{ +namespace Circle +{ + +std::unique_ptr createRewriteONNXPass(void); + +} // namespace Circle +} // namespace mlir + +#endif // __CIRCLE_MLIR_PASS_REWRITE_ONNX_PASS_H__ diff --git a/circle-mlir/circle-mlir/lib/pass/src/RuntimeVerifyPass.cpp b/circle-mlir/circle-mlir/lib/pass/src/RuntimeVerifyPass.cpp new file mode 100644 index 00000000000..2f61ff0c555 --- /dev/null +++ b/circle-mlir/circle-mlir/lib/pass/src/RuntimeVerifyPass.cpp @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2025 Samsung Electronics Co., Ltd. All Rights Reserved + * Copyright 2020 The TensorFlow Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "RuntimeVerifyPass.h" + +#include +#include +#include + +#include + +namespace mlir +{ +namespace Circle +{ + +struct RuntimeVerifyPass + : public mlir::PassWrapper> +{ + RuntimeVerifyPass() = default; + RuntimeVerifyPass(const RuntimeVerifyPass &pass) + : mlir::PassWrapper>() + { + // Do nothing + } + + llvm::StringRef getArgument() const override { return "circle-runtime-verify"; } + llvm::StringRef getDescription() const override { return "Circle Runtime Verify"; } + + void runOnOperation(void) final; +}; + +void RuntimeVerifyPass::runOnOperation(void) +{ + getOperation().walk([&](CirRuntimeVerifyOpInterface op) { + if (mlir::failed(op.VerifyCirRuntimeConstraints(op.getOperation(), true))) + signalPassFailure(); + }); +} + +// Verifies circle runtime constraints. +std::unique_ptr CreateRuntimeVerifyPass(void) +{ + return std::make_unique(); +} + +} // namespace Circle +} // namespace mlir diff --git a/circle-mlir/circle-mlir/lib/pass/src/RuntimeVerifyPass.h b/circle-mlir/circle-mlir/lib/pass/src/RuntimeVerifyPass.h new file mode 100644 index 00000000000..7f67d9b3b24 --- /dev/null +++ b/circle-mlir/circle-mlir/lib/pass/src/RuntimeVerifyPass.h @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2025 Samsung Electronics Co., Ltd. All Rights Reserved + * Copyright 2020 The TensorFlow Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __CIRCLE_MLIR_PASS_RUNTIME_VERIFY_PASS_H__ +#define __CIRCLE_MLIR_PASS_RUNTIME_VERIFY_PASS_H__ + +#include + +namespace mlir +{ +namespace Circle +{ + +std::unique_ptr CreateRuntimeVerifyPass(void); + +} // namespace Circle +} // namespace mlir + +#endif // __CIRCLE_MLIR_PASS_RUNTIME_VERIFY_PASS_H__ diff --git a/circle-mlir/circle-mlir/lib/pass/src/ShapeInferencePass.cpp b/circle-mlir/circle-mlir/lib/pass/src/ShapeInferencePass.cpp new file mode 100644 index 00000000000..4eaf015705c --- /dev/null +++ b/circle-mlir/circle-mlir/lib/pass/src/ShapeInferencePass.cpp @@ -0,0 +1,272 @@ +/* + * Copyright (c) 2025 Samsung Electronics Co., Ltd. All Rights Reserved + * Copyright 2020 The TensorFlow Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#define DEBUG_TYPE "o2c" +#include + +#include "ShapeInferencePass.h" + +#include +#include +#include + +#include + +namespace mlir +{ +namespace Circle +{ + +/// The ShapeInferencePass is a pass that performs intra-procedural +/// shape inference. +/// +/// Algorithm: +/// +/// 1) Build a worklist containing all the operations that return a +/// dynamically shaped tensor: these are the operations that need shape +/// inference. 
+/// 2) Iterate on the worklist: +/// a) find an operation to process: the next ready operation in the +/// worklist has all of its arguments non-generic, +/// b) if no operation is found, break out of the loop, +/// c) remove the operation from the worklist, +/// d) infer the shape of its output from the argument types. +/// 3) If the worklist is empty, the algorithm succeeded. +/// +struct ShapeInferencePass + : public mlir::PassWrapper> +{ + MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(ShapeInferencePass) + void runOnOperation() override + { + auto f = getOperation(); + + // Populate the worklist with the operations that need shape inference: + // these are operations that return a dynamic shape. + llvm::SmallPtrSet opWorklist; + int64_t op_count = 0; + f.walk([&](mlir::Operation *op) { + if (returnsDynamicShape(op)) + { + opWorklist.insert(op); + op_count++; + } + }); + + // TODO remove this when this pass runs again if there is any change in the graph + if (_dynacount) + *_dynacount = op_count; + + // Iterate on the operations in the worklist until all operations have been + // inferred or no change happened (fix point). + while (!opWorklist.empty()) + { + // Find the next operation ready for inference, that is an operation + // with all operands already resolved (non-generic). + auto nextop = llvm::find_if(opWorklist, allOperandsInferred); + if (nextop == opWorklist.end()) + break; + + Operation *op = *nextop; + opWorklist.erase(op); + + // Ask the operation to infer its output shapes. 
+ LLVM_DEBUG(llvm::dbgs() << "Inferring shape for: " << *op << "\n"); + if (auto shapeOp = dyn_cast(op)) + { + shapeOp.inferShapes(); + if (returnsDynamicShape(op)) + { + LLVM_DEBUG({ + mlir::Location opLoc = op->getLoc(); + llvm::dbgs() << "-- " << opLoc << " still has dynamic shape\n"; + }); + } + } + else + { + LLVM_DEBUG({ + mlir::Location opLoc = op->getLoc(); + llvm::dbgs() << "-- " << opLoc << " has dynamic shape but no CirShapeInference\n"; + }); + } + } + + // If the operation worklist isn't empty, this indicates a failure. + if (!opWorklist.empty()) + { + f.emitWarning("Shape inference still has dynamic shapes, ") + << opWorklist.size() << " operations couldn't be inferred\n"; + while (!opWorklist.empty()) + { + Operation *op = *opWorklist.begin(); + LLVM_DEBUG(llvm::dbgs() << "Shape inference left: " << *op << "\n"); + opWorklist.erase(op); + } + } + + // set function shape to that from last op. + // this is to update when function shape is unknown at beginning and then + // fixed to known with shape inference. + Operation *returnOp = f.getBody().back().getTerminator(); + assert(returnOp && "function must return"); + FunctionType fty = f.getFunctionType(); + assert(f.getNumResults() == returnOp->getNumOperands() && + "returned results count much match function type"); + f.setType(fty.clone(fty.getInputs(), returnOp->getOperandTypes())); + } + + /// A utility method that returns if the given operation has all of its + /// operands inferred. + static bool allOperandsInferred(Operation *op) + { + return llvm::all_of(op->getOperands(), [](mlir::Value operand) { + // ignore for NoValueOp + auto no_value = dyn_cast_or_null(operand.getDefiningOp()); + if (no_value) + return true; + auto resType = operand.getType(); + return llvm::isa(resType); + }); + } + + /// A utility method that returns if the given operation has a dynamically + /// shaped result. 
+ static bool returnsDynamicShape(Operation *op) + { + // return false to ignore for NoValueOp, as it doesn't have ShapedType + auto no_value = dyn_cast_or_null(*op); + if (no_value) + return false; + + return llvm::any_of(op->getResultTypes(), [](Type resultType) { + // Checks whether each dimension is all dynamic if it is ShapedType. + if (ShapedType shapedType = resultType.dyn_cast()) + { + if (not shapedType.hasRank()) + return true; + int rank = shapedType.getRank(); + for (int i = 0; i < rank; ++i) + if (shapedType.isDynamicDim(i)) + return true; + return false; + } + // Non-shaped types are considered dynamic + return true; + }); + } + + int64_t *_dynacount = nullptr; +}; + +struct ShapeValidatePass + : public mlir::PassWrapper> +{ + MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(ShapeValidatePass) + void runOnOperation() override + { + auto f = getOperation(); + + llvm::SmallPtrSet opWorklist; + f.walk([&](mlir::Operation *op) { + if (returnsDynamicShape(op)) + opWorklist.insert(op); + }); + + if (!opWorklist.empty()) + { + f.emitError("Shape validation found node with unknown shape.\n"); + // TODO dump ops when necessary + signalPassFailure(); + } + } + + static bool returnsDynamicShape(Operation *op) + { + return llvm::any_of(op->getResultTypes(), [](Type resultType) { + if (ShapedType shapedType = resultType.dyn_cast()) + { + int rank = shapedType.getRank(); + for (int i = 0; i < rank; ++i) + if (shapedType.isDynamicDim(i)) + return true; + } + return false; + }); + } +}; + +struct AnyShapeValidatePass + : public mlir::PassWrapper> +{ + MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(AnyShapeValidatePass) + void runOnOperation() override + { + auto f = getOperation(); + + llvm::SmallPtrSet opWorklist; + f.walk([&](mlir::Operation *op) { + if (returnsFullDynamicShape(op)) + opWorklist.insert(op); + }); + + if (!opWorklist.empty()) + { + f.emitError("Shape validation found node with full dynamic shape.\n"); + // TODO dump ops when necessary + 
signalPassFailure(); + } + } + + static bool returnsFullDynamicShape(Operation *op) + { + return llvm::any_of(op->getResultTypes(), [](Type resultType) { + if (ShapedType shapedType = resultType.dyn_cast()) + { + int rank = shapedType.getRank(); + for (int i = 0; i < rank; ++i) + if (not shapedType.isDynamicDim(i)) + return false; + return true; + } + return false; + }); + } +}; + +// Create a Shape Inference pass. +std::unique_ptr CreateShapeInferencePass(int64_t &dynaCount) +{ + auto inst = std::make_unique(); + inst->_dynacount = &dynaCount; + return inst; +} + +std::unique_ptr CreateShapeValidatePass(void) +{ + return std::make_unique(); +} + +// test helper to check input model having output with all dynamic dim +// if output has any static dim, it is success +std::unique_ptr CreateDynaShapeValidatePass(void) +{ + return std::make_unique(); +} + +} // namespace Circle +} // namespace mlir diff --git a/circle-mlir/circle-mlir/lib/pass/src/ShapeInferencePass.h b/circle-mlir/circle-mlir/lib/pass/src/ShapeInferencePass.h new file mode 100644 index 00000000000..e833e1d0430 --- /dev/null +++ b/circle-mlir/circle-mlir/lib/pass/src/ShapeInferencePass.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2025 Samsung Electronics Co., Ltd. All Rights Reserved + * Copyright 2020 The TensorFlow Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __CIRCLE_MLIR_PASS_SHAPE_INFERENCE_PASS_H__ +#define __CIRCLE_MLIR_PASS_SHAPE_INFERENCE_PASS_H__ + +#include + +namespace mlir +{ +namespace Circle +{ + +std::unique_ptr CreateShapeInferencePass(int64_t &dynaCount); +std::unique_ptr CreateShapeValidatePass(void); +std::unique_ptr CreateDynaShapeValidatePass(void); + +} // namespace Circle +} // namespace mlir + +#endif // __CIRCLE_MLIR_PASS_SHAPE_INFERENCE_PASS_H__ diff --git a/circle-mlir/circle-mlir/lib/pass/src/onnx/CompactReshapeConvReshape.h b/circle-mlir/circle-mlir/lib/pass/src/onnx/CompactReshapeConvReshape.h new file mode 100644 index 00000000000..90914a2ac26 --- /dev/null +++ b/circle-mlir/circle-mlir/lib/pass/src/onnx/CompactReshapeConvReshape.h @@ -0,0 +1,173 @@ +/* + * Copyright (c) 2025 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __CIRCLE_MLIR_PASS_ONNX_COMPACT_RESHAPE_CONV_RESHAPE_H__ +#define __CIRCLE_MLIR_PASS_ONNX_COMPACT_RESHAPE_CONV_RESHAPE_H__ + +#include "ConvertHelper.h" + +namespace mlir +{ +namespace Circle +{ + +namespace +{ + +// check if values are rank 3 1N1 +int64_t check_R3_1N1(std::vector &values) +{ + if (values.size() == 3) + { + if (values[0] == 1 && values[2] == 1) + return values[1]; + } + return 0; +} + +// check if values are rank 4 1N11 +int64_t check_R4_1N11(std::vector &values) +{ + if (values.size() == 4) + { + if (values[0] == 1 && values[2] == 1 && values[3] == 1) + return values[1]; + } + return 0; +} + +mlir::ArrayAttr duplicate(mlir::PatternRewriter &rewriter, mlir::ArrayAttr input) +{ + mlir::SmallVector temp_v; + size_t size = input.size(); + for (size_t i = 0; i < size; i++) + { + auto val = input[i].dyn_cast().getInt(); + temp_v.push_back(val); + temp_v.push_back(val); + } + return rewriter.getI64ArrayAttr(temp_v); +} + +} // namespace + +// Find sequence with I/O shape of 1N11 -> 1N1 -> 1M1 -> 1M11 +// (1N11)- ONNXReshape -(1N1)- ONNXConv -(1M1)- ONNXReshape -(1M11) +// Relace with with I/O shape of 1N11 -> 1M11 +// (1N11)- ONNXConv -(1M11) +// NOTE +// ShuffleFaceNet end part has this sequence. +// onnx-tf does like this. 
+// don't know why ShuffleFaceNet creates with torch.nn.Conv1d(); +struct CompactReshapeConvReshape : public OpRewritePattern +{ + using OpRewritePattern::OpRewritePattern; + + mlir::LogicalResult matchAndRewrite(mlir::ONNXReshapeOp reshape2_op, + mlir::PatternRewriter &rewriter) const override + { + // check Conv-Reshape sequence with shape + mlir::Operation *is_conv = reshape2_op.getOperand(0).getDefiningOp(); + mlir::Operation *is_const_r2 = reshape2_op.getOperand(1).getDefiningOp(); + bool bis_conv = mlir::isa_and_nonnull(is_conv); + bool bis_const = mlir::isa_and_nonnull(is_const_r2); + if (!bis_conv || !bis_const) + return mlir::failure(); + + // check if 'shape' value is 1N11 + mlir::Value const_r2_op = cast(is_const_r2); + std::vector shape_values_r2; + if (!ExtractConstantValues(const_r2_op, shape_values_r2)) + return mlir::failure(); + if (check_R4_1N11(shape_values_r2) == 0) + return mlir::failure(); + + auto conv_op = cast(is_conv); + + // check Reshape-Conv sequence + mlir::Operation *is_reshape = conv_op.getOperand(0).getDefiningOp(); + bool bis_reshape = mlir::isa_and_nonnull(is_reshape); + if (!bis_reshape) + return mlir::failure(); + + // check Reshape shape is constant and 1N1 + auto reshape1_op = cast(is_reshape); + mlir::Operation *is_const_r1 = reshape1_op.getOperand(1).getDefiningOp(); + bis_const = mlir::isa_and_nonnull(is_const_r1); + if (!bis_const) + return mlir::failure(); + mlir::Value const_r1_op = cast(is_const_r1); + std::vector shape_values_r1; + if (!ExtractConstantValues(const_r1_op, shape_values_r1)) + return mlir::failure(); + if (check_R3_1N1(shape_values_r1) == 0) + return mlir::failure(); + + // Get Conv-weight, check shape is OI1, and create new weight with OI11 + mlir::Operation *is_conv_w = conv_op.getOperand(1).getDefiningOp(); + bis_const = mlir::isa_and_nonnull(is_conv_w); + if (!bis_const) + return mlir::failure(); + mlir::Value const_w_op = cast(is_conv_w); + auto w_type = const_w_op.getType().dyn_cast_or_null(); + if 
(!w_type.getElementType().isF32()) + return mlir::failure(); + auto w_shape = w_type.getShape(); + if (w_shape.size() != 3) + return mlir::failure(); + + // Now, op sequence and shape values match + + // Create weight with 4D OI11 + std::vector weight_values; + if (!ExtractConstantValues(const_w_op, weight_values)) + return mlir::failure(); + + int64_t w_s_O = w_shape[0]; + int64_t w_s_I = w_shape[1]; + int64_t w_s_2 = w_shape[2]; + auto w_rttype = + mlir::RankedTensorType::get({w_s_O, w_s_I, w_s_2, w_s_2}, rewriter.getF32Type()); + mlir::Location opLoc = const_w_op.getLoc(); + mlir::Attribute empty_sparse; + mlir::Attribute attr_value = + mlir::DenseElementsAttr::get(w_rttype, llvm::ArrayRef(weight_values)); + mlir::Value new_kernel = rewriter.create(opLoc, empty_sparse, attr_value); + + // Get input of first Reshape to be used for input of New Conv + mlir::Value bias = conv_op.getOperand(2); + mlir::Value input_r1_op = reshape1_op.getOperand(0); + + // some attributes needs suplicate in size to match 1D -> 2D + mlir::StringAttr auto_pad = conv_op.getAutoPadAttr(); + mlir::ArrayAttr dilations = duplicate(rewriter, conv_op.getDilationsAttr()); + mlir::IntegerAttr group = conv_op.getGroupAttr(); + mlir::ArrayAttr kernel_shape = duplicate(rewriter, conv_op.getKernelShapeAttr()); + mlir::ArrayAttr pads = duplicate(rewriter, conv_op.getPadsAttr()); + mlir::ArrayAttr strides = duplicate(rewriter, conv_op.getStridesAttr()); + + rewriter.replaceOpWithNewOp(reshape2_op, reshape2_op.getType(), input_r1_op, + new_kernel, bias, auto_pad, dilations, group, + kernel_shape, pads, strides); + + return mlir::success(); + } +}; + +} // namespace Circle +} // namespace mlir + +#endif // __CIRCLE_MLIR_PASS_ONNX_COMPACT_RESHAPE_CONV_RESHAPE_H__ diff --git a/circle-mlir/circle-mlir/tools/CMakeLists.txt b/circle-mlir/circle-mlir/tools/CMakeLists.txt new file mode 100644 index 00000000000..144cba418d4 --- /dev/null +++ b/circle-mlir/circle-mlir/tools/CMakeLists.txt @@ -0,0 +1 @@ 
+add_subdirectory(onnx2circle) diff --git a/circle-mlir/circle-mlir/tools/onnx2circle/CMakeLists.txt b/circle-mlir/circle-mlir/tools/onnx2circle/CMakeLists.txt new file mode 100644 index 00000000000..3e0e5c0de8b --- /dev/null +++ b/circle-mlir/circle-mlir/tools/onnx2circle/CMakeLists.txt @@ -0,0 +1,45 @@ +set(SRC + src/onnx2circle.cpp +) + +string(TOLOWER "${CMAKE_BUILD_TYPE}" BUILD_TYPE_LOWER) +if(BUILD_TYPE_LOWER STREQUAL "debug") + list(APPEND SRC src/driverDebug.cpp) +else() + list(APPEND SRC src/driverRelease.cpp) + set(RELEASE_BUILD ON) +endif() + +add_executable(onnx2circle ${SRC}) +cir_mlir_static_flags(onnx2circle) +cir_onnx_static_flags(onnx2circle) +cir_onnx_tools_flags(onnx2circle) +target_link_libraries(onnx2circle PUBLIC cirmlir_dialect) +target_link_libraries(onnx2circle PUBLIC cirmlir_pass) +target_link_libraries(onnx2circle PUBLIC cirmlir_export) +target_link_libraries(onnx2circle PUBLIC cirmlir_coverage) +if(RELEASE_BUILD) + target_link_libraries(onnx2circle PUBLIC arser) +endif() + +install(TARGETS onnx2circle DESTINATION bin) + +if(NOT ENABLE_TEST) + return() +endif() + +include(TestModels.cmake) + +set(SRC_TEST + src/onnx2circle.cpp + src/onnx2circle.test.cpp +) + +GTest_AddTest_Public(onnx2circle_test ${SRC_TEST}) +cir_mlir_static_flags(onnx2circle_test) +cir_onnx_static_flags(onnx2circle_test) +cir_onnx_tools_flags(onnx2circle_test) +target_link_libraries(onnx2circle_test PUBLIC cirmlir_dialect) +target_link_libraries(onnx2circle_test PUBLIC cirmlir_pass) +target_link_libraries(onnx2circle_test PUBLIC cirmlir_export) +target_link_libraries(onnx2circle_test PUBLIC cirmlir_coverage) diff --git a/circle-mlir/circle-mlir/tools/onnx2circle/TestModels.cmake b/circle-mlir/circle-mlir/tools/onnx2circle/TestModels.cmake new file mode 100644 index 00000000000..a359afd379f --- /dev/null +++ b/circle-mlir/circle-mlir/tools/onnx2circle/TestModels.cmake @@ -0,0 +1,113 @@ +set(FILE_DEPS ) + +# ConvertUnitModel used in test.lst +set(TEST_MODELS ) 
+macro(ConvertUnitModel MLIR_FNAME) + # copy to build folder + set(TEST_MLIR_MODEL_SRC "${CMAKE_SOURCE_DIR}/models/mlir/${MLIR_FNAME}") + set(TEST_MLIR_MODEL_DST "${CMAKE_CURRENT_BINARY_DIR}/models/mlir/${MLIR_FNAME}") + add_custom_command( + OUTPUT ${TEST_MLIR_MODEL_DST} + COMMAND ${CMAKE_COMMAND} -E copy "${TEST_MLIR_MODEL_SRC}" "${TEST_MLIR_MODEL_DST}" + DEPENDS ${TEST_MLIR_MODEL_SRC} + COMMENT "tools/onnx2circle: prepare mlir/${MLIR_FNAME}" + ) + list(APPEND TEST_MODELS "${MLIR_FNAME}") + list(APPEND FILE_DEPS "${TEST_MLIR_MODEL_DST}") +endmacro(ConvertUnitModel) + +set(TEST_NEG_MODELS ) +macro(ConvertUnitModelNEG MLIR_FNAME) + # copy to build folder + set(TEST_MLIR_MODEL_SRC "${CMAKE_SOURCE_DIR}/models/mlir/${MLIR_FNAME}") + set(TEST_MLIR_MODEL_DST "${CMAKE_CURRENT_BINARY_DIR}/models/mlir/${MLIR_FNAME}") + add_custom_command( + OUTPUT ${TEST_MLIR_MODEL_DST} + COMMAND ${CMAKE_COMMAND} -E copy "${TEST_MLIR_MODEL_SRC}" "${TEST_MLIR_MODEL_DST}" + DEPENDS ${TEST_MLIR_MODEL_SRC} + COMMENT "tools/onnx2circle: prepare mlir/${MLIR_FNAME}" + ) + list(APPEND TEST_NEG_MODELS "${MLIR_FNAME}") + list(APPEND FILE_DEPS "${TEST_MLIR_MODEL_DST}") +endmacro(ConvertUnitModelNEG) + +set(VALIDATE_SHAPEINF_MODELS) +macro(ValidateShapeInf MLIR_FNAME) + # copy to build folder + set(TEST_MLIR_MODEL_SRC "${CMAKE_SOURCE_DIR}/models/mlir/${MLIR_FNAME}") + set(TEST_MLIR_MODEL_DST "${CMAKE_CURRENT_BINARY_DIR}/models/mlir/${MLIR_FNAME}") + add_custom_command( + OUTPUT ${TEST_MLIR_MODEL_DST} + COMMAND ${CMAKE_COMMAND} -E copy "${TEST_MLIR_MODEL_SRC}" "${TEST_MLIR_MODEL_DST}" + DEPENDS ${TEST_MLIR_MODEL_SRC} + COMMENT "tools/onnx2circle: prepare mlir/${MLIR_FNAME}" + ) + list(APPEND VALIDATE_SHAPEINF_MODELS "${MLIR_FNAME}") + list(APPEND FILE_DEPS "${TEST_MLIR_MODEL_DST}") +endmacro(ValidateShapeInf) + +# ValidateDynaShapeInf is to test output should have dynamic shape from dynamic shape input +set(VALIDATE_DYNASHAPEINF_MODELS) +macro(ValidateDynaShapeInf MLIR_FNAME) + # copy to build folder + 
set(TEST_MLIR_MODEL_SRC "${CMAKE_SOURCE_DIR}/models/mlir/${MLIR_FNAME}") + set(TEST_MLIR_MODEL_DST "${CMAKE_CURRENT_BINARY_DIR}/models/mlir/${MLIR_FNAME}") + add_custom_command( + OUTPUT ${TEST_MLIR_MODEL_DST} + COMMAND ${CMAKE_COMMAND} -E copy "${TEST_MLIR_MODEL_SRC}" "${TEST_MLIR_MODEL_DST}" + DEPENDS ${TEST_MLIR_MODEL_SRC} + COMMENT "tools/onnx2circle: prepare mlir/${MLIR_FNAME}" + ) + list(APPEND VALIDATE_DYNASHAPEINF_MODELS "${MLIR_FNAME}") + list(APPEND FILE_DEPS "${TEST_MLIR_MODEL_DST}") +endmacro(ValidateDynaShapeInf) + +# Read "test.lst" +include("test.lst") + +add_custom_target(onnx2circle_deps ALL DEPENDS ${FILE_DEPS}) + +foreach(MODEL IN ITEMS ${TEST_MODELS}) + set(MLIR_MODEL_PATH "${CMAKE_CURRENT_BINARY_DIR}/models/mlir/${MODEL}") + set(CIRCLE_MODEL_PATH "${CMAKE_CURRENT_BINARY_DIR}/models/mlir/${MODEL}.circle") + add_test( + NAME onnx2circle_test_${MODEL} + COMMAND "$" + "${MLIR_MODEL_PATH}" + "${CIRCLE_MODEL_PATH}" + ) +endforeach() + +foreach(MODEL IN ITEMS ${TEST_NEG_MODELS}) + set(MLIR_MODEL_PATH "${CMAKE_CURRENT_BINARY_DIR}/models/mlir/${MODEL}") + set(CIRCLE_MODEL_PATH "${CMAKE_CURRENT_BINARY_DIR}/models/mlir/${MODEL}.circle") + add_test( + NAME onnx2circle_neg_test_${MODEL} + COMMAND "$" + "${MLIR_MODEL_PATH}" + "${CIRCLE_MODEL_PATH}" + ) + set_tests_properties(onnx2circle_neg_test_${MODEL} PROPERTIES WILL_FAIL TRUE) +endforeach() + +foreach(MODEL IN ITEMS ${VALIDATE_SHAPEINF_MODELS}) + set(MLIR_MODEL_PATH "${CMAKE_CURRENT_BINARY_DIR}/models/mlir/${MODEL}") + set(CIRCLE_MODEL_PATH "${CMAKE_CURRENT_BINARY_DIR}/models/mlir/${MODEL}.circle") + add_test( + NAME onnx2circle_valshapeinf_${MODEL} + COMMAND "$" "--check_shapeinf" + "${MLIR_MODEL_PATH}" + "${CIRCLE_MODEL_PATH}" + ) +endforeach() + +foreach(MODEL IN ITEMS ${VALIDATE_DYNASHAPEINF_MODELS}) + set(MLIR_MODEL_PATH "${CMAKE_CURRENT_BINARY_DIR}/models/mlir/${MODEL}") + set(CIRCLE_MODEL_PATH "${CMAKE_CURRENT_BINARY_DIR}/models/mlir/${MODEL}.circle") + add_test( + NAME 
onnx2circle_valdynshapeinf_${MODEL} + COMMAND "$" "--check_dynshapeinf" + "${MLIR_MODEL_PATH}" + "${CIRCLE_MODEL_PATH}" + ) +endforeach() diff --git a/circle-mlir/circle-mlir/tools/onnx2circle/src/cmdOptions.h b/circle-mlir/circle-mlir/tools/onnx2circle/src/cmdOptions.h new file mode 100644 index 00000000000..8df741e4583 --- /dev/null +++ b/circle-mlir/circle-mlir/tools/onnx2circle/src/cmdOptions.h @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2025 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __CMD_OPTIONS_H__ +#define __CMD_OPTIONS_H__ + +namespace opts +{ + +inline const char *__opt_save_ops = "Save operators list instead of .circle "; +inline const char *__opt_unroll_rnn_d = "Unroll RNN Op if exist"; +inline const char *__opt_unroll_lstm_d = "Unroll LSTM Op if exist"; +inline const char *__opt_edbuf_d = "Tensorflow experimental_disable_batchmatmul_unfold"; +inline const char *__opt_keep_io_order_d = "Rename I/O to match order (obsolete)"; +inline const char *__opt_save_int_d = "Save intermediate files (obsolete)"; +inline const char *__opt_check_shapeinf = "Validate shape inference"; +inline const char *__opt_check_dynshapeinf = "Validate dynamic shape inference"; + +} // namespace opts + +#endif // __CMD_OPTIONS_H__ diff --git a/circle-mlir/circle-mlir/tools/onnx2circle/src/driverDebug.cpp b/circle-mlir/circle-mlir/tools/onnx2circle/src/driverDebug.cpp new file mode 100644 index 00000000000..35c570cdbf1 --- /dev/null +++ b/circle-mlir/circle-mlir/tools/onnx2circle/src/driverDebug.cpp @@ -0,0 +1,170 @@ +/* + * Copyright (c) 2025 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "onnx2circle.h" +#include "cmdOptions.h" + +#include +#include +#include + +#define DEBUG_TYPE "o2c" +#include + +#include +#include + +#include +#include +#include + +namespace opts +{ + +llvm::cl::OptionCategory O2CirCat("onnx2circle options"); +llvm::cl::OptionCategory O2CObsol("obsolete options"); + +static llvm::cl::opt InputFilename(llvm::cl::Positional, llvm::cl::desc(""), + llvm::cl::Required, llvm::cl::cat(O2CirCat)); + +static llvm::cl::opt OutputFilename(llvm::cl::Positional, llvm::cl::desc(""), + llvm::cl::Required, llvm::cl::cat(O2CirCat)); + +// Note: If you look at the help desctription in this program, +// it is configured to accept as the third Positional argument +// which is set from the `onnx-mlir` submodule. +// This option is not used in actual work. + +static llvm::cl::opt OptSaveOPS("save_ops", llvm::cl::desc(__opt_save_ops), + llvm::cl::init(false), llvm::cl::cat(O2CirCat)); + +static llvm::cl::opt RunSingleInstance("o2c-single", llvm::cl::desc("run single instance"), + llvm::cl::init(false), llvm::cl::cat(O2CirCat)); + +static llvm::cl::opt OptUnrollRNN("unroll_rnn", llvm::cl::desc(__opt_unroll_rnn_d), + llvm::cl::init(false), llvm::cl::cat(O2CirCat)); + +static llvm::cl::opt OptUnrollLSTM("unroll_lstm", llvm::cl::desc(__opt_unroll_lstm_d), + llvm::cl::init(false), llvm::cl::cat(O2CirCat)); + +static llvm::cl::opt OptExpDisBMMUnfold("experimental_disable_batchmatmul_unfold", + llvm::cl::desc(__opt_edbuf_d), llvm::cl::init(false), + llvm::cl::cat(O2CirCat)); + +static llvm::cl::opt OptKeepIOOrder("keep_io_order", llvm::cl::desc(__opt_keep_io_order_d), + llvm::cl::init(false), llvm::cl::cat(O2CObsol)); + +static llvm::cl::opt OptSaveIntermediate("save_intermediate", + llvm::cl::desc(__opt_save_int_d), + llvm::cl::init(false), llvm::cl::cat(O2CObsol)); + +// shape inference validation +static llvm::cl::opt OptCheckShapeInf("check_shapeinf", llvm::cl::desc(__opt_check_shapeinf), + llvm::cl::init(false), 
llvm::cl::cat(O2CirCat)); +static llvm::cl::opt OptCheckDynShapeInf("check_dynshapeinf", + llvm::cl::desc(__opt_check_dynshapeinf), + llvm::cl::init(false), llvm::cl::cat(O2CirCat)); + +} // namespace opts + +class SingleRun +{ +public: + static void Ensure(void) + { + SingleRun::_lock_fd = -1; + int rc = -1; + int retry = 100; // retry for 10 seconds + + do + { + rc = -1; + SingleRun::_lock_fd = open(_lock_file, O_CREAT | O_RDWR, 0660); + if (_lock_fd > 0) + { + rc = flock(SingleRun::_lock_fd, LOCK_EX | LOCK_NB); + if (rc == 0) + break; + close(SingleRun::_lock_fd); + SingleRun::_lock_fd = -1; + } + usleep(100 * 1000); // wait for 100 msecs + if (--retry < 0) + { + std::cerr << "Failed to SingleRun::Ensure." << std::endl; + break; + } + } while (rc != 0); + } + + static void Release(void) + { + if (SingleRun::_lock_fd > 0) + { + close(_lock_fd); + _lock_fd = -1; + } + if (_lock_file) + { + unlink(_lock_file); + } + } + +private: + static int _lock_fd; + static const char *const _lock_file; +}; + +int SingleRun::_lock_fd = -1; +const char *const SingleRun::_lock_file = "/tmp/onnx2cirlce_run_single.lock"; + +void onexit() { SingleRun::Release(); } + +int main(int argc, char *argv[]) +{ + std::atexit(onexit); + + llvm::cl::ParseCommandLineOptions(argc, argv, ""); + + LLVM_DEBUG({ + llvm::dbgs() << "onnx2circle debug enter\n"; + llvm::dbgs() << "Source model: " << opts::InputFilename << "\n"; + llvm::dbgs() << "Target model: " << opts::OutputFilename << "\n"; + }); + + if (!llvm::sys::fs::exists(opts::InputFilename)) + { + std::cerr << "Source model: " << opts::InputFilename << " not found." 
<< std::endl; + return -1; + } + + if (opts::RunSingleInstance) + SingleRun::Ensure(); + + O2Cparam param; + param.sourcefile = opts::InputFilename; + param.targetfile = opts::OutputFilename; + param.save_ops = opts::OptSaveOPS; + param.unroll_rnn = opts::OptUnrollRNN; + param.unroll_lstm = opts::OptUnrollLSTM; + param.unfold_batchmatmul = !opts::OptExpDisBMMUnfold; + param.check_shapeinf = opts::OptCheckShapeInf; + param.check_dynshapeinf = opts::OptCheckDynShapeInf; + + auto result = entry(param); + LLVM_DEBUG({ llvm::dbgs() << "Conversion done: " << result << "\n"; }); + return result; +} diff --git a/circle-mlir/circle-mlir/tools/onnx2circle/src/driverRelease.cpp b/circle-mlir/circle-mlir/tools/onnx2circle/src/driverRelease.cpp new file mode 100644 index 00000000000..24cb834bba0 --- /dev/null +++ b/circle-mlir/circle-mlir/tools/onnx2circle/src/driverRelease.cpp @@ -0,0 +1,135 @@ +/* + * Copyright (c) 2025 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "onnx2circle.h" +#include "cmdOptions.h" + +#include + +#include + +using namespace opts; + +std::string get_copyright(void) +{ + std::string str; + str = "Copyright (c) 2023 Samsung Electronics Co., Ltd. 
All Rights Reserved\r\n"; + str += "Licensed under the Apache License, Version 2.0\r\n"; + str += "https://github.sec.samsung.net/one-project/circle-mlir"; + return str; +} + +void print_version(void) +{ + std::cout << "onnx2circle version " << __version << std::endl; + std::cout << get_copyright() << std::endl; +} + +void print_version_only(void) { std::cout << __version; } + +int safe_main(int argc, char *argv[]) +{ + arser::Arser arser; + + arser::Helper::add_version(arser, print_version); + + arser.add_argument("--version_only") + .nargs(0) + .required(false) + .default_value(false) + .help("Show version number only and exit") + .exit_with(print_version_only); + + arser.add_argument("--save_ops") + .nargs(0) + .required(false) + .default_value(false) + .help(__opt_save_ops); + + arser.add_argument("--unroll_rnn") + .nargs(0) + .required(false) + .default_value(false) + .help(__opt_unroll_rnn_d); + + arser.add_argument("--unroll_lstm") + .nargs(0) + .required(false) + .default_value(false) + .help(__opt_unroll_lstm_d); + + arser.add_argument("--experimental_disable_batchmatmul_unfold") + .nargs(0) + .required(false) + .default_value(false) + .help(__opt_edbuf_d); + + // ignored obsolete options + arser.add_argument("--keep_io_order") + .nargs(0) + .required(false) + .default_value(false) + .help(__opt_keep_io_order_d); + + arser.add_argument("--save_intermediate") + .nargs(0) + .required(false) + .default_value(false) + .help(__opt_save_int_d); + + arser.add_argument("--check_shapeinf") + .nargs(0) + .required(false) + .default_value(false) + .help(__opt_check_shapeinf); + + arser.add_argument("--check_dynshapeinf") + .nargs(0) + .required(false) + .default_value(false) + .help(__opt_check_dynshapeinf); + + // two positional arguments + arser.add_argument("onnx").help("Input ONNX file"); + arser.add_argument("circle").help("Output Circle file"); + + arser.parse(argc, argv); + + O2Cparam param; + param.sourcefile = arser.get("onnx"); + param.targetfile = 
arser.get("circle"); + param.save_ops = arser.get("--save_ops"); + param.unroll_rnn = arser.get("--unroll_rnn"); + param.unroll_lstm = arser.get("--unroll_lstm"); + param.unfold_batchmatmul = !arser.get("--experimental_disable_batchmatmul_unfold"); + param.check_shapeinf = arser.get("--check_shapeinf"); + param.check_dynshapeinf = arser.get("--check_dynshapeinf"); + + return entry(param); +} + +int main(int argc, char *argv[]) +{ + try + { + return safe_main(argc, argv); + } + catch (const std::exception &err) + { + std::cout << err.what() << '\n'; + } + return -1; +} diff --git a/circle-mlir/circle-mlir/tools/onnx2circle/src/onnx2circle.cpp b/circle-mlir/circle-mlir/tools/onnx2circle/src/onnx2circle.cpp new file mode 100644 index 00000000000..a2cb0ee8382 --- /dev/null +++ b/circle-mlir/circle-mlir/tools/onnx2circle/src/onnx2circle.cpp @@ -0,0 +1,207 @@ +/* + * Copyright (c) 2025 Samsung Electronics Co., Ltd. All Rights Reserved + * Copyright 2019-2022 The IBM Research Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "onnx2circle.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +// ONNX-MLIR +#include +#include +#include + +// CIRCLE-MLIR +#include +#include +#include + +#include +#include + +namespace onnx2circle +{ + +// from ONNX-MLIR src/Compiler/CompilerUtils.cpp +std::string dirName(llvm::StringRef inputFilename) +{ + llvm::SmallVector path(inputFilename.begin(), inputFilename.end()); + llvm::sys::path::remove_filename(path); + return std::string(path.data(), path.size()); +} + +// from ONNX-MLIR src/Compiler/CompilerUtils.cpp +void registerDialects(mlir::MLIRContext &context) +{ + context.getOrLoadDialect(); + + context.getOrLoadDialect(); + context.getOrLoadDialect(); +} + +int loadONNX(const std::string &onnx_path, mlir::MLIRContext &context, + mlir::OwningOpRef &module) +{ + llvm::StringRef inputFilename(onnx_path); + std::string errorMessage; + if (inputFilename.endswith(".mlir")) + { + auto input = mlir::openInputFile(inputFilename, &errorMessage); + if (!input) + { + llvm::errs() << errorMessage << "\n"; + llvm::errs().flush(); + return -1; + } + + // Parse the input mlir. 
+ llvm::SourceMgr sourceMgr; + mlir::SourceMgrDiagnosticHandler sourceMgrHandler(sourceMgr, &context); + sourceMgr.AddNewSourceBuffer(std::move(input), llvm::SMLoc()); + module = mlir::parseSourceFile(sourceMgr, &context); + if (!module) + { + llvm::errs() << "Error can't load file " << inputFilename << "\n"; + llvm::errs().flush(); + return -1; + } + } + else if (inputFilename.endswith(".onnx")) + { + onnx_mlir::ImportOptions options; + options.useOnnxModelTypes = onnx_mlir::useOnnxModelTypes; + options.invokeOnnxVersionConverter = onnx_mlir::invokeOnnxVersionConverter; + options.shapeInformation = onnx_mlir::shapeInformation; + options.allowSorting = onnx_mlir::allowSorting; + options.externalDataDir = dirName(inputFilename); + + int rc = + onnx_mlir::ImportFrontendModelFile(inputFilename, context, module, &errorMessage, options); + if (rc != onnx_mlir::CompilerSuccess) + { + llvm::errs() << "Error can't load file " << inputFilename << "\n"; + llvm::errs() << errorMessage << "\n"; + llvm::errs().flush(); + return -1; + } + } + else + { + llvm::errs() << "Unknown model file extension.\n"; + llvm::errs().flush(); + return -1; + } + + return 0; +} + +int convertToCircle(const O2Cparam ¶m) +{ + const std::string &sourcefile = param.sourcefile; + const std::string &targetfile = param.targetfile; + + mlir::MLIRContext context; + registerDialects(context); + + mlir::OwningOpRef module; + auto result = loadONNX(sourcefile, context, module); + if (result != 0) + return result; + + result = mlir::Circle::preprocessONNX(context, module); + if (result != 0) + return result; + + result = mlir::Circle::shapeInferenceONNX(context, module); + if (result != 0) + return result; + + result = mlir::Circle::convertToCircle(context, module); + if (result != 0) + return result; + + result = mlir::Circle::postProcessCircle(context, module); + if (result != 0) + return result; + + if (param.check_shapeinf) + { + result = mlir::Circle::shapeValidateCircle(context, module); + if (result != 
0) + return result; + } + if (param.check_dynshapeinf) + { + // output should have any static shape from dynamic input + result = mlir::Circle::dynaShapeValidateCircle(context, module); + if (result != 0) + return result; + } + + std::string error_msg; + if (param.save_ops) + { + std::string output_filename = targetfile + ".ops"; + auto output = mlir::openOutputFile(output_filename, &error_msg); + if (!error_msg.empty()) + { + llvm::errs() << "Failed: " << error_msg << "\n"; + return -1; + } + result = mlir::Circle::dumpCircleOps(output->os(), context, module); + if (result == 0) + output->keep(); + + return result; + } + + std::string serialized_flatbuffer; + if (!mlir::Circle::MlirToFlatBufferTranslateFunction(module.get(), &serialized_flatbuffer)) + return -1; + auto output = mlir::openOutputFile(targetfile, &error_msg); + // TODO error handle + output->os() << serialized_flatbuffer; + output->keep(); + + return 0; +} + +} // namespace onnx2circle + +// NOTE sync version number with 'infra/debian/*/changelog' for upgrade +const char *__version = "0.2.0"; + +int entry(const O2Cparam ¶m) +{ + int result = onnx2circle::convertToCircle(param); + return result; +} diff --git a/circle-mlir/circle-mlir/tools/onnx2circle/src/onnx2circle.h b/circle-mlir/circle-mlir/tools/onnx2circle/src/onnx2circle.h new file mode 100644 index 00000000000..8fc7f6f82ed --- /dev/null +++ b/circle-mlir/circle-mlir/tools/onnx2circle/src/onnx2circle.h @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2025 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __ONNX2CIRCLE_H__ +#define __ONNX2CIRCLE_H__ + +#include + +struct O2Cparam +{ + std::string sourcefile; + std::string targetfile; + + bool save_ops = false; + bool unroll_rnn = false; + bool unroll_lstm = false; + bool unfold_batchmatmul = false; + bool check_shapeinf = false; + bool check_dynshapeinf = false; + // TODO add more if necessary +}; + +extern const char *__version; + +int entry(const O2Cparam ¶m); + +#endif // __ONNX2CIRCLE_H__ diff --git a/circle-mlir/circle-mlir/tools/onnx2circle/src/onnx2circle.test.cpp b/circle-mlir/circle-mlir/tools/onnx2circle/src/onnx2circle.test.cpp new file mode 100644 index 00000000000..c5ed732207b --- /dev/null +++ b/circle-mlir/circle-mlir/tools/onnx2circle/src/onnx2circle.test.cpp @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2025 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include <mlir/IR/BuiltinOps.h> +#include <mlir/IR/MLIRContext.h> + +#include <string> + +// declare methods of onnx2circle.cpp to test +namespace onnx2circle +{ + +int loadONNX(const std::string &onnx_path, mlir::MLIRContext &context, + mlir::OwningOpRef<mlir::ModuleOp> &module); + +} // namespace onnx2circle + +#include <gtest/gtest.h> + +TEST(LoadONNXTest, NonExistFile_NEG) +{ + mlir::MLIRContext context; + mlir::OwningOpRef<mlir::ModuleOp> module; + + std::string invalid_filename = "/no_such_folder/no_such_file_in_storage.mlir"; + auto result = onnx2circle::loadONNX(invalid_filename, context, module); + ASSERT_NE(0, result); + + invalid_filename = "/no_such_folder/no_such_file_in_storage.onnx"; + result = onnx2circle::loadONNX(invalid_filename, context, module); + ASSERT_NE(0, result); +} + +TEST(LoadONNXTest, NotSupportedExtension_NEG) +{ + std::string invalid_filename = "somefile.blabla"; + + mlir::MLIRContext context; + mlir::OwningOpRef<mlir::ModuleOp> module; + + auto result = onnx2circle::loadONNX(invalid_filename, context, module); + ASSERT_NE(0, result); +} diff --git a/circle-mlir/circle-mlir/tools/onnx2circle/test.lst b/circle-mlir/circle-mlir/tools/onnx2circle/test.lst new file mode 100644 index 00000000000..627d96bd957 --- /dev/null +++ b/circle-mlir/circle-mlir/tools/onnx2circle/test.lst @@ -0,0 +1,4 @@ +## EXAMPLE +# +# ConvertUnitModel(test_mode.mlir) +# diff --git a/circle-mlir/externals/CMakeLists.txt b/circle-mlir/externals/CMakeLists.txt index 5bc5d81bfd7..ddf08ae6edb 100644 --- a/circle-mlir/externals/CMakeLists.txt +++ b/circle-mlir/externals/CMakeLists.txt @@ -15,9 +15,55 @@ if(NOT CMAKE_BUILD_TYPE) endif(NOT CMAKE_BUILD_TYPE) message(STATUS "Build type: ${CMAKE_BUILD_TYPE}") +# NOTE: ld.gold has some problem with pybind11 +option(CIRCLE_MLIR_USE_GOLD "Use ld.gold linker for llvm-project" OFF) +option(CIRCLE_MLIR_USE_CLANG "Use clang and lld for llvm-project and onnx-mlir" OFF) + include(ExternalProject) -set(EXTERNALS_BUILD_INST_DIR ${CMAKE_BINARY_DIR}) +if(DEFINED ENV{CIRCLE_MLIR_LOCALINST}) + set(CIRCLE_MLIR_LOCALINST $ENV{CIRCLE_MLIR_LOCALINST}) 
+endif() + +if(DEFINED ENV{CIRCLE_MLIR_USE_GOLD}) + set(CIRCLE_MLIR_USE_GOLD ON) +endif() + +if(DEFINED ENV{CIRCLE_MLIR_USE_CLANG}) + set(CIRCLE_MLIR_USE_CLANG ON) +endif() + +if(CIRCLE_MLIR_USE_GOLD AND CIRCLE_MLIR_USE_CLANG) + message(FATAL_ERROR "CIRCLE_MLIR_USE_GOLD and CIRCLE_MLIR_USE_CLANG are exclusive option.") +endif() + +# Use gcc + gold +if(CIRCLE_MLIR_USE_GOLD) + set(CIRCLE_MLIR_GOLD_OPTION -DCMAKE_EXE_LINKER_FLAGS='-Wl,-no-keep-memory,-fuse-ld=gold' -DLLVM_USE_LINKER=gold) + message(STATUS "Use linker ld.gold: ${CIRCLE_MLIR_GOLD_OPTION}") +endif() + +# Use clang + lld +if(CIRCLE_MLIR_USE_CLANG) + find_program(CLANG_COMPILER clang) + if(NOT EXISTS ${CLANG_COMPILER}) + message(FATAL_ERROR "Clang is not available.") + endif() + find_program(LLD_LINKER lld) + if(NOT EXISTS ${LLD_LINKER}) + message(FATAL_ERROR "LLD is not available.") + endif() + set(CIRCLE_MLIR_CLANG_OPTION -DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++) + set(CIRCLE_MLIR_CLANG_OPTION ${CIRCLE_MLIR_CLANG_OPTION} -DLLVM_ENABLE_LLD=ON) + message(STATUS "Use Clang and LLD linker: ${CIRCLE_MLIR_CLANG_OPTION}") +endif() + +if(CIRCLE_MLIR_LOCALINST) + message(STATUS "CIRCLE_MLIR_LOCALINST=${CIRCLE_MLIR_LOCALINST}") + set(EXTERNALS_BUILD_INST_DIR ${CIRCLE_MLIR_LOCALINST}) +else() + set(EXTERNALS_BUILD_INST_DIR ${CMAKE_BINARY_DIR}) +endif() set(FB_BUILD_DIR "${EXTERNALS_BUILD_INST_DIR}/flatbuffers-build") set(FB_INSTALL_DIR "${EXTERNALS_BUILD_INST_DIR}/flatbuffers-install") @@ -66,8 +112,14 @@ ExternalProject_Add(externals-llvm-project INSTALL_DIR "${LP_INSTALL_DIR}" CMAKE_ARGS -DLLVM_ENABLE_PROJECTS=mlir -DLLVM_TARGETS_TO_BUILD=host + -DLLVM_ENABLE_TERMINFO=OFF + -DLLVM_ENABLE_ZLIB=OFF + -DLLVM_ENABLE_ZSTD=OFF + -DLLVM_INCLUDE_TESTS=OFF -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} -DCMAKE_INSTALL_PREFIX='${LP_INSTALL_DIR}' + ${CIRCLE_MLIR_GOLD_OPTION} + ${CIRCLE_MLIR_CLANG_OPTION} -DLLVM_ENABLE_ASSERTIONS=ON -DLLVM_ENABLE_RTTI=ON # NOTE externals-flatbuffers externals-abseil-cpp are added to 
DEPENDS only @@ -105,6 +157,7 @@ ExternalProject_Add(externals-onnx-mlir CMAKE_ARGS -DCMAKE_BUILD_TYPE=Release -DCMAKE_PREFIX_PATH='${CMAKE_PREFIX_PATH}' -DCMAKE_INSTALL_PREFIX='${OM_INSTALL_DIR}' + ${CIRCLE_MLIR_CLANG_OPTION} -DMLIR_DIR='${LP_BUILD_DIR}/lib/cmake/mlir' -DPython3_ROOT_DIR='$ENV{Python3_ROOT_DIR}' -DONNX_MLIR_BUILD_TESTS=OFF