diff --git a/circle-mlir/circle-mlir/lib/dialect/mlir/CircleOps.td b/circle-mlir/circle-mlir/lib/dialect/mlir/CircleOps.td
new file mode 100644
index 00000000000..f7217a69654
--- /dev/null
+++ b/circle-mlir/circle-mlir/lib/dialect/mlir/CircleOps.td
@@ -0,0 +1,296 @@
+/*
+ * Copyright (c) 2025 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2019 The TensorFlow Authors. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// from tensorflow/compiler/mlir/lite/ir/tfl_ops.td
+
+#ifndef CIRCLE_OPS
+#define CIRCLE_OPS
+
+include "mlir/IR/FunctionInterfaces.td"
+include "mlir/IR/OpBase.td"
+include "mlir/Interfaces/InferTypeOpInterface.td"
+include "mlir/Interfaces/SideEffectInterfaces.td"
+
+include "mlir/CircleOpInterfaces.td"
+include "mlir/CircleShapeInferenceInterfaces.td"
+include "mlir/CircleOpEnums.td"
+
+//===----------------------------------------------------------------------===//
+// Derived shape attribute class.
+//===----------------------------------------------------------------------===//
+
+// Derived attribute carrying the serialized Circle tensor type; `body`
+// computes the value and `convert` turns it back into an mlir::Attribute.
+class DerivedCircleTypeAttr<code body, code convert> :
+  DerivedAttr<"circle::TensorType", body, convert>;
+
+// CIR Runtime op trait predicate.
+// Trait whose predicate is verified at Circle-runtime-compatibility check
+// time (not by the MLIR verifier); collected by the runtime-verifier
+// tablegen backend.
+class CIR_RuntimePredOpTrait<string desc, Pred pred> :
+    GenInternalOpTrait<"CIRRuntimeOpTrait"> {
+  Pred cirRuntimePredicate = pred;
+  string cirRuntimeDescription = desc;
+}
+
+class CIR_OperandsHaveSameShapesOrBroadcastableShape<
+    list<int> indices, int max_bcast_rank> :
+  CIR_RuntimePredOpTrait<"operands do not have the same shape or "
+      "broadcastable shapes within the rank " # max_bcast_rank,
+    CPred<"Circle::VerifyOperandsHaveSameShapesOrBroadcastableShape("
+            "$_op, llvm::ArrayRef<unsigned>({" # !interleave(indices, ", ") #
+            "}), " # max_bcast_rank # ")">>;
+
+// Returns true if the n-th operand has unknown rank or at least rank m.
+class CIR_OperandHasAtleastRank<int n, int m> :
+  PredOpTrait<"operand " # n # " is " # m # "-D",
+    Or<[CPred<"$_op.getOperand(" # n # ").getType().isa<UnrankedTensorType>()">,
+      CPred<"$_op.getOperand(" # n #
+        ").getType().cast<ShapedType>().getRank() >= " # m>]>>;
+
+// CIR Runtime type predicate.
+class CIR_RuntimeType<TypeConstraint t> {
+  Pred cirRuntimeTypePredicate = t.predicate;
+  string cirRuntimeTypeDescription = t.summary;
+}
+
+// Tensor whose op-level element types may be wider than what the Circle
+// runtime actually supports; the runtime set is carried separately.
+class CIR_TensorOf<list<Type> allowedRuntimeTypes,
+                   list<Type> allowedOpTypes = [AnyType]> :
+  TensorOf<allowedOpTypes>, CIR_RuntimeType<TensorOf<allowedRuntimeTypes>> {
+  // Set the summary equal to that representing the runtime types.
+  let summary = TensorOf<allowedRuntimeTypes>.summary;
+}
+
+class CIR_TensorOfOrNone<list<Type> allowedRuntimeTypes, string description = "",
+                         list<Type> allowedOpTypes = [AnyType]> :
+  AnyTypeOf<[CIR_TensorOf<allowedOpTypes>, NoneType], description>,
+  CIR_RuntimeType<AnyTypeOf<[CIR_TensorOf<allowedRuntimeTypes>, NoneType]>>;
+
+class CIR_VariadicTensorOf<list<Type> allowedRuntimeTypes,
+                           list<Type> allowedOpTypes = [AnyType]> :
+  Variadic<TensorOf<allowedOpTypes>>,
+  CIR_RuntimeType<Variadic<TensorOf<allowedRuntimeTypes>>>;
+
+def CIR_Int32Or64 : SignlessIntOfWidths<[32, 64]>;
+
+def CIR_BoolTensor : CIR_TensorOf<[I1]>;
+def CIR_FpTensor : CIR_TensorOf<[F32]>;
+def CIR_I32OrI64Tensor : CIR_TensorOf<[CIR_Int32Or64]>;
+def CIR_I32Tensor : CIR_TensorOf<[I32]>;
+
+class CIR_0DTensorOf<list<Type> allowedRuntimeTypes,
+                     list<Type> allowedOpTypes = [AnyType]> :
+  0DTensorOf<allowedOpTypes>, CIR_RuntimeType<TensorOf<allowedRuntimeTypes>>;
+class CIR_1DTensorOf<list<Type> allowedRuntimeTypes,
+                     list<Type> allowedOpTypes = [AnyType]> :
+  1DTensorOf<allowedOpTypes>, CIR_RuntimeType<TensorOf<allowedRuntimeTypes>>;
+
+class CIR_1DTensorOfOrNone<list<Type> allowedRuntimeTypes, string description = "",
+                           list<Type> allowedOpTypes = [AnyType]> :
+  AnyTypeOf<[TensorOf<allowedOpTypes>, NoneType], description>,
+  CIR_RuntimeType<AnyTypeOf<[CIR_1DTensorOf<allowedRuntimeTypes>, NoneType]>>;
+
+//===----------------------------------------------------------------------===//
+// Rank/Shape helpers.
+//===----------------------------------------------------------------------===//
+
+class CIR_OperandIsUnrankedPred<int n> :
+  CPred<"$_op.getOperand(" # n # ").getType().isa<UnrankedTensorType>()">;
+
+// TODO: Some of these could be generalized and/or moved to more general
+// location.
+// Returns true if the n-th operand has unknown rank or has rank m.
+class CIR_OperandHasRank<int n, int m> :
+  PredOpTrait<"operand " # n # " is " # m # "-D",
+    Or<[CIR_OperandIsUnrankedPred<n>,
+      CPred<"$_op.getOperand(" # n #
+        ").getType().cast<ShapedType>().getRank() == " # m>]>>;
+
+// Result i and operand j are both unsigned integers of the given bit width.
+class CIR_TFTypesWithSameBits<int i, int j, int num> :
+  And<[CPred<"getElementTypeOrSelf($_op.getResult(" # i # ")).isUnsignedInteger(" # num # ")">,
+       CPred<"getElementTypeOrSelf($_op.getOperand(" # j # ")).isUnsignedInteger(" # num # ")">]>;
+
+// Operands i and j are both unsigned integers of the given bit width.
+// The quantized-type alternatives are kept commented out until the
+// quantization types are enabled for this dialect.
+class CIR_TFOperandTypesWithSameBits<int i, int j, int num> :
+  And<[
+    Or<[/*CPred<"getElementTypeOrSelf($_op.getOperand(" # i # ")).isa<mlir::TF::Quint" # num # "Type>()">,*/
+        CPred<"getElementTypeOrSelf($_op.getOperand(" # i # ")).isUnsignedInteger(" # num # ")">]>,
+    Or<[/*CPred<"getElementTypeOrSelf($_op.getOperand(" # j # ")).isa<mlir::TF::Quint" # num # "Type>()">,*/
+        CPred<"getElementTypeOrSelf($_op.getOperand(" # j # ")).isUnsignedInteger(" # num # ")">]>]>;
+
+class CIR_OperandHasRankAtMostPred<int n, int m> :
+  Or<[CIR_OperandIsUnrankedPred<n>,
+    CPred<"$_op.getOperand(" # n #
+      ").getType().cast<ShapedType>().getRank() <= " # m>]>;
+
+// True if operand n is ranked and has a rank > dim.
+class CIR_OperandIsRankedAndHasDimPred<int n, int dim> : And<[
+  CPred<"$_op.getOperand(" # n # ").getType().isa<RankedTensorType>()">,
+  CPred<"$_op.getOperand(" # n # ").getType().cast<RankedTensorType>().getRank() > "
+    # dim>]>;
+
+// Returns true if the n-th operand is ranked and has a dimension length <=
+// size at the rank dim.
+class CIR_OperandDimIsAtMost<int n, int dim, int size> : And<[
+  CIR_OperandIsRankedAndHasDimPred<n, dim>,
+  CPred<"$_op.getOperand(" # n # ").getType().cast<RankedTensorType>()"
+      ".getShape()[" # dim # "] <= " # size>]>;
+
+// Operand x's rank must equal the static length of operand y's first
+// dimension; vacuously true when either is unranked or y's shape is dynamic.
+class CIR_OperandRankEquals1DimOfOperand<int x, int y> :
+  PredOpTrait<"operand " # x # "'s rank equals operand " # y # "'s size",
+    Or<[CIR_OperandIsUnrankedPred<x>,
+        CIR_OperandIsUnrankedPred<y>,
+        CPred<"!$_op.getOperand(" # y #
+          ").getType().cast<ShapedType>().hasStaticShape()">,
+        CPred<"$_op.getOperand(" # x #
+          ").getType().cast<ShapedType>().getRank() == "
+          "$_op.getOperand(" # y #
+          ").getType().cast<ShapedType>().getShape()[0]">]>>;
+
+class CIR_OperandHasRankAtMost<int n, int m> :
+  PredOpTrait<"operand " # n # " is at most " # m # "-D",
+    CIR_OperandHasRankAtMostPred<n, m>>;
+
+class CIR_OperandHasRankAtLeast<int n, int m> :
+  PredOpTrait<"operand " # n # " is at least " # m # "-D",
+    Or<[CIR_OperandIsUnrankedPred<n>,
+      CPred<"$_op.getOperand(" # n #
+        ").getType().cast<ShapedType>().getRank() >= " # m>]>>;
+
+// Ensures the array attribute's size is within the given maximum size.
+class CIR_ArrayMaxCount<int n> : AttrConstraint<
+    CPred<"$_self.isa<ArrayAttr>() && $_self.cast<ArrayAttr>().size() <= " # n>,
+    "whose size is at most " # n>;
+
+// This is a quantization-aware version of TCresVTEtIsSameAsOp
+class CIR_TCresVTEtIsSameAsOp<int i, int j> : And<[
+  TCOpResIsShapedTypePred<[i], [j]>,
+  Or<[
+    TCresVTEtIsSameAsOpBase<i, j>,
+    CIR_TFTypesWithSameBits<i, j, 8>/* TODO enable,
+    And<[
+      SubstLeaves<"$_self", "getElementTypeOrSelf($_op.getOperand(" # j # "))",
+        quant_QuantizedType.predicate>,
+      CPred<"quant::QuantizedType::castToStorageType("
+              "getElementTypeOrSelf($_op.getResult(" # i # "))) == "
+            "quant::QuantizedType::castToStorageType("
+              "getElementTypeOrSelf($_op.getOperand(" # j # ")))">]>*/]>]>;
+
+// This is a quantization-aware version of TCopVTEtAreSameAt
+class CIR_TCopVTEtAreSameAt<int i, int j, int num = 8> : Or<[
+  TCopVTEtAreSameAt<[i, j]>,
+  CIR_TFOperandTypesWithSameBits<i, j, num>/*,
+  And<[
+    SubstLeaves<"$_self", "getElementTypeOrSelf($_op.getOperand(" # j # "))",
+      quant_QuantizedType.predicate>,
+    CPred<"quant::QuantizedType::castToStorageType("
+            "getElementTypeOrSelf($_op.getOperand(" # i # "))) == "
+          "quant::QuantizedType::castToStorageType("
+            "getElementTypeOrSelf($_op.getOperand(" # j # ")))">]>*/]>;
+
+def CIR_SameFirstOperandAndFirstResultElementType :
+  PredOpTrait<"values and output must have same element type",
+              CIR_TCresVTEtIsSameAsOp<0, 0>>;
+
+//===----------------------------------------------------------------------===//
+// CIR op common constraints.
+//===----------------------------------------------------------------------===//
+
+class OperandsSameElementTypeConstraintBase<string op> :
+  PredOpTrait<op # " operands have same element type",
+    Or<[
+      TCopVTEtIsSameAs<0, 1>/*,
+      // Two operands' values are both quantized and their type have the same
+      // underlying storage type.
+      And<[
+        SubstLeaves<"$_self", "getElementTypeOrSelf($_op.getOperand(0))",
+          quant_QuantizedType.predicate>,
+        CPred<"quant::QuantizedType::castToStorageType("
+                "getElementTypeOrSelf($_op.getOperand(0))) == "
+              "quant::QuantizedType::castToStorageType("
+                "getElementTypeOrSelf($_op.getOperand(1)))">]>*/]>>;
+
+// This is a constraint for most of the binary ops, e.g., add, mul, div, etc.
+// Binary ops lhs & rhs should have the same value type, and is capable to
+// compare quantization types as well.
+def BinaryOpSameElementTypeConstraint :
+  OperandsSameElementTypeConstraintBase<"binary op">;
+
+// This is a constraint for most of the comparison ops, e.g., equal, not_equal,
+// greater, greater_equal, less, etc. Comparison ops lhs & rhs should have the
+// same value type, and is capable to compare quantization types as well.
+def ComparisonOpSameElementTypeConstraint :
+  OperandsSameElementTypeConstraintBase<"comparison op">;
+
+//===----------------------------------------------------------------------===//
+// CIR common builders.
+//===----------------------------------------------------------------------===//
+
+// Builder for binary ops whose result type is the broadcast of the two
+// operand types; emits an error at the op location when the operands are
+// not broadcastable.
+def CIR_BroadcastableBinaryBuilder :
+  OpBuilder<(ins "Value":$lhs, "Value":$rhs),
+  [{
+    auto resultType =
+      OpTrait::util::getBroadcastedType(lhs.getType(), rhs.getType());
+    if (!resultType)
+      mlir::emitError($_state.location, "non-broadcastable operands");
+    $_state.addOperands({lhs, rhs});
+    $_state.types.push_back(resultType);
+  }]>;
+
+// NOTE(review): the base-class argument list was lost in extraction; the
+// reconstruction below follows the upstream TFL_Op pattern — confirm the
+// dialect symbol and runtime-verification interface names against
+// CircleOpInterfaces.td.
+class CIR_Op<string mnemonic, list<Trait> traits = []> :
+  Op<CIR_Dialect, mnemonic, !listconcat(traits,
+     [DeclareOpInterfaceMethods<CIR_RuntimeVerification>])> {
+  // FlatBuffer generation specific information.
+  // -------------------------------------------
+  // When generating the FlatBuffer output some operations have
+  // Options (as defined in the schema). These options are effectively
+  // the attributes of the operations (e.g., what padding is to be used
+  // for a pooling operator). Not all operations have Options and some
+  // operations share Options. The following attributes indicate whether
+  // the operation has Options in the serialized FlatBuffer.
+
+  // Whether the Circle operator has options in the schema representation.
+  bit hasOptions = 0b0;
+
+  // Use to specify a custom options type for Circle operators where
+  // the option's name does not match the Circle operator's name.
+  // If no customOption is specified then Options is used if the op
+  // hasOptions.
+  string customOption = ?;
+}
+
+// NOTE 3'rd argument int index is removed, add when needed
+class CIR_ConvOp<string mnemonic, string opSummary,
+                 list<Trait> additional_traits = []> :
+  CIR_Op<mnemonic, [Pure,
+                    // TODO enable AffineQuantizedOpInterface,
+                    // TODO enable AffineOpCoefficient,
+                    // TODO enable QuantizableResult,
+                    CIR_SparseOp] # additional_traits> {
+  let summary = opSummary # " operator";
+
+  let description = [{
+    Performs convolution operation on inputs.
+
+    Inputs:
+      `inputs[0]`: required: the input activation tensor
+      `inputs[1]`: required: the filter weight tensor
+      `inputs[2]`: optional: the bias tensor
+  }];
+
+  let results = (outs CIR_TensorOf<[F32/*TODO enable, QI8, QUI8, QI16*/]>:$output);
+
+  let hasOptions = 0b1;
+}
+
+#endif // CIRCLE_OPS
diff --git a/circle-mlir/circle-mlir/lib/dialect/mlir/TableGen.cmake b/circle-mlir/circle-mlir/lib/dialect/mlir/TableGen.cmake
index 71a82feb892..653e2399ee0 100644
--- a/circle-mlir/circle-mlir/lib/dialect/mlir/TableGen.cmake
+++ b/circle-mlir/circle-mlir/lib/dialect/mlir/TableGen.cmake
@@ -19,4 +19,12 @@ mlir_tablegen(mlir/CircleOpsEnums.cc.inc -gen-enum-defs)
 mlir_tablegen(mlir/CircleOpsAttrdefs.h.inc -gen-attrdef-decls)
 mlir_tablegen(mlir/CircleOpsAttrdefs.cc.inc -gen-attrdef-defs)
 
+set(LLVM_TARGET_DEFINITIONS mlir/CircleOps.td)
+mlir_tablegen(mlir/CircleOps.h.inc -gen-op-decls)
+mlir_tablegen(mlir/CircleOps.cc.inc -gen-op-defs)
+
+set(LLVM_TARGET_DEFINITIONS mlir/CircleOps.td)
+cir_convertergen(mlir/OperatorConverters.inc --gen-operator-converters)
+cir_convertergen(mlir/RuntimeVerifiers.inc --gen-runtime-verifiers)
+
 add_public_tablegen_target(circle_mlir_gen_inc)