Skip to content

Commit bbb09e9

Browse files
committed
more operators support
1 parent c64459f commit bbb09e9

11 files changed

Lines changed: 413 additions & 128 deletions

File tree

onnxruntime/core/providers/coreml/builders/impl/base_op_builder.cc

Lines changed: 3 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -13,14 +13,6 @@ using namespace CoreML::Specification;
1313
namespace onnxruntime {
1414
namespace coreml {
1515

16-
// Once all ops support FP16, we can remove this. Until then, we keep a set of ops to
17-
// filter the supported ones.
18-
static std::set<std::string> Float16Ops = {
19-
"Add", "ArgMax", "AveragePool", "BatchNormalization", "Cast", "Clip", "Concat", "Conv", "ConvTranspose",
20-
"DepthToSpace", "Div", "Gelu", "Gemm", "GlobalAveragePool", "GlobalMaxPool", "GridSample", "GroupNormalization",
21-
"InstanceNormalization", "LayerNormalization", "LeakyRelu", "MatMul", "MaxPool", "Mul", "PRelu", "Pow",
22-
"Reciprocal", "Relu", "Reshape", "Resize", "Sigmoid", "Slice", "Split", "Sqrt", "Sub", "Tanh", "Transpose"};
23-
2416
namespace {
2517
// TODO, move this to shared_library
2618
bool HasExternalInitializer(const InitializedTensorSet& initializers, const Node& node,
@@ -114,13 +106,10 @@ bool BaseOpBuilder::IsInputDtypeSupport(const Node& node, size_t idx,
114106
return true;
115107
}
116108

117-
// only support MLProgram for FP16
118-
#if defined(COREML_ENABLE_MLPROGRAM)
119-
if (input_params.create_mlprogram && input_type == ONNX_NAMESPACE::TensorProto_DataType_FLOAT16 &&
120-
Float16Ops.count(node.OpType())) {
121-
return true;
109+
// only support MLProgram for FP16
110+
if (!input_params.create_mlprogram && input_type == ONNX_NAMESPACE::TensorProto_DataType_FLOAT16) {
111+
return false;
122112
}
123-
#endif
124113

125114
LOGS(logger, VERBOSE) << "[" << node.OpType() << "] Input type: [" << input_type << "] is not currently supported";
126115
return false;

onnxruntime/core/providers/coreml/builders/impl/binary_op_builder.cc

Lines changed: 33 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@
66
#include "core/providers/coreml/builders/helper.h"
77
#include "core/providers/coreml/builders/impl/base_op_builder.h"
88
#include "core/providers/coreml/builders/impl/builder_utils.h"
9+
#include "core/providers/coreml/shape_utils.h"
910
#include "core/providers/coreml/builders/model_builder.h"
1011
#include "core/providers/coreml/builders/op_builder_factory.h"
1112
#include "core/providers/shared/utils/utils.h"
@@ -70,6 +71,8 @@ Status BinaryOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, const
7071
coreml_op_type = "add";
7172
} else if (op_type == "Mul") {
7273
coreml_op_type = "mul";
74+
} else if (op_type == "Max") {
75+
coreml_op_type = "maximum";
7376
} else if (op_type == "Sub") {
7477
coreml_op_type = "sub";
7578
} else if (op_type == "Div") {
@@ -86,8 +89,33 @@ Status BinaryOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, const
8689
std::unique_ptr<Operation> op = model_builder.CreateOperation(node, coreml_op_type);
8790
AddOperationInput(*op, "x", input_defs[0]->Name());
8891
AddOperationInput(*op, "y", input_defs[1]->Name());
92+
if (input_defs.size() > 2) {
93+
std::string_view layer_input_name_x = model_builder.GetUniqueName(node, "variadic");
94+
auto input_dtype = input_defs[0]->TypeAsProto()->tensor_type().elem_type();
95+
const int32_t elem_type = static_cast<int32_t>(input_dtype);
96+
std::vector<int64_t> x0_shape, x1_shape;
97+
GetShape(*input_defs[0], x0_shape, logger);
98+
GetShape(*input_defs[1], x1_shape, logger);
99+
for (size_t i = 0; i < x0_shape.size(); i++) {
100+
x0_shape[i] = std::max(x0_shape[i], x1_shape[i]);
101+
}
102+
std::unique_ptr<Operation> op_prev = std::move(op);
103+
for (size_t i = 2; i < input_defs.size(); i++) {
104+
AddIntermediateOperationOutput(*op_prev, layer_input_name_x, elem_type, x0_shape);
105+
std::unique_ptr<Operation> op_cur = model_builder.CreateOperation(node, coreml_op_type);
106+
AddOperationInput(*op_cur, "x", layer_input_name_x);
107+
AddOperationInput(*op_cur, "y", input_defs[i]->Name());
108+
model_builder.AddOperation(std::move(op_prev));
109+
op_prev = std::move(op_cur);
110+
layer_input_name_x = model_builder.GetUniqueName(node, "variadic");
111+
GetShape(*input_defs[i], x1_shape, logger);
112+
for (size_t i = 0; i < x0_shape.size(); i++) {
113+
x0_shape[i] = std::max(x0_shape[i], x1_shape[i]);
114+
}
115+
}
116+
op = std::move(op_prev);
117+
}
89118
AddOperationOutput(*op, *node.OutputDefs()[0]);
90-
91119
model_builder.AddOperation(std::move(op));
92120
} else
93121
#endif // defined (COREML_ENABLE_MLPROGRAM)
@@ -157,6 +185,10 @@ bool BinaryOpBuilder::HasSupportedInputsImpl(const Node& node, const OpBuilderIn
157185
return false;
158186
}
159187

188+
if (node.OpType() == "Max" && !input_params.create_mlprogram) {
189+
return false;
190+
}
191+
160192
return true;
161193
}
162194

onnxruntime/core/providers/coreml/builders/impl/reduction_op_builder.cc

Lines changed: 55 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@
55
#include "core/providers/common.h"
66
#include "core/providers/coreml/builders/helper.h"
77
#include "core/providers/coreml/builders/impl/base_op_builder.h"
8+
#include "core/providers/coreml/builders/impl/builder_utils.h"
89
#include "core/providers/coreml/builders/model_builder.h"
910
#include "core/providers/coreml/builders/op_builder_factory.h"
1011
#include "core/providers/shared/utils/utils.h"
@@ -20,6 +21,7 @@ class ReductionOpBuilder : public BaseOpBuilder {
2021

2122
bool IsOpSupportedImpl(const Node& node, const OpBuilderInputParams& input_params,
2223
const logging::Logger& logger) const override;
24+
bool SupportsMLProgram() const override { return true; }
2325
};
2426

2527
namespace {
@@ -48,13 +50,12 @@ Status ReductionOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, co
4850
const logging::Logger& /* logger */) const {
4951
const auto& op_type(node.OpType());
5052
const auto& input_defs(node.InputDefs());
51-
const auto& initializers(model_builder.GetInitializerTensors());
5253

5354
std::vector<int64_t> axes;
5455

5556
NodeAttrHelper helper(node);
5657
if (input_defs.size() > 1 && input_defs[1]->Exists()) {
57-
auto& axes_tensor = *initializers.at(input_defs[1]->Name());
58+
auto& axes_tensor = *model_builder.GetConstantInitializer(input_defs[1]->Name());
5859
Initializer axes_initializer(axes_tensor);
5960
int64_t* data = axes_initializer.data<int64_t>();
6061
int64_t size = axes_initializer.size();
@@ -66,22 +67,53 @@ Status ReductionOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, co
6667

6768
const bool keepdims = helper.Get("keepdims", 1) != 0;
6869
const bool noop_with_empty_axes = helper.Get("noop_with_empty_axes", 0) != 0;
70+
#if defined(COREML_ENABLE_MLPROGRAM)
71+
if (model_builder.CreateMLProgram()) {
72+
using namespace CoreML::Specification::MILSpec;
73+
74+
std::string_view coreml_op_type;
75+
if (noop_with_empty_axes && axes.size() == 0) {
76+
coreml_op_type = "identity";
77+
} else if (op_type == "ReduceSum") {
78+
coreml_op_type = "reduce_sum";
79+
} else if (op_type == "ReduceMean") {
80+
coreml_op_type = "reduce_mean";
81+
} else if (op_type == "ReduceMax") {
82+
coreml_op_type = "reduce_max";
83+
} else {
84+
return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT,
85+
"ReductionOpBuilder::AddToModelBuilderImpl, unexpected op: ", op_type);
86+
}
87+
std::unique_ptr<Operation> op = model_builder.CreateOperation(node, coreml_op_type);
88+
AddOperationInput(*op, "x", input_defs[0]->Name());
89+
if (coreml_op_type != "identity") {
90+
if (axes.size() > 0) {
91+
AddOperationInput(*op, "axes", model_builder.AddConstant(op->type(), "axes", axes));
92+
}
93+
AddOperationInput(*op, "keep_dims", model_builder.AddScalarConstant(op->type(), "keep_dims", keepdims));
94+
}
95+
AddOperationOutput(*op, *node.OutputDefs()[0]);
96+
97+
model_builder.AddOperation(std::move(op));
98+
} else
99+
#endif // (COREML_ENABLE_MLPROGRAM)
100+
{
101+
std::unique_ptr<COREML_SPEC::NeuralNetworkLayer> layer = model_builder.CreateNNLayer(node);
102+
103+
if (op_type == "ReduceSum") {
104+
AddReductionParams(layer->mutable_reducesum(), axes, keepdims, noop_with_empty_axes);
105+
} else if (op_type == "ReduceMean") {
106+
AddReductionParams(layer->mutable_reducemean(), axes, keepdims, noop_with_empty_axes);
107+
} else {
108+
return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT,
109+
"ReductionOpBuilder::AddToModelBuilderImpl, unknown op: ", op_type);
110+
}
69111

70-
std::unique_ptr<COREML_SPEC::NeuralNetworkLayer> layer = model_builder.CreateNNLayer(node);
112+
*layer->mutable_input()->Add() = node.InputDefs()[0]->Name();
113+
*layer->mutable_output()->Add() = node.OutputDefs()[0]->Name();
71114

72-
if (op_type == "ReduceSum") {
73-
AddReductionParams(layer->mutable_reducesum(), axes, keepdims, noop_with_empty_axes);
74-
} else if (op_type == "ReduceMean") {
75-
AddReductionParams(layer->mutable_reducemean(), axes, keepdims, noop_with_empty_axes);
76-
} else {
77-
return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT,
78-
"ReductionOpBuilder::AddToModelBuilderImpl, unknown op: ", op_type);
115+
model_builder.AddLayer(std::move(layer));
79116
}
80-
81-
*layer->mutable_input()->Add() = node.InputDefs()[0]->Name();
82-
*layer->mutable_output()->Add() = node.OutputDefs()[0]->Name();
83-
84-
model_builder.AddLayer(std::move(layer));
85117
return Status::OK();
86118
}
87119

@@ -99,21 +131,21 @@ bool ReductionOpBuilder::IsOpSupportedImpl(const Node& node, const OpBuilderInpu
99131
if (input_defs.size() > 1 && input_defs[1]->Exists()) {
100132
// 'axes' is optional input in new opsets
101133
const auto& axes_name = input_defs[1]->Name();
102-
const auto& initializers = input_params.graph_viewer.GetAllInitializedTensors();
103-
if (!Contains(initializers, axes_name)) {
134+
const auto* axes = input_params.graph_viewer.GetConstantInitializer(axes_name);
135+
if (!axes) {
104136
LOGS(logger, VERBOSE) << "Axes of reduction must be a constant initializer";
105137
return false;
106138
}
107139

108-
empty_axes = initializers.at(axes_name)->int64_data_size() == 0;
140+
empty_axes = axes->int64_data_size() == 0;
109141
}
110-
111-
if (empty_axes && noop_with_empty_axes) {
112-
// TODO: When we add ML Program support we should enable this as it makes the node an Identity op
113-
LOGS(logger, VERBOSE) << "CoreML doesn't support noop on empty axes for reduction layers" << std::endl;
142+
if (empty_axes && noop_with_empty_axes && !input_params.create_mlprogram) {
143+
LOGS(logger, VERBOSE) << "CoreML doesn't support noop on empty axes for reduction layers";
144+
return false;
145+
}
146+
if (!input_params.create_mlprogram && node.OpType() == "ReduceMax") {
114147
return false;
115148
}
116-
117149
return true;
118150
}
119151

onnxruntime/core/providers/coreml/builders/impl/shape_op_builder.cc

Lines changed: 104 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,9 @@
22
// Licensed under the MIT License.
33

44
#include "core/providers/coreml/builders/impl/base_op_builder.h"
5+
#include "core/providers/coreml/builders/impl/builder_utils.h"
56
#include "core/providers/coreml/builders/model_builder.h"
7+
#include "core/providers/coreml/shape_utils.h"
68
#include "core/providers/coreml/builders/op_builder_factory.h"
79
#include "core/providers/shared/utils/utils.h" // for NodeAttrHelper
810

@@ -14,28 +16,119 @@ class ShapeOpBuilder : public BaseOpBuilder {
1416

1517
bool IsOpSupportedImpl(const Node& node, const OpBuilderInputParams& input_params,
1618
const logging::Logger& logger) const override;
19+
bool HasSupportedInputsImpl(const Node& node, const OpBuilderInputParams& input_params,
20+
const logging::Logger& logger) const override;
21+
bool SupportsMLProgram() const override { return true; }
1722
};
1823

1924
Status ShapeOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, const Node& node,
20-
const logging::Logger& /*logger*/) const {
21-
auto layer = model_builder.CreateNNLayer(node);
22-
layer->mutable_getshape();
23-
*layer->mutable_input()->Add() = node.InputDefs()[0]->Name();
24-
*layer->mutable_output()->Add() = node.OutputDefs()[0]->Name();
25-
model_builder.AddLayer(std::move(layer));
25+
const logging::Logger& logger) const {
26+
const auto& input_defs = node.InputDefs();
27+
28+
#if defined(COREML_ENABLE_MLPROGRAM)
29+
if (model_builder.CreateMLProgram()) {
30+
using namespace CoreML::Specification::MILSpec;
31+
NodeAttrHelper node_attr_helper{node};
32+
int64_t num_dims = input_defs[0]->Shape()->dim_size();
33+
int64_t start = HandleNegativeAxis(node_attr_helper.Get("start", 0), num_dims);
34+
35+
int64_t size = -1;
36+
if (node_attr_helper.HasAttr("end")) {
37+
int64_t end = HandleNegativeAxis(node_attr_helper.Get("end", -1), num_dims);
38+
size = end - start;
39+
}
40+
41+
int32_t output_datatype = ONNX_NAMESPACE::TensorProto_DataType_INT32;
42+
std::unique_ptr<Operation> op = model_builder.CreateOperation(node, "shape");
43+
AddOperationInput(*op, "x", input_defs[0]->Name());
44+
if (size != -1 || start != 0) {
45+
std::string_view layer_input_name_x = model_builder.GetUniqueName(node, "slice_by_size");
46+
std::vector<int64_t> x0_shape{num_dims};
47+
AddIntermediateOperationOutput(*op, layer_input_name_x, output_datatype, x0_shape);
48+
model_builder.AddOperation(std::move(op));
49+
50+
auto slice_op = model_builder.CreateOperation(node, "slice_by_size");
51+
AddOperationInput(*slice_op, "x", layer_input_name_x);
52+
std::vector<int64_t> starts = {start};
53+
std::vector<int64_t> sizes = {size};
54+
AddOperationInput(*slice_op, "begin", model_builder.AddConstant(slice_op->type(), "begin", starts));
55+
AddOperationInput(*slice_op, "size", model_builder.AddConstant(slice_op->type(), "size", sizes));
56+
AddOperationOutput(*slice_op, *node.OutputDefs()[0], output_datatype);
57+
model_builder.AddOperation(std::move(slice_op));
58+
} else {
59+
AddOperationOutput(*op, *node.OutputDefs()[0], output_datatype);
60+
model_builder.AddOperation(std::move(op));
61+
}
62+
} else // NOLINT
63+
#endif
64+
{
65+
auto layer = model_builder.CreateNNLayer(node);
66+
layer->mutable_getshape();
67+
*layer->mutable_input()->Add() = input_defs[0]->Name();
68+
*layer->mutable_output()->Add() = node.OutputDefs()[0]->Name();
69+
model_builder.AddLayer(std::move(layer));
70+
}
2671
return Status::OK();
2772
}
2873

29-
bool ShapeOpBuilder::IsOpSupportedImpl(const Node& node, const OpBuilderInputParams& /*input_params*/,
74+
bool ShapeOpBuilder::IsOpSupportedImpl(const Node& node, const OpBuilderInputParams& input_params,
3075
const logging::Logger& logger) const {
76+
const auto* tensor_shape = node.InputDefs()[0]->Shape();
77+
if (tensor_shape == nullptr) {
78+
return false;
79+
}
80+
3181
NodeAttrHelper node_attr_helper{node};
32-
if (node_attr_helper.Get("start", 0) != 0) {
33-
LOGS(logger, VERBOSE) << "Shape does not support 'start' attribute with value other than 0";
82+
if (!input_params.create_mlprogram) {
83+
if (node_attr_helper.HasAttr("end")) {
84+
LOGS(logger, VERBOSE) << "Shape does not support 'end' attribute";
85+
return false;
86+
}
87+
88+
if (node_attr_helper.Get("start", 0) != 0) {
89+
LOGS(logger, VERBOSE) << "Shape does not support 'start' attribute with value other than 0";
90+
return false;
91+
}
92+
} else {
93+
int64_t size = node_attr_helper.HasAttr("end")
94+
? HandleNegativeAxis(node_attr_helper.Get("end", 0), tensor_shape->dim_size())
95+
: tensor_shape->dim_size();
96+
int64_t start = HandleNegativeAxis(node_attr_helper.Get("start", 0), tensor_shape->dim_size());
97+
size = size - start;
98+
if (size == 0) {
99+
return false;
100+
}
101+
}
102+
103+
return true;
104+
}
105+
106+
bool ShapeOpBuilder::HasSupportedInputsImpl(const Node& node,
107+
[[maybe_unused]] const OpBuilderInputParams& input_params,
108+
const logging::Logger& logger) const {
109+
// We only check the type of input 0
110+
const auto& input = *node.InputDefs()[0];
111+
112+
int32_t input_type;
113+
if (!GetType(input, input_type, logger)) {
34114
return false;
35115
}
36116

37-
if (node_attr_helper.HasAttr("end")) {
38-
LOGS(logger, VERBOSE) << "Shape does not support 'end' attribute";
117+
if (input_params.create_mlprogram) {
118+
if ((input_type == ONNX_NAMESPACE::TensorProto_DataType_INT32 ||
119+
input_type == ONNX_NAMESPACE::TensorProto_DataType_FLOAT ||
120+
input_type == ONNX_NAMESPACE::TensorProto_DataType_FLOAT16)) {
121+
return true;
122+
} else {
123+
LOGS(logger, VERBOSE) << "[" << node.OpType()
124+
<< "] Input type: [" << input_type
125+
<< "] is not supported.";
126+
return false;
127+
}
128+
} else if (input_type != ONNX_NAMESPACE::TensorProto_DataType_FLOAT) {
129+
LOGS(logger, VERBOSE) << "[" << node.OpType()
130+
<< "] Input type: [" << input_type
131+
<< "] is not supported.";
39132
return false;
40133
}
41134

0 commit comments

Comments
 (0)