[CoreML] ML Program more operators support [3/N] #22710

Merged · 20 commits · Nov 28, 2024
17 changes: 3 additions & 14 deletions onnxruntime/core/providers/coreml/builders/impl/base_op_builder.cc
@@ -13,14 +13,6 @@ using namespace CoreML::Specification;
namespace onnxruntime {
namespace coreml {

// Once all ops support FP16 we can remove this. Until then, keep a set of ops to
// filter the supported ones.
static std::set<std::string> Float16Ops = {
"Add", "ArgMax", "AveragePool", "BatchNormalization", "Cast", "Clip", "Concat", "Conv", "ConvTranspose",
"DepthToSpace", "Div", "Gelu", "Gemm", "GlobalAveragePool", "GlobalMaxPool", "GridSample", "GroupNormalization",
"InstanceNormalization", "LayerNormalization", "LeakyRelu", "MatMul", "MaxPool", "Mul", "PRelu", "Pow",
"Reciprocal", "Relu", "Reshape", "Resize", "Sigmoid", "Slice", "Split", "Sqrt", "Sub", "Tanh", "Transpose"};

namespace {
// TODO, move this to shared_library
bool HasExternalInitializer(const InitializedTensorSet& initializers, const Node& node,
@@ -114,13 +106,10 @@ bool BaseOpBuilder::IsInputDtypeSupport(const Node& node, size_t idx,
return true;
}

// only support MLProgram for FP16
#if defined(COREML_ENABLE_MLPROGRAM)
if (input_params.create_mlprogram && input_type == ONNX_NAMESPACE::TensorProto_DataType_FLOAT16 &&
Float16Ops.count(node.OpType())) {
return true;
// only MLProgram supports FP16
if (input_type == ONNX_NAMESPACE::TensorProto_DataType_FLOAT16) {
return input_params.create_mlprogram;
}
#endif

LOGS(logger, VERBOSE) << "[" << node.OpType() << "] Input type: [" << input_type << "] is not currently supported";
return false;
@@ -6,6 +6,7 @@
#include "core/providers/coreml/builders/helper.h"
#include "core/providers/coreml/builders/impl/base_op_builder.h"
#include "core/providers/coreml/builders/impl/builder_utils.h"
#include "core/providers/coreml/shape_utils.h"
#include "core/providers/coreml/builders/model_builder.h"
#include "core/providers/coreml/builders/op_builder_factory.h"
#include "core/providers/shared/utils/utils.h"
@@ -55,6 +56,64 @@ bool CheckIfBothInputShapesMatch(const Node& node, const logging::Logger& logger
}
} // namespace

#if defined(COREML_ENABLE_MLPROGRAM)
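// Computes the broadcast output shape of two input shapes following ONNX/numpy
// broadcasting rules: the shapes are right-aligned, missing leading dims are
// treated as 1, and a dynamic dim (-1) in either input makes the output dim dynamic.
// e.g. a = {2, 3, 4}, b = {3, 1} -> {2, 3, 4}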
static std::vector<int64_t> InferOutputShape(const std::vector<int64_t>& a, const std::vector<int64_t>& b) {
std::vector<int64_t> output_shape;
int64_t i_a = 0, j_b = 0;
if (a.size() >= b.size()) {
output_shape = a;
j_b -= a.size() - b.size();
} else {
output_shape = b;
i_a -= b.size() - a.size();
}

for (size_t i = 0; i < output_shape.size(); i++, i_a++, j_b++) {
const int64_t a_dim = (i_a >= 0) ? a[i_a] : 1;
const int64_t b_dim = (j_b >= 0) ? b[j_b] : 1;
if (a_dim == -1 || b_dim == -1) {
output_shape[i] = -1;
} else {
output_shape[i] = std::max(a_dim, b_dim);
}
}
return output_shape;
}

// Adds the extra variadic inputs to the model builder.
// In the ONNX spec, some nodes allow variadic inputs, e.g. max(x, y, z, ...),
// while the CoreML maximum op only accepts two inputs: maximum(x, y).
// The conversion unrolls the variadic call into a chain of binary ops:
// max(x, y, z, ...) -> max(max(x, y), z, ...)
static void AddVariadicInputs(std::unique_ptr<CoreML::Specification::MILSpec::Operation>* op,
ModelBuilder& model_builder,
const Node& node,
const logging::Logger& logger) {
using namespace CoreML::Specification::MILSpec;
const auto& input_defs(node.InputDefs());
std::string_view layer_input_name_x = model_builder.GetUniqueName(node, "variadic");
auto input_dtype = input_defs[0]->TypeAsProto()->tensor_type().elem_type();
const int32_t elem_type = static_cast<int32_t>(input_dtype);
std::vector<int64_t> x0_shape, x1_shape;
GetShape(*input_defs[0], x0_shape, logger);
GetShape(*input_defs[1], x1_shape, logger);
x0_shape = InferOutputShape(x0_shape, x1_shape);
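// op_prev holds the binary op that already consumes inputs 0 and 1; each
// iteration feeds its output into a fresh op of the same type together with
// the next input, while x0_shape tracks the running broadcast output shape.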
std::unique_ptr<Operation> op_prev = std::move(*op);
for (size_t i = 2; i < input_defs.size(); i++) {
AddIntermediateOperationOutput(*op_prev, layer_input_name_x, elem_type, x0_shape);
std::unique_ptr<Operation> op_cur = model_builder.CreateOperation(node, op_prev->type());
AddOperationInput(*op_cur, "x", layer_input_name_x);
AddOperationInput(*op_cur, "y", input_defs[i]->Name());
model_builder.AddOperation(std::move(op_prev));
op_prev = std::move(op_cur);
layer_input_name_x = model_builder.GetUniqueName(node, "variadic");
GetShape(*input_defs[i], x1_shape, logger);
x0_shape = InferOutputShape(x0_shape, x1_shape);
}
*op = std::move(op_prev);
}
#endif

Status BinaryOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, const Node& node,
const logging::Logger& logger) const {
const auto& op_type(node.OpType());
@@ -70,6 +129,8 @@ Status BinaryOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, const
coreml_op_type = "add";
} else if (op_type == "Mul") {
coreml_op_type = "mul";
} else if (op_type == "Max") {
coreml_op_type = "maximum";
} else if (op_type == "Sub") {
coreml_op_type = "sub";
} else if (op_type == "Div") {
@@ -86,8 +147,11 @@ Status BinaryOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, const
std::unique_ptr<Operation> op = model_builder.CreateOperation(node, coreml_op_type);
AddOperationInput(*op, "x", input_defs[0]->Name());
AddOperationInput(*op, "y", input_defs[1]->Name());
if (input_defs.size() > 2) {
// the ONNX "Max" node may have variadic inputs
AddVariadicInputs(&op, model_builder, node, logger);
}
AddOperationOutput(*op, *node.OutputDefs()[0]);

model_builder.AddOperation(std::move(op));
} else
#endif // defined (COREML_ENABLE_MLPROGRAM)
@@ -157,6 +221,10 @@ bool BinaryOpBuilder::HasSupportedInputsImpl(const Node& node, const OpBuilderIn
return false;
}

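// ONNX Max is only covered by the ML Program "maximum" op; the NeuralNetwork
// path of this builder has no equivalent layer.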
if (node.OpType() == "Max" && !input_params.create_mlprogram) {
return false;
}

return true;
}

@@ -5,6 +5,7 @@
#include "core/providers/common.h"
#include "core/providers/coreml/builders/helper.h"
#include "core/providers/coreml/builders/impl/base_op_builder.h"
#include "core/providers/coreml/builders/impl/builder_utils.h"
#include "core/providers/coreml/builders/model_builder.h"
#include "core/providers/coreml/builders/op_builder_factory.h"
#include "core/providers/shared/utils/utils.h"
@@ -20,6 +21,7 @@ class ReductionOpBuilder : public BaseOpBuilder {

bool IsOpSupportedImpl(const Node& node, const OpBuilderInputParams& input_params,
const logging::Logger& logger) const override;
bool SupportsMLProgram() const override { return true; }
};

namespace {
@@ -48,13 +50,12 @@ Status ReductionOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, co
const logging::Logger& /* logger */) const {
const auto& op_type(node.OpType());
const auto& input_defs(node.InputDefs());
const auto& initializers(model_builder.GetInitializerTensors());

std::vector<int64_t> axes;

NodeAttrHelper helper(node);
if (input_defs.size() > 1 && input_defs[1]->Exists()) {
auto& axes_tensor = *initializers.at(input_defs[1]->Name());
auto& axes_tensor = *model_builder.GetConstantInitializer(input_defs[1]->Name());
Initializer axes_initializer(axes_tensor);
int64_t* data = axes_initializer.data<int64_t>();
int64_t size = axes_initializer.size();
@@ -66,29 +67,67 @@ Status ReductionOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, co

const bool keepdims = helper.Get("keepdims", 1) != 0;
const bool noop_with_empty_axes = helper.Get("noop_with_empty_axes", 0) != 0;
#if defined(COREML_ENABLE_MLPROGRAM)
if (model_builder.CreateMLProgram()) {
using namespace CoreML::Specification::MILSpec;

std::string_view coreml_op_type;
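// Per the ONNX spec, when noop_with_empty_axes is set and no axes are given,
// the node is a no-op, which maps to a CoreML identity op.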
if (noop_with_empty_axes && axes.size() == 0) {
coreml_op_type = "identity";
} else if (op_type == "ReduceSum") {
coreml_op_type = "reduce_sum";
} else if (op_type == "ReduceMean") {
coreml_op_type = "reduce_mean";
} else if (op_type == "ReduceMax") {
coreml_op_type = "reduce_max";
} else if (op_type == "ReduceMin") {
coreml_op_type = "reduce_min";
} else if (op_type == "ReduceProd") {
coreml_op_type = "reduce_prod";
} else {
return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT,
"ReductionOpBuilder::AddToModelBuilderImpl, unexpected op: ", op_type);
}
std::unique_ptr<Operation> op = model_builder.CreateOperation(node, coreml_op_type);
AddOperationInput(*op, "x", input_defs[0]->Name());
if (coreml_op_type != "identity") {
if (axes.size() > 0) {
AddOperationInput(*op, "axes", model_builder.AddConstant(op->type(), "axes", axes));
}
AddOperationInput(*op, "keep_dims", model_builder.AddScalarConstant(op->type(), "keep_dims", keepdims));
}
AddOperationOutput(*op, *node.OutputDefs()[0]);

model_builder.AddOperation(std::move(op));
} else
#endif // (COREML_ENABLE_MLPROGRAM)
{
std::unique_ptr<COREML_SPEC::NeuralNetworkLayer> layer = model_builder.CreateNNLayer(node);

if (op_type == "ReduceSum") {
AddReductionParams(layer->mutable_reducesum(), axes, keepdims, noop_with_empty_axes);
} else if (op_type == "ReduceMean") {
AddReductionParams(layer->mutable_reducemean(), axes, keepdims, noop_with_empty_axes);
} else {
return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT,
"ReductionOpBuilder::AddToModelBuilderImpl, unknown op: ", op_type);
}

std::unique_ptr<COREML_SPEC::NeuralNetworkLayer> layer = model_builder.CreateNNLayer(node);
*layer->mutable_input()->Add() = node.InputDefs()[0]->Name();
*layer->mutable_output()->Add() = node.OutputDefs()[0]->Name();

if (op_type == "ReduceSum") {
AddReductionParams(layer->mutable_reducesum(), axes, keepdims, noop_with_empty_axes);
} else if (op_type == "ReduceMean") {
AddReductionParams(layer->mutable_reducemean(), axes, keepdims, noop_with_empty_axes);
} else {
return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT,
"ReductionOpBuilder::AddToModelBuilderImpl, unknown op: ", op_type);
model_builder.AddLayer(std::move(layer));
}

*layer->mutable_input()->Add() = node.InputDefs()[0]->Name();
*layer->mutable_output()->Add() = node.OutputDefs()[0]->Name();

model_builder.AddLayer(std::move(layer));
return Status::OK();
}

bool ReductionOpBuilder::IsOpSupportedImpl(const Node& node, const OpBuilderInputParams& input_params,
const logging::Logger& logger) const {
const auto& input_defs = node.InputDefs();

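// reduce_max/min/prod are only implemented for the ML Program path; the
// NeuralNetwork path above only handles ReduceSum and ReduceMean.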
if (!input_params.create_mlprogram &&
(node.OpType() == "ReduceMax" || node.OpType() == "ReduceMin" || node.OpType() == "ReduceProd")) {
return false;
}
NodeAttrHelper helper(node);

// noop_with_empty_axes defaults to false and is only available in newer opsets where 'axes' is an optional input
@@ -99,18 +138,16 @@ bool ReductionOpBuilder::IsOpSupportedImpl(const Node& node, const OpBuilderInpu
if (input_defs.size() > 1 && input_defs[1]->Exists()) {
// 'axes' is optional input in new opsets
const auto& axes_name = input_defs[1]->Name();
const auto& initializers = input_params.graph_viewer.GetAllInitializedTensors();
if (!Contains(initializers, axes_name)) {
const auto* axes = input_params.graph_viewer.GetConstantInitializer(axes_name);
if (!axes) {
LOGS(logger, VERBOSE) << "Axes of reduction must be a constant initializer";
return false;
}

empty_axes = initializers.at(axes_name)->int64_data_size() == 0;
empty_axes = axes->int64_data_size() == 0;
}

if (empty_axes && noop_with_empty_axes) {
// TODO: When we add ML Program support we should enable this as it makes the node an Identity op
LOGS(logger, VERBOSE) << "CoreML doesn't support noop on empty axes for reduction layers" << std::endl;
if (empty_axes && noop_with_empty_axes && !input_params.create_mlprogram) {
LOGS(logger, VERBOSE) << "NeuralNetwork doesn't support noop on empty axes for reduction layers";
return false;
}
