Commit 14950cc

wejoncy, skottmckay, and github-actions[bot] authored and committed
[CoreML] ML Program more operators support [3/N] (microsoft#22710)
### Description

Adds ML Program support for the following operators in the CoreML execution provider:

- Erf
- Round
- Max
- ReduceMax
- ReduceMean
- ReduceSum
- Unsqueeze
- Squeeze
- Softmax

Co-authored-by: Scott McKay <[email protected]>
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
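These operators are only dispatched to the CoreML EP's ML Program builders when the provider is created in ML Program mode. For context, below is a minimal sketch of enabling that mode through the ONNX Runtime C++ API, using the flag-based factory (`OrtSessionOptionsAppendExecutionProvider_CoreML` with `COREML_FLAG_CREATE_MLPROGRAM`); the include path and the model filename are illustrative and are not part of this change.

```cpp
// Sketch: create a session with the CoreML EP in ML Program mode.
#include <onnxruntime_cxx_api.h>
#include <coreml_provider_factory.h>  // assumed include path for the CoreML EP factory header

int main() {
  Ort::Env env(ORT_LOGGING_LEVEL_WARNING, "coreml-mlprogram");
  Ort::SessionOptions session_options;

  // COREML_FLAG_CREATE_MLPROGRAM requests the ML Program (MIL) model format,
  // which is the path the operator builders touched by this commit implement.
  Ort::ThrowOnError(
      OrtSessionOptionsAppendExecutionProvider_CoreML(session_options, COREML_FLAG_CREATE_MLPROGRAM));

  // "model.onnx" is a placeholder; nodes such as Squeeze, Unsqueeze, and ReduceSum
  // in it can now be assigned to the CoreML EP instead of falling back to the CPU EP.
  Ort::Session session(env, "model.onnx", session_options);
  return 0;
}
```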
Parent commit: 09e622f

3 files changed: 15 additions, 26 deletions

onnxruntime/core/providers/coreml/builders/impl/base_op_builder.cc

Lines changed: 0 additions & 8 deletions

@@ -13,14 +13,6 @@ using namespace CoreML::Specification;
 namespace onnxruntime {
 namespace coreml {
 
-// Once all ops are supportted FP16, we can remove it. Before that, we keep a set of ops to
-// filter suppported ones.
-static std::set<std::string> Float16Ops = {
-    "Add", "ArgMax", "AveragePool", "BatchNormalization", "Cast", "Clip", "Concat", "Conv", "ConvTranspose",
-    "DepthToSpace", "Div", "Gelu", "Gemm", "GlobalAveragePool", "GlobalMaxPool", "GridSample", "GroupNormalization",
-    "InstanceNormalization", "LayerNormalization", "LeakyRelu", "MatMul", "MaxPool", "Mul", "PRelu", "Pow",
-    "Reciprocal", "Relu", "Reshape", "Resize", "Sigmoid", "Slice", "Split", "Sqrt", "Sub", "Tanh", "Transpose"};
-
 namespace {
 // TODO, move this to shared_library
 bool HasExternalInitializer(const InitializedTensorSet& initializers, const Node& node,
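For readers skimming the diff: the removed set was an fp16 allow-list, meaning only the ops listed in it were accepted with float16 inputs. Its call site is not part of this hunk, so the following is only an illustrative sketch of that kind of gate; `IsFloat16AllowedForOp` and the trimmed op list are hypothetical, not ORT code.

```cpp
// Illustrative only: how an op-name allow-list like the removed Float16Ops is typically consulted.
#include <set>
#include <string>

static const std::set<std::string> kFloat16Ops = {"Add", "Cast", "Concat", "Conv", "Relu"};  // trimmed list

bool IsFloat16AllowedForOp(const std::string& op_type) {
  // With the set removed by this commit, fp16 support is no longer gated per op name here.
  return kFloat16Ops.count(op_type) > 0;
}
```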

onnxruntime/core/providers/coreml/builders/impl/reduction_op_builder.cc

Lines changed: 3 additions & 2 deletions

@@ -133,8 +133,9 @@ bool ReductionOpBuilder::IsOpSupportedImpl(const Node& node, const OpBuilderInpu
     return false;
   }
 
-#if defined(TARGET_OS_IOS) && defined(TARGET_CPU_X86_64) && TARGET_OS_IOS && TARGET_CPU_X86_64
-  // skip ReductionOpTest.ReduceSum_half_bert because reduce_sum will output all zeros
+#if defined(TARGET_OS_IOS) && defined(TARGET_CPU_X86_64)
+  // to pass https://dev.azure.com/onnxruntime/onnxruntime/_build/results?buildId=1563483&view=logs&j=f7cc61a9-cc70-56e7-b06c-4668ca17e426
+  // ReductionOpTest.ReduceSum_half_bert
   int32_t input_type;
   GetType(*input_defs[0], input_type, logger);
   if (node.OpType() == "ReduceSum" && input_type == ONNX_NAMESPACE::TensorProto_DataType_FLOAT16) {
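The hunk cuts off just inside the `if`, so here is a standalone sketch of the decision the guarded block encodes on the iOS x86_64 simulator build. The function name, simplified parameters, and the `false` return are assumptions inferred from the surrounding `IsOpSupportedImpl` pattern, not code from this PR.

```cpp
// Standalone sketch: decline fp16 ReduceSum on the iOS x86_64 simulator so the
// node falls back to another EP (the CI test referenced above otherwise fails).
#include <string>

// Assumed numeric value of ONNX_NAMESPACE::TensorProto_DataType_FLOAT16.
constexpr int kTensorProtoFloat16 = 10;

bool IsReductionSupportedOnIosX86Simulator(const std::string& op_type, int input_type) {
  if (op_type == "ReduceSum" && input_type == kTensorProtoFloat16) {
    return false;  // assumed outcome: report the op as unsupported
  }
  return true;
}
```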

onnxruntime/core/providers/coreml/builders/impl/squeeze_op_builder.cc

Lines changed: 12 additions & 16 deletions

@@ -13,10 +13,6 @@
 #include "core/optimizer/initializer.h"
 #include "core/providers/cpu/tensor/unsqueeze.h"
 
-#ifdef __APPLE__
-#include <TargetConditionals.h>
-#endif
-
 namespace onnxruntime {
 namespace coreml {
 
@@ -80,28 +76,28 @@ Status SqueezeOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder,
                                                const Node& node,
                                                [[maybe_unused]] const logging::Logger& logger) const {
   std::unique_ptr<COREML_SPEC::NeuralNetworkLayer> layer = model_builder.CreateNNLayer(node);
+  const auto& input_defs(node.InputDefs());
   auto* coreml_squeeze = layer->mutable_squeeze();
   TensorShapeVector axes;
   GetAxes(model_builder, node, axes);
+  std::vector<int64_t> input_shape;
+  GetShape(*input_defs[0], input_shape, logger);
 #if defined(COREML_ENABLE_MLPROGRAM)
-  const auto& input_defs(node.InputDefs());
   if (model_builder.CreateMLProgram()) {
     using namespace CoreML::Specification::MILSpec;
 
-#if defined(TARGET_CPU_X86_64) && TARGET_CPU_X86_64
-    // expand_dims has limited requirements for static shape, however, X86_64 has a bug that it can't handle scalar input
-    if (node.OpType() == "Unsqueeze" && input_defs[0]->Shape()->dim_size() < 2) {
-      HandleX86ArchUnsqueezeScalarInput(model_builder, node, logger);
-      return Status::OK();
-    }
-#endif
-    std::string_view coreml_op_type = node.OpType() == "Squeeze" ? "squeeze" : "expand_dims";
+    std::string_view coreml_op_type = node.OpType() == "Squeeze" ? "squeeze" : "reshape";
     std::unique_ptr<Operation> op = model_builder.CreateOperation(node, coreml_op_type);
     AddOperationInput(*op, "x", input_defs[0]->Name());
 
-    if (!axes.empty()) {
-      // coreml supports negative axes
-      AddOperationInput(*op, "axes", model_builder.AddConstant(op->type(), "axes", AsSpan(axes)));
+    if (coreml_op_type == "squeeze") {
+      if (!axes.empty()) {
+        // coreml squeeze op does support negative axes
+        AddOperationInput(*op, "axes", model_builder.AddConstant(op->type(), "axes", AsSpan(axes)));
+      }
+    } else {
+      TensorShapeVector output_shape = UnsqueezeBase::ComputeOutputShape(TensorShape(input_shape), axes);
+      AddOperationInput(*op, "shape", model_builder.AddConstant(op->type(), "shape", AsSpan(output_shape)));
     }
     AddOperationOutput(*op, *node.OutputDefs()[0]);
     model_builder.AddOperation(std::move(op));
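The functional change in this hunk is that Unsqueeze is now lowered to the CoreML `reshape` op with a precomputed static output shape rather than `expand_dims`, which also removes the x86_64 scalar-input workaround. Below is a self-contained sketch of the shape computation that `UnsqueezeBase::ComputeOutputShape` is used for here; it is an illustrative reimplementation of ONNX Unsqueeze shape semantics, not the ORT helper itself.

```cpp
// Sketch: compute the Unsqueeze output shape by inserting a size-1 dimension at each axis.
#include <algorithm>
#include <cstdint>
#include <vector>

std::vector<int64_t> UnsqueezeShape(const std::vector<int64_t>& input_shape,
                                    std::vector<int64_t> axes) {
  const int64_t out_rank = static_cast<int64_t>(input_shape.size() + axes.size());
  // ONNX allows negative axes; they are normalized against the output rank.
  for (auto& axis : axes) {
    if (axis < 0) axis += out_rank;
  }
  std::sort(axes.begin(), axes.end());

  std::vector<int64_t> output_shape;
  output_shape.reserve(static_cast<size_t>(out_rank));
  size_t input_idx = 0;
  size_t axes_idx = 0;
  for (int64_t i = 0; i < out_rank; ++i) {
    if (axes_idx < axes.size() && axes[axes_idx] == i) {
      output_shape.push_back(1);  // new dimension requested at this position
      ++axes_idx;
    } else {
      output_shape.push_back(input_shape[input_idx++]);
    }
  }
  return output_shape;
}

// Example: UnsqueezeShape({3, 4}, {0, 2}) == {1, 3, 1, 4}; that vector becomes the
// constant "shape" input of the CoreML reshape operation added above.
```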
