#include "core/operator_set.hpp"

#include "openvino/op/divide.hpp"
#include "openvino/op/gather.hpp"
#include "openvino/op/log.hpp"
#include "openvino/op/multiply.hpp"
#include "openvino/op/negative.hpp"
#include "openvino/op/reduce_mean.hpp"
#include "openvino/op/reduce_sum.hpp"
#include "openvino/op/softmax.hpp"

#include "softmax_cross_entropy_loss.hpp"
namespace ov {
17
17
namespace frontend {
18
18
namespace onnx {
19
19
namespace {
20
- OutputVector impl_softmax_cross_entropy (const Node& node, int64_t axis_default) {
21
- const auto inputs = node.get_ov_inputs ();
20
+ OutputVector impl_softmax_cross_entropy (const Node& node, int64_t axis_default) {
21
+ const auto inputs = node.get_ov_inputs ();
22
22
23
- const auto scores = inputs[0 ];
24
- const auto labels = inputs[1 ];
23
+ const auto scores = inputs[0 ];
24
+ const auto labels = inputs[1 ];
25
25
26
- // Optional weights paramater
27
- bool has_weights = inputs.size () > 2 ;
28
- std::shared_ptr<ov::Node> weights_gather = nullptr ;
26
+ // Optional weights paramater
27
+ bool has_weights = inputs.size () > 2 ;
28
+ std::shared_ptr<ov::Node> weights_gather = nullptr ;
29
29
30
- if (has_weights) {
31
- const auto weights = inputs[2 ];
32
- const auto axis_for_weights = ov::op::v0::Constant::create (element::i64 , {}, {0 });
33
- weights_gather = std::make_shared<ov::op::v8::Gather>(weights, labels, axis_for_weights);
34
- }
30
+ if (has_weights) {
31
+ const auto weights = inputs[2 ];
32
+ const auto axis_for_weights = ov::op::v0::Constant::create (element::i64 , {}, {0 });
33
+ weights_gather = std::make_shared<ov::op::v8::Gather>(weights, labels, axis_for_weights);
34
+ }
35
35
36
- // Getting attributes for axis and reduction mode
37
- const auto axis = node.get_attribute_value <int64_t >(" axis" , axis_default);
38
- const auto reduction = node.get_attribute_value <std::string>(" reduction" , " mean" );
36
+ // Getting attributes for axis and reduction mode
37
+ const auto axis = node.get_attribute_value <int64_t >(" axis" , axis_default);
38
+ const auto reduction = node.get_attribute_value <std::string>(" reduction" , " mean" );
39
39
40
- // Computing softmax & it's logarithm
41
- const auto softmax = std::make_shared<ov::op::v8::Softmax>(scores, axis);
42
- const auto log_softmax = std::make_shared<ov::op::v0::Log>(softmax);
40
+ // Computing softmax & it's logarithm
41
+ const auto softmax = std::make_shared<ov::op::v8::Softmax>(scores, axis);
42
+ const auto log_softmax = std::make_shared<ov::op::v0::Log>(softmax);
43
43
44
- const auto axis_const = ov::op::v0::Constant::create (element::i64 , {}, {axis});
45
- const auto gathered = std::make_shared<ov::op::v8::Gather>(log_softmax, labels, axis_const);
44
+ const auto axis_const = ov::op::v0::Constant::create (element::i64 , {}, {axis});
45
+ const auto gathered = std::make_shared<ov::op::v8::Gather>(log_softmax, labels, axis_const);
46
46
47
+ // Computing loss
48
+ std::shared_ptr<ov::Node> loss = std::make_shared<ov::op::v0::Negative>(gathered);
47
49
48
- // Computing loss
49
- std::shared_ptr<ov::Node> loss = std::make_shared<ov::op::v0::Negative>(gathered);
50
+ if (has_weights) {
51
+ loss = std::make_shared<ov::op::v1::Multiply>(loss, weights_gather);
52
+ }
50
53
51
- if (has_weights) {
52
- loss = std::make_shared<ov::op::v1::Multiply>(loss, weights_gather);
53
- }
54
+ // applying reduction as mentioned in
55
+ // https://github.com/onnx/onnx/blob/main/docs/Changelog.md#softmaxcrossentropyloss-12
54
56
55
- // applying reduction as mentioned in https://github.com/onnx/onnx/blob/main/docs/Changelog.md#softmaxcrossentropyloss-12
56
-
57
- if (reduction != " None" ) {
58
- // Reduce over the axis corresponding to each sample
59
- // Reducing over axis 0, assuming the loss tensor shape is [batch_size]
60
- const auto reduce_axis = ov::op::v0::Constant::create (ov::element::i64 , {1 }, {0 });
57
+ if (reduction != " None" ) {
58
+ // Reduce over the axis corresponding to each sample
59
+ // Reducing over axis 0, assuming the loss tensor shape is [batch_size]
60
+ const auto reduce_axis = ov::op::v0::Constant::create (ov::element::i64 , {1 }, {0 });
61
61
62
- if (reduction == " mean" ) {
63
- if (has_weights) {
64
- auto loss_sum = std::make_shared<ov::op::v1::ReduceSum>(loss->output (0 ), reduce_axis, true );
65
- auto weight_sum = std::make_shared<ov::op::v1::ReduceSum>(weights_gather->output (0 ), reduce_axis, true );
66
- loss = std::make_shared<ov::op::v1::Divide>(loss_sum, weight_sum);
67
- } else {
68
- loss = std::make_shared<ov::op::v1::ReduceMean>(loss->output (0 ), reduce_axis, true );
69
- }
70
- } else if (reduction == " sum" ) {
71
- loss = std::make_shared<ov::op::v1::ReduceSum>(loss->output (0 ), reduce_axis, true );
62
+ if (reduction == " mean" ) {
63
+ if (has_weights) {
64
+ auto loss_sum = std::make_shared<ov::op::v1::ReduceSum>(loss->output (0 ), reduce_axis, true );
65
+ auto weight_sum = std::make_shared<ov::op::v1::ReduceSum>(weights_gather->output (0 ), reduce_axis, true );
66
+ loss = std::make_shared<ov::op::v1::Divide>(loss_sum, weight_sum);
67
+ } else {
68
+ loss = std::make_shared<ov::op::v1::ReduceMean>(loss->output (0 ), reduce_axis, true );
72
69
}
70
+ } else if (reduction == " sum" ) {
71
+ loss = std::make_shared<ov::op::v1::ReduceSum>(loss->output (0 ), reduce_axis, true );
73
72
}
74
-
75
- return {loss};
76
73
}
74
+
75
+ return {loss};
77
76
}
77
+ } // namespace
78
78
namespace ai_onnx {
79
- namespace opset_12 {
80
- OutputVector ov::frontend::onnx::ai_onnx::opset_12::softmax_cross_entropy_loss (const Node& node) {
81
- return impl_softmax_cross_entropy (node, 1 );
82
- }
83
- ONNX_OP (" SoftmaxCrossEntropyLoss" , OPSET_SINCE(12 ), ai_onnx::opset_12::softmax_cross_entropy_loss);
84
- }
85
- namespace opset_13 {
86
- OutputVector ov::frontend::onnx::ai_onnx::opset_13::softmax_cross_entropy_loss (const Node& node) {
87
- return impl_softmax_cross_entropy (node, 1 );
88
- }
89
-
90
- ONNX_OP (" SoftmaxCrossEntropyLoss" , OPSET_SINCE(13 ), ai_onnx::opset_13::softmax_cross_entropy_loss);
91
- }
92
- }
79
+ namespace opset_12 {
80
+ OutputVector ov::frontend::onnx::ai_onnx::opset_12::softmax_cross_entropy_loss (const Node& node) {
81
+ return impl_softmax_cross_entropy (node, 1 );
93
82
}
83
+ ONNX_OP (" SoftmaxCrossEntropyLoss" , OPSET_SINCE(12 ), ai_onnx::opset_12::softmax_cross_entropy_loss);
84
+ } // namespace opset_12
85
+ namespace opset_13 {
86
+ OutputVector ov::frontend::onnx::ai_onnx::opset_13::softmax_cross_entropy_loss (const Node& node) {
87
+ return impl_softmax_cross_entropy (node, 1 );
94
88
}
95
- }
89
+
90
+ ONNX_OP (" SoftmaxCrossEntropyLoss" , OPSET_SINCE(13 ), ai_onnx::opset_13::softmax_cross_entropy_loss);
91
+ } // namespace opset_13
92
+ } // namespace ai_onnx
93
+ } // namespace onnx
94
+ } // namespace frontend
95
+ } // namespace ov
0 commit comments