Skip to content

Commit f517975

Browse files
committed
Merge commit '3001ed2f3d6cc0d651b851986328a711973dc92d' into 1.3.2
2 parents d7bbd8b + 3001ed2 commit f517975

File tree

6 files changed

+82
-59
lines changed

6 files changed

+82
-59
lines changed

tools/Vitis-AI-Runtime/VART/xir/CMakeLists.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@
1515
#
1616

1717
cmake_minimum_required(VERSION 3.9)
18-
project(xir VERSION 1.3.1 LANGUAGES C CXX)
18+
project(xir VERSION 1.3.2 LANGUAGES C CXX)
1919

2020
include(${CMAKE_SOURCE_DIR}/cmake/VitisCommon.cmake)
2121

tools/Vitis-AI-Runtime/VART/xir/include/xir/op/op_def.hpp

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -77,8 +77,7 @@ class OpDef {
7777
OpDef(const std::string& name);
7878
/// Create a definition of an op by name, inputs,
7979
/// attributes, shape_infer function and annotation.
80-
OpDef(const std::string& name,
81-
const std::vector<OpArgDef>& input_args,
80+
OpDef(const std::string& name, const std::vector<OpArgDef>& input_args,
8281
const std::vector<AttrDef>& attrs,
8382
const std::function<void(Op* op)>& shape_infer,
8483
const std::string& annotation);

tools/Vitis-AI-Runtime/VART/xir/include/xir/tensor/tensor.hpp

Lines changed: 28 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -62,10 +62,13 @@ class Tensor {
6262
*
6363
* @return A unique pointer to the new Tensor object.
6464
*/
65-
static std::unique_ptr<Tensor> create(const std::string& name,
66-
const std::vector<std::int32_t>& shape,
67-
const DataType::Type& data_type,
68-
const std::int32_t bit_width);
65+
[[deprecated(
66+
"This API will be removed in the future release version. Please use "
67+
"static std::unique_ptr<Tensor> Tensor::create(const std::string& name, "
68+
"const std::vector<std::int32_t>& shape, const DataType& data_type) "
69+
"instead.")]] static std::unique_ptr<Tensor>
70+
create(const std::string& name, const std::vector<std::int32_t>& shape,
71+
const DataType::Type& data_type, const std::int32_t bit_width);
6972

7073
/**
7174
* @brief Create a Tensor instance of the Tensor from an existing one.
@@ -114,7 +117,7 @@ class Tensor {
114117

115118
/**
116119
* @brief Get the tensor shape.
117-
*
120+
118121
* @return A vector of the tensor shape.
119122
*/
120123
virtual const std::vector<std::int32_t> get_shape() const = 0;
@@ -125,15 +128,23 @@ class Tensor {
125128
*
126129
* @return A vector of the tensor shape.
127130
*/
128-
virtual const std::vector<std::int32_t> get_dims() const = 0;
131+
[[deprecated(
132+
"Tensor::get_dims() will be removed in the future version. Please use "
133+
"the Tensor::get_shape() instead.")]] virtual const std::
134+
vector<std::int32_t>
135+
get_dims() const = 0;
129136

130137
/**
131138
* @brief Get the number of dimensions of the tensor. This API will be removed
132139
* in the future release version.
133140
*
134141
* @return The number of dimensions of the tensor shape.
135142
*/
136-
virtual const std::int32_t get_dim_num() const = 0;
143+
[
144+
[deprecated("Tensor::get_dim_num() will be removed in the future "
144145
"version. Please use the Tensor::get_shape().size() "
145146
"instead.")]] virtual const std::int32_t
147+
get_dim_num() const = 0;
137148

138149
/**
139150
* @brief Get the dimension size of one specific dimension indicated by idx.
@@ -143,7 +154,11 @@ class Tensor {
143154
*
144155
* @return The dimension size.
145156
*/
146-
virtual const std::int32_t get_dim_size(std::int32_t idx) const = 0;
157+
[[deprecated(
158+
"Tensor::get_dim_size(std::int32_t idx) will be removed in the future "
159+
"version. Please use the Tensor::get_shape().at(idx) "
160+
"instead.")]] virtual const std::int32_t
161+
get_dim_size(std::int32_t idx) const = 0;
147162

148163
/**
149164
* @brief Get the number of data in the current Tensor object.
@@ -165,7 +180,11 @@ class Tensor {
165180
*
166181
* @return bit_width.
167182
*/
168-
virtual const std::int32_t get_bit_width() const = 0;
183+
[[deprecated(
184+
"Tensor::get_bit_width() API will be removed in the future version, "
185+
"please use the Tensor::get_data_type() API to get the data type and "
186+
"read the bit width information in it.")]] virtual const std::int32_t
187+
get_bit_width() const = 0;
169188

170189
/**
171190
* @brief Get the number of elements in the current Tensor object.

tools/Vitis-AI-Runtime/VART/xir/src/xir/op/built_in_ops.cpp

Lines changed: 51 additions & 37 deletions
Original file line numberDiff line numberDiff line change
@@ -586,41 +586,49 @@ std::function<void(xir::OpDef&)> BroadcastOpDefGenerator(
586586
return [=](xir::OpDef& op_def) {
587587
auto input = xir::OpArgDef{"input", OpArgDef::REQUIRED_AND_REPEATED, T,
588588
"The feature maps, can be x-dimension."};
589-
op_def.add_input_arg(input).set_annotation(
590-
"We support broadcasting operations:\n\n"
591-
" \"add\": input[0] + input[1]\n"
592-
" \"sub\": input[0] - input[1]\n"
593-
" \"mul\": input[0] * input[1]\n"
594-
" \"div\": input[0] / input[1]\n"
595-
" \"min\": min(input[0], input[1])\n"
596-
" \"max\": max(input[0], input[1])\n"
597-
"What is broadcasting?\n\n"
598-
"When operating on two arrays, we compare their shapes element-wise. \n"
599-
"It starts with the trailing dimensions, and works its way forward.\n\n"
600-
"Two dimensions are compatible when:\n\n"
601-
"1. they are equal, or\n"
602-
"2. one of them is 1\n"
603-
"If these conditions are not met, a mismatch would be thrown, \n"
604-
"indicating that the arrays have incompatible shapes. \n"
605-
"The size of the resulting array is the maximum size \n"
606-
"along each dimension of the input arrays.\n"
607-
"For example,\n\n"
608-
"(1). bias_add, which is a channel-wise operation:\n\n"
609-
" input[0] (4d tensor): 1 x 112 x 112 x 64\n"
610-
" input[1] (1d tensor): 64\n"
611-
" result (4d tensor): 1 x 112 x 112 x 64\n"
612-
"(2). element-wise add, which is an element-wise operation:\n\n"
613-
" input[0] (3d tensor): 32 x 32 x 10\n"
614-
" input[1] (3d tensor): 32 x 32 x 10\n"
615-
" result (3d tensor): 32 x 32 x 10\n"
616-
"(3). more examples:\n\n"
617-
" input[0] (4d tensor): 1 x 32 x 32 x 10\n"
618-
" input[1] (3d tensor): 32 x 1 x 1\n"
619-
" result (4d tensor): 1 x 32 x 32 x 10\n"
620-
"(4). mismatched examples:\n\n"
621-
" input[0] (4d tensor): 1 x 32 x 32 x 10\n"
622-
" input[1] (3d tensor): 1 x 32 x 2\n"
623-
" result : mismatch\n");
589+
op_def.add_input_arg(input)
590+
.set_annotation(
591+
"We support broadcasting operations:\n\n"
592+
" \"add\": input[0] + input[1]\n"
593+
" \"sub\": input[0] - input[1]\n"
594+
" \"mul\": input[0] * input[1]\n"
595+
" \"div\": input[0] / input[1]\n"
596+
" \"min\": min(input[0], input[1])\n"
597+
" \"max\": max(input[0], input[1])\n"
598+
"What is broadcasting?\n\n"
599+
"When operating on two arrays, we compare their shapes "
600+
"element-wise. \n"
601+
"It starts with the trailing dimensions, and works its way "
602+
"forward.\n\n"
603+
"Two dimensions are compatible when:\n\n"
604+
"1. they are equal, or\n"
605+
"2. one of them is 1\n"
606+
"If these conditions are not met, a mismatch would be thrown, \n"
607+
"indicating that the arrays have incompatible shapes. \n"
608+
"The size of the resulting array is the maximum size \n"
609+
"along each dimension of the input arrays.\n"
610+
"For example,\n\n"
611+
"(1). bias_add, which is a channel-wise operation:\n\n"
612+
" input[0] (4d tensor): 1 x 112 x 112 x 64\n"
613+
" input[1] (1d tensor): 64\n"
614+
" result (4d tensor): 1 x 112 x 112 x 64\n"
615+
"(2). element-wise add, which is an element-wise operation:\n\n"
616+
" input[0] (3d tensor): 32 x 32 x 10\n"
617+
" input[1] (3d tensor): 32 x 32 x 10\n"
618+
" result (3d tensor): 32 x 32 x 10\n"
619+
"(3). more examples:\n\n"
620+
" input[0] (4d tensor): 1 x 32 x 32 x 10\n"
621+
" input[1] (3d tensor): 32 x 1 x 1\n"
622+
" result (4d tensor): 1 x 32 x 32 x 10\n"
623+
"(4). mismatched examples:\n\n"
624+
" input[0] (4d tensor): 1 x 32 x 32 x 10\n"
625+
" input[1] (3d tensor): 1 x 32 x 2\n"
626+
" result : mismatch\n")
627+
.add_constraint([](xir::Op* op) {
628+
UNI_LOG_CHECK(op->get_input_num() > 1, XIR_INVALID_ARG_OCCUR)
629+
<< op->to_string() << " only has " << op->get_input_num()
630+
<< " input arguments, but it requires at least 2 inputs.";
631+
});
624632
};
625633
}
626634

@@ -1509,10 +1517,16 @@ auto eltwise_fix =
15091517
"The feature maps, can be x-dimension. "
15101518
"eltwise-fix operator implements element-wise add."})
15111519
.add_attr(xir::AttrDefBuilder<std::string>::build(
1512-
"nonlinear", AttrDef::REQUIRED,
1520+
"nonlinear", AttrDef::OPTIONAL,
15131521
"`Datatype`: `string`\n\n"
15141522
"nonlinear type, \"NONE\", \"RELU\", \"PRELU\", "
1515-
"\"LEAKYRELU\",\"RELU6\"."))
1523+
"\"LEAKYRELU\",\"RELU6\". Default is \"NONE\"",
1524+
"NONE"))
1525+
.add_attr(xir::AttrDefBuilder<std::string>::build(
1526+
"type", AttrDef::OPTIONAL,
1527+
"`Datatype`: `string`\n\n"
1528+
"eltwise type, \"ADD\", \"MUL\". Default is \"ADD\"",
1529+
"ADD"))
15161530
.set_shape_infer(xir::shape_infer_eltwise_fix);
15171531

15181532
XIR_REGISTER_BUILT_IN_OP(eltwise_fix);

tools/Vitis-AI-Runtime/VART/xir/src/xir/op/op_imp.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,7 @@ static void op_arg_occur_check(const OpArgDef& arg, std::uint32_t num) {
3434
} else if (arg.occur_type == OpArgDef::REPEATED) {
3535
;
3636
} else if (arg.occur_type == OpArgDef::REQUIRED_AND_REPEATED) {
37-
UNI_LOG_CHECK(num >= 1, XIR_INVALID_ARG_OCCUR)
37+
UNI_LOG_CHECK(num > 0, XIR_INVALID_ARG_OCCUR)
3838
<< "Arg " << arg.name << " has type REQUIRED_AND_REPEATED, but try set "
3939
<< num << " elements";
4040
} else {

tools/Vitis-AI-Runtime/VART/xir/src/xir/tensor/tensor_imp.cpp

Lines changed: 0 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -53,18 +53,13 @@ const std::vector<std::int32_t> TensorImp::get_shape() const {
5353

5454
// TODO: legacy API
5555
const std::vector<std::int32_t> TensorImp::get_dims() const {
56-
UNI_LOG_WARNING << "Tensor::get_dims() will be removed in the future "
57-
"version. Please use the Tensor::get_shape() instead.";
5856
return this->shape_;
5957
}
6058

6159
const std::int32_t TensorImp::get_dim_num() const {
6260
return this->shape_.size();
6361
}
6462
const std::int32_t TensorImp::get_dim_size(std::int32_t idx) const {
65-
UNI_LOG_WARNING
66-
<< "Tensor::get_dim_size(std::int32_t idx) will be removed in the future "
67-
"version. Please use the Tensor::get_shape().at(idx) instead.";
6863
return this->shape_.at(idx);
6964
}
7065

@@ -86,10 +81,6 @@ const std::int32_t TensorImp::get_element_num() const {
8681
const DataType& TensorImp::get_data_type() const { return data_type_; }
8782

8883
const std::int32_t TensorImp::get_bit_width() const {
89-
UNI_LOG_WARNING
90-
<< "Tensor::get_bit_width() API will be removed in the future version, "
91-
"please use the Tensor::get_data_type() API to get the data type and "
92-
"read the bit width information in it.";
9384
return this->data_type_.bit_width;
9485
}
9586

0 commit comments

Comments (0)