Skip to content

Commit d96c258

Browse files
praaszEvgenya Stepyreva
and
Evgenya Stepyreva
authored
Review opset1 unsqueeze for shape inference aspects (#13538)
* Use non-blocking assertions in unsqueeze tests for independent properties * Review unsqueeze interval shape propagation: - extend partial shape propagation tests - add invalid axis value tests - fix issues when repeated axes on input * Shape inference test, unsqueeze using default ctor - private fields set by setters * Review unsqeeze bounds propagation: - preserve and propagate labels - bounds propagation lowe/upper * Add template shape inference for unsqueeze - extract current implementation and unify it for shape types - add unit test for static shape inference - add unsqueeze in/out indexes * Unify axes normalization * Use common fixture for static shape inference * Fix build issue in GPU plugin - include unsqueeze shape inference * Remove enum with in/out indexes Due to build issue on Windows * Remove make move iterator minor changes validation util * Add test for label propagation - expand static shape inference tests * Add common validation for axes input * Fix build issues Co-authored-by: Evgenya Stepyreva <[email protected]>
1 parent d8f7e79 commit d96c258

File tree

12 files changed

+582
-174
lines changed

12 files changed

+582
-174
lines changed

src/core/include/openvino/core/validation_util.hpp

+18-3
Original file line numberDiff line numberDiff line change
@@ -108,9 +108,9 @@ int64_t normalize_axis(const std::string& node_description,
108108
/// If any negative axis in vector, it counts from the last to the first axis,
109109
/// by adding tensor_rank to axis. Changes axes vector inplace.
110110
///
111-
/// \param[in] node The node with requested axes.
112-
/// \param[in] tensor_rank The corresponding tensor rank.
113-
/// \param[in] axes The requested vector of axes.
111+
/// \param[in] node The node with requested axes.
112+
/// \param[in] tensor_rank The corresponding tensor rank.
113+
/// \param[in,out] axes The requested vector of axes.
114114
///
115115
OPENVINO_API
116116
void normalize_axes(const Node* node, const int64_t& tensor_rank, std::vector<int64_t>& axes);
@@ -157,4 +157,19 @@ OPENVINO_API bool is_valid_axes_order(const std::vector<int64_t>& axes_order, co
157157
/// \param labels Label tensor for check.
158158
/// \return True if there is no labels, otherwise false.
159159
OPENVINO_API bool has_no_labels(const TensorLabel& labels);
160+
161+
/// \brief Get the node input partial shapes.
162+
///
163+
/// \param node Node to extract input shapes.
164+
///
165+
/// \return Vector of PartialShapes of each input.
166+
OPENVINO_API std::vector<PartialShape> get_node_input_partial_shapes(const ov::Node& node);
167+
168+
/// \brief Check if rank is compatible to any of rank from container.
169+
///
170+
/// \param rank Rank to check.
171+
/// \param ranks Vector of ranks used to check input rank compatibility.
172+
///
173+
/// \return True if rank compatible to any from ranks, otherwise false.
174+
OPENVINO_API bool is_rank_compatible_any_of(const ov::Rank& rank, const std::vector<ov::Rank>& ranks);
160175
} // namespace ov

src/core/shape_inference/include/compare.hpp

+14-6
Original file line numberDiff line numberDiff line change
@@ -23,29 +23,37 @@ enum Bound : uint8_t { NONE, LOWER, UPPER, BOTH };
2323
*/
2424
template <class T, Bound BMode = Bound::NONE>
2525
class Between {
26-
T _lower_bound, _upper_bound;
26+
const T m_lower_bound, m_upper_bound;
2727

2828
public:
29-
constexpr Between(const T& lower, const T& upper) : _lower_bound{lower}, _upper_bound{upper} {}
29+
constexpr Between(const T& lower, const T& upper) : m_lower_bound{lower}, m_upper_bound{upper} {}
3030

3131
template <Bound B = BMode, typename std::enable_if<B == Bound::NONE>::type* = nullptr>
3232
constexpr bool operator()(const T& value) const {
33-
return (_lower_bound < value) && (value < _upper_bound);
33+
return (lower() < value) && (value < upper());
3434
}
3535

3636
template <Bound B = BMode, typename std::enable_if<B == Bound::LOWER>::type* = nullptr>
3737
constexpr bool operator()(const T& value) const {
38-
return (_lower_bound <= value) && (value < _upper_bound);
38+
return (lower() <= value) && (value < upper());
3939
}
4040

4141
template <Bound B = BMode, typename std::enable_if<B == Bound::UPPER>::type* = nullptr>
4242
constexpr bool operator()(const T& value) const {
43-
return (_lower_bound < value) && (value <= _upper_bound);
43+
return (lower() < value) && (value <= upper());
4444
}
4545

4646
template <Bound B = BMode, typename std::enable_if<B == Bound::BOTH>::type* = nullptr>
4747
constexpr bool operator()(const T& value) const {
48-
return (_lower_bound <= value) && (value <= _upper_bound);
48+
return (lower() <= value) && (value <= upper());
49+
}
50+
51+
const T& upper() const {
52+
return m_upper_bound;
53+
}
54+
55+
const T& lower() const {
56+
return m_lower_bound;
4957
}
5058
};
5159

src/core/shape_inference/include/shape_nodes.hpp

-27
Original file line numberDiff line numberDiff line change
@@ -124,33 +124,6 @@ void shape_infer(const ov::opset1::Squeeze* op,
124124
}
125125
}
126126

127-
template <class T>
128-
void shape_infer(const ov::opset1::Unsqueeze* op,
129-
const std::vector<T>& input_shapes,
130-
std::vector<T>& output_shapes,
131-
const std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>>& constant_data = {}) {
132-
NODE_VALIDATION_CHECK(op, input_shapes.size() == 2 && output_shapes.size() == 1);
133-
std::vector<int64_t> axes;
134-
bool axes_is_constant = get_data_as_int64<T>(1, op, axes, constant_data);
135-
NODE_VALIDATION_CHECK(op, axes_is_constant, "Shape inference lacks input data");
136-
137-
auto& input_shape = input_shapes[0];
138-
OPENVINO_ASSERT(input_shape.is_static());
139-
auto& output_shape = output_shapes[0];
140-
output_shape = input_shape;
141-
142-
NODE_VALIDATION_CHECK(op, !axes.empty(), "'axes' input is mandatory");
143-
144-
int64_t expanded_rank = input_shape.size() + axes.size();
145-
ov::normalize_axes(op, static_cast<int64_t>(expanded_rank), axes);
146-
147-
std::set<int64_t> unique_sorted_axes(axes.begin(), axes.end());
148-
for (const auto& axis : unique_sorted_axes) {
149-
NODE_VALIDATION_CHECK(op, axis <= expanded_rank, "provided 'axes' value ", axis, " is not valid.");
150-
output_shape.insert(next(output_shape.begin(), axis), 1);
151-
}
152-
}
153-
154127
template <class T>
155128
inline void dynamic_shape(T& output_shape) {
156129
OPENVINO_UNREACHABLE("This code should be executed only for PartialShape class");
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,71 @@
1+
// Copyright (C) 2022 Intel Corporation
2+
// SPDX-License-Identifier: Apache-2.0
3+
//
4+
#pragma once
5+
6+
#include "openvino/op/unsqueeze.hpp"
7+
#include "utils.hpp"
8+
9+
namespace ov {
10+
namespace op {
11+
namespace v0 {
12+
13+
template <class TOp>
14+
void check_unsqueeze_axes_rank(const TOp* op, const Rank& rank) {
15+
NODE_VALIDATION_CHECK(op,
16+
is_rank_compatible_any_of(rank, {0, 1}),
17+
"Second input (axes) should not be of rank higher than 1. Got: ",
18+
rank);
19+
}
20+
21+
template <class T>
22+
void shape_infer(const Unsqueeze* op,
23+
const std::vector<T>& input_shapes,
24+
std::vector<T>& output_shapes,
25+
const std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>>& constant_data = {}) {
26+
NODE_VALIDATION_CHECK(op, input_shapes.size() == 2 && output_shapes.size() == 1);
27+
check_unsqueeze_axes_rank(op, input_shapes[1].rank());
28+
const auto& arg_shape = input_shapes[0];
29+
auto& out_shape = output_shapes[0];
30+
31+
std::vector<int64_t> axes_val;
32+
const auto has_axes = get_data_as_int64<T>(1, op, axes_val, constant_data);
33+
34+
if (has_axes && arg_shape.rank().is_static()) {
35+
NODE_VALIDATION_CHECK(op, !axes_val.empty(), "'axes' input is mandatory");
36+
// Remove repeated axes on input
37+
std::unordered_set<int64_t> tmp(axes_val.begin(), axes_val.end());
38+
std::vector<int64_t> unique_axes(tmp.begin(), tmp.end());
39+
40+
const auto expanded_rank = arg_shape.rank().get_length() + unique_axes.size();
41+
42+
// Normalize then remove repeated axes after normalization.
43+
normalize_axes(op, expanded_rank, unique_axes);
44+
const std::set<int64_t> axes(unique_axes.begin(), unique_axes.end());
45+
46+
out_shape = arg_shape;
47+
for (const auto& axis : axes) {
48+
NODE_VALIDATION_CHECK(op,
49+
static_cast<size_t>(axis) <= out_shape.size() + 1,
50+
"provided 'axes' value ",
51+
axis,
52+
" is not valid.");
53+
// As shape not throw exception on repeated axis it has to be check if insert or append dimension.
54+
// This will be not required if this op has same behaviour as numpy expand_dims.
55+
if (static_cast<size_t>(axis) <= out_shape.size()) {
56+
out_shape.insert(std::next(std::begin(out_shape), axis), 1);
57+
} else {
58+
// Append dimension at end when there is difference in size of input axes and after normalization
59+
// e.g. input shape {2,3,4} axes_value(4,-1) then output rank is determined as 5,
60+
// but after final normalization and removing duplicates it points sam location in shape.
61+
// The numpy throws exception "repeated axis" in that case.
62+
out_shape.push_back(1);
63+
}
64+
}
65+
} else {
66+
out_shape = ov::PartialShape::dynamic();
67+
}
68+
}
69+
} // namespace v0
70+
} // namespace op
71+
} // namespace ov

src/core/src/op/unsqueeze.cpp

+25-50
Original file line numberDiff line numberDiff line change
@@ -9,12 +9,9 @@
99
#include <set>
1010

1111
#include "itt.hpp"
12-
#include "ngraph/builder/reshape.hpp"
13-
#include "ngraph/op/constant.hpp"
14-
#include "ngraph/op/reshape.hpp"
15-
#include "ngraph/op/util/op_types.hpp"
1612
#include "ngraph/runtime/reference/copy.hpp"
1713
#include "ngraph/validation_util.hpp"
14+
#include "unsqueeze_shape_inference.hpp"
1815

1916
using namespace std;
2017
using namespace ngraph;
@@ -27,38 +24,13 @@ op::v0::Unsqueeze::Unsqueeze(const Output<Node>& data, const Output<Node>& axes)
2724

2825
void op::v0::Unsqueeze::validate_and_infer_types() {
2926
OV_OP_SCOPE(v0_Unsqueeze_validate_and_infer_types);
30-
const auto data = input_value(0);
31-
auto data_partial_shape = data.get_partial_shape();
32-
const auto data_rank = data_partial_shape.rank();
3327

34-
const auto axes_constant = get_constant_from_source(input_value(1));
35-
auto axes_pshape = get_input_partial_shape(1);
28+
const auto input_shapes = get_node_input_partial_shapes(*this);
29+
auto output_shapes = std::vector<ov::PartialShape>(1);
3630

37-
NODE_VALIDATION_CHECK(this,
38-
axes_pshape.rank().compatible(0) || axes_pshape.rank().compatible(1),
39-
"Second input (axes) should not be of rank higher than 1. Got: ",
40-
axes_pshape.rank().get_length());
31+
shape_infer(this, input_shapes, output_shapes);
4132

42-
if (data_rank.is_dynamic() || !axes_constant) {
43-
set_output_type(0, get_input_element_type(0), ov::PartialShape::dynamic());
44-
return;
45-
}
46-
47-
const auto axes_values = axes_constant->cast_vector<int64_t>();
48-
uint64_t data_rank_value = data_partial_shape.rank().get_length();
49-
const int64_t expanded_rank = data_rank_value + axes_values.size();
50-
51-
NODE_VALIDATION_CHECK(this, !axes_values.empty(), "'axes' input is mandatory");
52-
53-
auto normalized_axes = normalize_axes(this->description(), axes_values, expanded_rank);
54-
set<int64_t> axes(begin(normalized_axes), end(normalized_axes));
55-
vector<Dimension> output_shape{data_partial_shape};
56-
for (auto axis : axes) {
57-
NODE_VALIDATION_CHECK(this, axis <= expanded_rank, "provided 'axes' value ", axis, " is not valid.");
58-
59-
output_shape.insert(next(begin(output_shape), axis), 1);
60-
}
61-
set_output_type(0, get_input_element_type(0), ov::PartialShape{output_shape});
33+
set_output_type(0, get_input_element_type(0), output_shapes[0]);
6234
}
6335

6436
bool op::v0::Unsqueeze::visit_attributes(AttributeVisitor& visitor) {
@@ -82,29 +54,32 @@ bool evaluate(const HostTensorPtr& arg0, const HostTensorPtr& out) {
8254
return true;
8355
}
8456

85-
bool evaluate_unsqueeze(const HostTensorPtr& arg0, const HostTensorPtr& arg1, const HostTensorPtr& out) {
57+
// The evaluate cannot use shape_infer for output shape calculation as shape inference accepts
58+
// repeated axis and evaluate not. When shape inference will changed to be compatible with `numpy` then
59+
// evaluate and inference can use same function to calculate output shape. TODO for next version for this operator.
60+
bool evaluate_unsqueeze(const Node* node,
61+
const HostTensorPtr& arg0,
62+
const HostTensorPtr& arg1,
63+
const HostTensorPtr& out) {
8664
auto element_type = arg0->get_element_type();
8765
out->set_element_type(element_type);
8866

89-
auto data_shape = arg0->get_shape();
90-
int64_t data_rank = static_cast<int64_t>(data_shape.size());
91-
auto axes_shape = arg1->get_shape();
92-
NGRAPH_CHECK(axes_shape.size() == 1 || axes_shape.empty(),
93-
"Axes to add must be a scalar or 1D tensor with 1 element");
67+
const auto& axes_shape = arg1->get_shape();
68+
ov::op::v0::check_unsqueeze_axes_rank(node, Rank(axes_shape.size()));
69+
70+
const auto& data_shape = arg0->get_shape();
71+
const auto out_rank = static_cast<int64_t>(data_shape.size() + shape_size(axes_shape));
72+
73+
// Get axes and normalize
74+
auto axes = read_index_vector(arg1);
75+
normalize_axes(node, out_rank, axes);
9476

95-
auto out_shape = data_shape;
96-
int64_t out_rank = data_rank + static_cast<int64_t>(shape_size(axes_shape));
97-
// Get axes
98-
vector<int64_t> axes = read_index_vector(arg1);
99-
// Normalize axes
100-
std::transform(axes.begin(), axes.end(), axes.begin(), [out_rank](int64_t i) -> int64_t {
101-
return i < 0 ? out_rank + i : i;
102-
});
10377
// Sort in increasing order
104-
std::set<int64_t, less<int64_t>> axes_set(axes.begin(), axes.end());
78+
std::set<int64_t> axes_set(axes.begin(), axes.end());
10579
NGRAPH_CHECK(axes.size() == axes_set.size(), "Axes has duplicate axis.");
80+
81+
auto out_shape = data_shape;
10682
for (int64_t axis : axes_set) {
107-
NGRAPH_CHECK(axis >= 0 && axis < out_rank, "Axis is out of bounds: ", axis);
10883
out_shape.insert(out_shape.begin() + axis, 1);
10984
}
11085
out->set_shape(out_shape);
@@ -130,7 +105,7 @@ bool op::v0::Unsqueeze::evaluate(const HostTensorVector& outputs, const HostTens
130105
OV_OP_SCOPE(v0_Unsqueeze_evaluate);
131106
NGRAPH_CHECK(validate_host_tensor_vector(inputs, 2));
132107
NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1));
133-
return unsqueeze::evaluate_unsqueeze(inputs[0], inputs[1], outputs[0]);
108+
return unsqueeze::evaluate_unsqueeze(this, inputs[0], inputs[1], outputs[0]);
134109
}
135110

136111
bool op::v0::Unsqueeze::has_evaluate() const {

0 commit comments

Comments
 (0)