Skip to content

Commit ab21a9d

Browse files
barnasm1Diya910praaszmlukasze
authored
[CORE] Clamp pre post processing (#29446)
### Details: - add clamp operator for pre post processing ### Tickets: - [*161080*](https://jira.devtools.intel.com/browse/CVS-161080) --------- Co-authored-by: Diya910 <[email protected]> Co-authored-by: Diya910 <[email protected]> Co-authored-by: Pawel Raasz <[email protected]> Co-authored-by: Michal Lukaszewski <[email protected]>
1 parent e3316ca commit ab21a9d

File tree

6 files changed

+158
-22
lines changed

6 files changed

+158
-22
lines changed

src/core/include/openvino/core/preprocess/postprocess_steps.hpp

+7
Original file line numberDiff line numberDiff line change
@@ -33,6 +33,13 @@ class OPENVINO_API PostProcessSteps final {
3333
/// \brief Default destructor
3434
~PostProcessSteps();
3535

36+
/// \brief Add clamp postprocess operation. Clamp each element of input to the specified range [min_value, max_value]
37+
/// \param min_value Minimum value to clamp to.
38+
/// \param max_value Maximum value to clamp to.
39+
///
40+
/// \return Reference to 'this' to allow chaining with other calls in a builder-like manner
41+
PostProcessSteps& clamp(double min_value, double max_value);
42+
3643
/// \brief Add convert element type post-process operation
3744
///
3845
/// \param type Desired type of output. If not specified, type will be obtained from 'tensor' output information

src/core/include/openvino/core/preprocess/preprocess_steps.hpp

+8
Original file line numberDiff line numberDiff line change
@@ -36,6 +36,14 @@ class OPENVINO_API PreProcessSteps final {
3636
/// \brief Default destructor
3737
~PreProcessSteps();
3838

39+
/// \brief Add clamp preprocess operation. Clamp each element of input to the specified range [min_value, max_value]
40+
///
41+
/// \param min_value Minimum value to clamp to.
42+
/// \param max_value Maximum value to clamp to.
43+
///
44+
/// \return Reference to 'this' to allow chaining with other calls in a builder-like manner
45+
PreProcessSteps& clamp(double min_value, double max_value);
46+
3947
/// \brief Add convert element type preprocess operation
4048
///
4149
/// \param type Desired type of input.

src/core/src/preprocess/pre_post_process.cpp

+10
Original file line numberDiff line numberDiff line change
@@ -379,6 +379,11 @@ InputTensorInfo& InputTensorInfo::set_from(const ov::Tensor& runtime_tensor) {
379379
PreProcessSteps::PreProcessSteps() : m_impl(std::unique_ptr<PreProcessStepsImpl>(new PreProcessStepsImpl())) {}
380380
PreProcessSteps::~PreProcessSteps() = default;
381381

382+
// Queues a clamp step (element-wise limit to [min_value, max_value]) on the
// implementation object; returns *this so calls can be chained builder-style.
PreProcessSteps& PreProcessSteps::clamp(double min_value, double max_value) {
    m_impl->add_clamp(min_value, max_value);
    return *this;
}
386+
382387
PreProcessSteps& PreProcessSteps::scale(float value) {
383388
m_impl->add_scale_impl(std::vector<float>{value});
384389
return *this;
@@ -508,6 +513,11 @@ OutputModelInfo& OutputModelInfo::set_color_format(const ov::preprocess::ColorFo
508513
PostProcessSteps::PostProcessSteps() : m_impl(std::unique_ptr<PostProcessStepsImpl>(new PostProcessStepsImpl())) {}
509514
PostProcessSteps::~PostProcessSteps() = default;
510515

516+
// Queues a clamp step (element-wise limit to [min_value, max_value]) on the
// implementation object; returns *this so calls can be chained builder-style.
PostProcessSteps& PostProcessSteps::clamp(double min_value, double max_value) {
    m_impl->add_clamp(min_value, max_value);
    return *this;
}
520+
511521
PostProcessSteps& PostProcessSteps::convert_element_type(const element::Type& type) {
512522
m_impl->add_convert_impl(type);
513523
return *this;

src/core/src/preprocess/preprocess_steps_impl.cpp

+31
Original file line numberDiff line numberDiff line change
@@ -99,6 +99,25 @@ void PreStepsList::add_scale_impl(const std::vector<float>& values) {
9999
"scale " + vector_to_string(values));
100100
}
101101

102+
void PreStepsList::add_clamp(double min_value, double max_value) {
103+
std::stringstream name_builder;
104+
name_builder << "clamp(min " << min_value << ", max " << max_value << ")";
105+
106+
m_actions.emplace_back(
107+
[min_value, max_value](const std::vector<Output<Node>>& nodes,
108+
const std::shared_ptr<Model>& function,
109+
PreprocessingContext& ctxt) {
110+
OPENVINO_ASSERT(nodes.size() == 1,
111+
"Can't apply clamp to multi-plane input. Suggesting to convert current image to "
112+
"RGB/BGR color format using 'PreProcessSteps::convert_color'");
113+
114+
const auto& node = nodes.front();
115+
auto clamp_op = std::make_shared<ov::op::v0::Clamp>(node, min_value, max_value);
116+
return std::make_tuple(std::vector<Output<Node>>{clamp_op}, true);
117+
},
118+
name_builder.str());
119+
}
120+
102121
void PreStepsList::add_mean_impl(const std::vector<float>& values) {
103122
m_actions.emplace_back(
104123
[values](const std::vector<Output<Node>>& nodes,
@@ -688,6 +707,18 @@ std::tuple<std::vector<Output<Node>>, bool> PreStepsList::cut_last_channel(const
688707
}
689708

690709
//------------- Post processing ------
710+
void PostStepsList::add_clamp(double min_value, double max_value) {
711+
std::stringstream name_builder;
712+
name_builder << "clamp(min " << min_value << ", max " << max_value << ")";
713+
714+
m_actions.emplace_back(
715+
[min_value, max_value](const Output<Node>& node, PostprocessingContext& ctxt) {
716+
auto clamp_op = std::make_shared<ov::op::v0::Clamp>(node, min_value, max_value);
717+
return std::make_tuple(Output<Node>{clamp_op}, true);
718+
},
719+
name_builder.str());
720+
}
721+
691722
void PostStepsList::add_convert_impl(const element::Type& type) {
692723
m_actions.emplace_back(
693724
[type](const Output<Node>& node, PostprocessingContext& ctxt) {

src/core/src/preprocess/preprocess_steps_impl.hpp

+2
Original file line numberDiff line numberDiff line change
@@ -157,6 +157,7 @@ struct InternalPreprocessAction {
157157
class PreStepsList {
158158
public:
159159
void add_scale_impl(const std::vector<float>& values);
160+
void add_clamp(double min_value, double max_value);
160161
void add_mean_impl(const std::vector<float>& values);
161162
void add_pad_impl(const std::vector<int>& pads_begin,
162163
const std::vector<int>& pads_end,
@@ -220,6 +221,7 @@ struct InternalPostprocessAction {
220221
/// \brief PostProcessStepsImpl - internal data structure
221222
class PostStepsList {
222223
public:
224+
void add_clamp(double min_value, double max_value);
223225
void add_convert_impl(const element::Type& type);
224226
void add_convert_layout_impl(const Layout& layout);
225227
void add_convert_layout_impl(const std::vector<uint64_t>& dims);

src/core/tests/preprocess.cpp

+100-22
Original file line numberDiff line numberDiff line change
@@ -60,11 +60,10 @@ static std::shared_ptr<Model> create_conv(element::Type in_type, const PartialSh
6060
return std::make_shared<Model>(ResultVector{res}, ParameterVector{data1});
6161
}
6262

63-
template <int N>
64-
static std::shared_ptr<Model> create_n_inputs(element::Type type, const PartialShape& shape) {
63+
static std::shared_ptr<Model> create_n_inputs(int N, element::Type type, const PartialShape& shape) {
6564
ResultVector res;
6665
ParameterVector params;
67-
for (size_t i = 0; i < N; i++) {
66+
for (int i = 0; i < N; i++) {
6867
auto index_str = std::to_string(i);
6968
auto data1 = std::make_shared<op::v0::Parameter>(type, shape);
7069
data1->set_friendly_name("input" + index_str);
@@ -80,6 +79,21 @@ static std::shared_ptr<Model> create_n_inputs(element::Type type, const PartialS
8079
return std::make_shared<Model>(res, params);
8180
}
8281

82+
// Builds a model with no Parameters at all: a scalar Constant (value 1) feeding a
// single Result. Used to exercise preprocessing on a model that has zero inputs.
static std::shared_ptr<Model> create_no_inputs(element::Type type) {
    const std::string suffix = std::to_string(0);

    auto constant = std::make_shared<op::v0::Constant>(type, Shape{}, 1);
    constant->set_friendly_name("input" + suffix);
    constant->get_output_tensor(0).set_names({"tensor_input" + suffix});

    auto result = std::make_shared<op::v0::Result>(constant);
    result->set_friendly_name("Result" + suffix);
    result->get_output_tensor(0).set_names({"tensor_output" + suffix});

    return std::make_shared<Model>(ResultVector{result}, ParameterVector{});
}
96+
8397
namespace {
8498
void set_model_as_v10(ov::Model& model) {
8599
model.get_rt_info()["version"] = static_cast<int64_t>(10);
@@ -110,6 +124,70 @@ TEST(pre_post_process, simple_mean_scale_getters_f64) {
110124
EXPECT_EQ(f->get_output_element_type(0), element::f64);
111125
}
112126

127+
TEST(pre_post_process, clamp_operation_on_input_preprocess) {
    auto model = create_simple_function(element::f32, Shape{1, 3, 2, 2});

    // Before preprocessing the parameter feeds straight into the Relu.
    {
        const auto& param = model->get_parameters().front();
        const auto* consumer = param->output(0).get_target_inputs().begin()->get_node();
        EXPECT_STREQ(consumer->get_type_name(), "Relu");
    }

    auto p = PrePostProcessor(model);
    p.input().preprocess().clamp(0.0, 1.0);
    model = p.build();

    // After build() a Clamp node sits directly after the parameter.
    {
        const auto& param = model->get_parameters().front();
        const auto* consumer = param->output(0).get_target_inputs().begin()->get_node();
        EXPECT_STREQ(consumer->get_type_name(), "Clamp");
    }
}
144+
145+
TEST(pre_post_process, clamp_operation_on_input_preprocess_throw_no_input) {
    auto model = create_no_inputs(element::f32);

    // Sanity check: the constant's only consumer is the Result node.
    {
        const auto& first_op = model->get_ordered_ops().front();
        const auto* consumer = first_op->output(0).get_target_inputs().begin()->get_node();
        EXPECT_STREQ(consumer->get_type_name(), "Result");
    }

    auto p = PrePostProcessor(model);

    // Selecting an input on a model that has no parameters must fail.
    EXPECT_ANY_THROW(p.input().preprocess().clamp(0.0, 1.0); model = p.build());
}
158+
159+
TEST(pre_post_process, clamp_operation_on_input_preprocess_throw_more_than_one_input) {
    auto model = create_n_inputs(2, element::f32, Shape{1, 3, 2, 2});

    // Sanity check: the first parameter feeds a Relu.
    {
        const auto& param = model->get_parameters().front();
        const auto* consumer = param->output(0).get_target_inputs().begin()->get_node();
        EXPECT_STREQ(consumer->get_type_name(), "Relu");
    }

    auto p = PrePostProcessor(model);

    // p.input() without a name/index is ambiguous for a two-input model and must throw.
    EXPECT_ANY_THROW(p.input().preprocess().clamp(0.0, 1.0); model = p.build());
}
172+
173+
TEST(pre_post_process, clamp_operation_on_output_postprocess) {
    auto model = create_simple_function(element::f32, Shape{1, 3, 2, 2});

    // Before postprocessing the Result is fed directly by the Relu.
    {
        const auto& result = model->get_results().front();
        const auto producer = result->input_value(0).get_node_shared_ptr();
        EXPECT_STREQ(producer->get_type_name(), "Relu");
    }

    auto p = PrePostProcessor(model);
    p.output().postprocess().clamp(0.0, 1.0);
    model = p.build();

    // After build() a Clamp node is inserted in front of the Result.
    {
        const auto& result = model->get_results().front();
        const auto producer = result->input_value(0).get_node_shared_ptr();
        EXPECT_STREQ(producer->get_type_name(), "Clamp");
    }
}
190+
113191
TEST(pre_post_process, convert_element_type_and_scale) {
114192
auto f = create_simple_function(element::i8, Shape{1, 3, 2, 2});
115193
auto p = PrePostProcessor(f);
@@ -198,7 +276,7 @@ TEST(pre_post_process, empty_preprocess) {
198276
}
199277

200278
TEST(pre_post_process, preprocess_assert_input_without_index) {
201-
auto f = create_n_inputs<2>(element::f32, Shape{1, 3, 2, 2});
279+
auto f = create_n_inputs(2, element::f32, Shape{1, 3, 2, 2});
202280
auto p = PrePostProcessor(f);
203281
EXPECT_ANY_THROW(p.input().preprocess().mean(0.f); f = p.build());
204282
EXPECT_ANY_THROW(p.input("some_non_existing_name").preprocess().mean(0.f); f = p.build());
@@ -612,7 +690,7 @@ TEST(pre_post_process, convert_color_incorrect_subnames) {
612690
}
613691

614692
TEST(pre_post_process, convert_color_duplicate_subnames) {
615-
auto f = create_n_inputs<2>(element::f32, PartialShape{1, 2, 2, 3});
693+
auto f = create_n_inputs(2, element::f32, PartialShape{1, 2, 2, 3});
616694
f->get_parameters()[0]->get_output_tensor(0).set_names({"tensor_input1"});
617695
f->get_parameters()[1]->get_output_tensor(0).set_names({"tensor_input1/CustomUV"});
618696
auto p = PrePostProcessor(f);
@@ -670,7 +748,7 @@ TEST(pre_post_process, convert_layout_implicit_several_time) {
670748
}
671749

672750
TEST(pre_post_process, tensor_set_layout) {
673-
auto f = create_n_inputs<6>(element::f32, Shape{1, 3, 480, 640});
751+
auto f = create_n_inputs(6, element::f32, Shape{1, 3, 480, 640});
674752
PrePostProcessor preprocessor(f);
675753
preprocessor.input(0).tensor().set_layout("NCHW");
676754
preprocessor.input(0).preprocess().mean({1.0, 2.0, 3.0});
@@ -709,7 +787,7 @@ TEST(pre_post_process, tensor_set_layout) {
709787
}
710788

711789
TEST(pre_post_process, postprocess_set_model_layout) {
712-
auto f = create_n_inputs<2>(element::f32, Shape{1, 3, 224, 224});
790+
auto f = create_n_inputs(2, element::f32, Shape{1, 3, 224, 224});
713791
PrePostProcessor p(f);
714792
p.output(0).model().set_layout("NCHW");
715793
p.output(0).postprocess().convert_layout("NHWC");
@@ -790,7 +868,7 @@ TEST(pre_post_process, custom_preprocessing) {
790868
}
791869

792870
TEST(pre_post_process, test_2_inputs_basic) {
793-
auto f = create_n_inputs<2>(element::f32, Shape{1, 3, 1, 1});
871+
auto f = create_n_inputs(2, element::f32, Shape{1, 3, 1, 1});
794872
auto p = PrePostProcessor(f);
795873
p.input(1).preprocess().mean(1.f).scale(2.0f);
796874
f = p.build();
@@ -1196,7 +1274,7 @@ TEST(pre_post_process, preprocess_convert_layout_invalid_dims_dyn_shape) {
11961274
}
11971275

11981276
TEST(pre_post_process, preprocess_convert_layout_partially_defined) {
1199-
auto f = create_n_inputs<8>(element::f32, Shape{1, 2, 3, 4, 5});
1277+
auto f = create_n_inputs(8, element::f32, Shape{1, 2, 3, 4, 5});
12001278

12011279
auto p = PrePostProcessor(f);
12021280
p.input(0).tensor().set_layout("nc???");
@@ -1235,7 +1313,7 @@ TEST(pre_post_process, preprocess_convert_layout_partially_defined) {
12351313
}
12361314

12371315
TEST(pre_post_process, preprocess_convert_layout_partially_defined_trivial) {
1238-
auto f = create_n_inputs<4>(element::f32, Shape{1, 2, 3, 4, 5});
1316+
auto f = create_n_inputs(4, element::f32, Shape{1, 2, 3, 4, 5});
12391317
auto ops_num = f->get_ordered_ops().size();
12401318

12411319
auto p = PrePostProcessor(f);
@@ -1261,7 +1339,7 @@ TEST(pre_post_process, preprocess_convert_layout_partially_defined_trivial) {
12611339
}
12621340

12631341
TEST(pre_post_process, preprocess_convert_layout_squeeze) {
1264-
auto f = create_n_inputs<3>(element::f32, Shape{1, 3, 1, 480, 640});
1342+
auto f = create_n_inputs(3, element::f32, Shape{1, 3, 1, 480, 640});
12651343
auto p = PrePostProcessor(f);
12661344

12671345
p.input(0).tensor().set_layout("HWC");
@@ -1283,7 +1361,7 @@ TEST(pre_post_process, preprocess_convert_layout_squeeze) {
12831361
}
12841362

12851363
TEST(pre_post_process, preprocess_convert_layout_squeeze_dynamic) {
1286-
auto f = create_n_inputs<2>(element::f32, PartialShape{Dimension::dynamic(), 3, 1, 480, 640});
1364+
auto f = create_n_inputs(2, element::f32, PartialShape{Dimension::dynamic(), 3, 1, 480, 640});
12871365
auto p = PrePostProcessor(f);
12881366

12891367
p.input(0).tensor().set_layout("HWC");
@@ -1300,7 +1378,7 @@ TEST(pre_post_process, preprocess_convert_layout_squeeze_dynamic) {
13001378
}
13011379

13021380
TEST(pre_post_process, preprocess_convert_layout_squeeze_unsupported) {
1303-
auto f = create_n_inputs<1>(element::f32, PartialShape{Dimension::dynamic(), 3, 1, 480, 640});
1381+
auto f = create_n_inputs(1, element::f32, PartialShape{Dimension::dynamic(), 3, 1, 480, 640});
13041382
EXPECT_THROW(
13051383
{
13061384
auto p = PrePostProcessor(f);
@@ -1490,7 +1568,7 @@ TEST(pre_post_process, preprocess_from) {
14901568
}
14911569

14921570
TEST(pre_post_process, preprocess_crop) {
1493-
auto model = create_n_inputs<1>(element::f32, PartialShape::dynamic());
1571+
auto model = create_n_inputs(1, element::f32, PartialShape::dynamic());
14941572
auto p = PrePostProcessor(model);
14951573

14961574
p.input().tensor().set_shape(Shape{1, 3, 200, 400});
@@ -1513,7 +1591,7 @@ TEST(pre_post_process, preprocess_crop) {
15131591
}
15141592

15151593
TEST(pre_post_process, preprocess_crop_wrong_dims) {
1516-
auto model = create_n_inputs<1>(element::f32, PartialShape::dynamic());
1594+
auto model = create_n_inputs(1, element::f32, PartialShape::dynamic());
15171595
auto p = PrePostProcessor(model);
15181596

15191597
p.input().tensor().set_shape(Shape{1, 3, 200, 400});
@@ -1533,7 +1611,7 @@ TEST(pre_post_process, preprocess_crop_wrong_dims) {
15331611
}
15341612

15351613
TEST(pre_post_process, preprocess_crop_wrong_dims_not_aligned) {
1536-
auto model = create_n_inputs<1>(element::f32, PartialShape{1, 3, 100, 200});
1614+
auto model = create_n_inputs(1, element::f32, PartialShape{1, 3, 100, 200});
15371615
auto p = PrePostProcessor(model);
15381616

15391617
p.input().tensor().set_shape(Shape{1, 3, 200});
@@ -1611,7 +1689,7 @@ TEST(pre_post_process, trivial_model_convert_element_type_explicit) {
16111689
}
16121690

16131691
TEST(pre_post_process, postprocess_convert_element_type_default) {
1614-
auto f = create_n_inputs<2>(element::f32, Shape{1, 3, 2, 2});
1692+
auto f = create_n_inputs(2, element::f32, Shape{1, 3, 2, 2});
16151693
auto name = f->output(1).get_node_shared_ptr()->get_friendly_name();
16161694
auto name_last_op = f->get_results().front()->get_input_source_output(0).get_node_shared_ptr()->get_friendly_name();
16171695
auto tensor_names = f->output(1).get_tensor().get_names();
@@ -1657,7 +1735,7 @@ TEST(pre_post_process, postprocess_convert_element_type_implicit) {
16571735
}
16581736

16591737
TEST(pre_post_process, preprocess_keep_params_order) {
1660-
auto f = create_n_inputs<3>(element::f32, Shape{1, 2, 2, 3});
1738+
auto f = create_n_inputs(3, element::f32, Shape{1, 2, 2, 3});
16611739
auto p = PrePostProcessor(f);
16621740

16631741
p.input(1).tensor().set_color_format(ColorFormat::NV12_TWO_PLANES, {"Y", "UV"});
@@ -1726,7 +1804,7 @@ TEST(pre_post_process, postprocess_set_model_layout_when_already_exists) {
17261804
}
17271805

17281806
TEST(pre_post_process, postprocess_convert_layout_explicit_no_target) {
1729-
auto f = create_n_inputs<2>(element::f32, Shape{1, 3, 2, 2});
1807+
auto f = create_n_inputs(2, element::f32, Shape{1, 3, 2, 2});
17301808
auto p = PrePostProcessor(f);
17311809

17321810
p.output(1).model().set_layout("NCHW");
@@ -2033,14 +2111,14 @@ TEST(pre_post_process, postprocess_implicit_convert_element_type_and_layout) {
20332111
}
20342112

20352113
TEST(pre_post_process, postprocess_assert_output_without_index) {
2036-
auto f = create_n_inputs<2>(element::f32, Shape{1, 3, 2, 2});
2114+
auto f = create_n_inputs(2, element::f32, Shape{1, 3, 2, 2});
20372115
auto p = PrePostProcessor(f);
20382116
EXPECT_ANY_THROW(p.output().tensor().set_element_type(element::f32); p.build());
20392117
EXPECT_ANY_THROW(p.output("some_non_existing_name").tensor().set_element_type(element::f32); p.build());
20402118
}
20412119

20422120
TEST(pre_post_process, postprocess_keep_results_order) {
2043-
auto f = create_n_inputs<3>(element::f32, Shape{1, 3, 2, 2});
2121+
auto f = create_n_inputs(3, element::f32, Shape{1, 3, 2, 2});
20442122
auto names0 = f->output(0).get_tensor().get_names();
20452123
auto names1 = f->output(1).get_tensor().get_names();
20462124
auto names2 = f->output(2).get_tensor().get_names();
@@ -2244,7 +2322,7 @@ TEST(pre_post_process, postprocess_nothing_applied) {
22442322
}
22452323

22462324
TEST(pre_post_process, exception_safety) {
2247-
auto f = create_n_inputs<2>(element::f32, Shape{1, 3, 224, 224});
2325+
auto f = create_n_inputs(2, element::f32, Shape{1, 3, 224, 224});
22482326
auto name0 = f->input(0).get_node_shared_ptr()->get_friendly_name();
22492327
auto tensor_names0 = f->input(0).get_tensor().get_names();
22502328
auto name1 = f->input(1).get_node_shared_ptr()->get_friendly_name();

0 commit comments

Comments
 (0)