
Commit a5a25ff

Author: Roman Donchenko
Message: Remove trailing whitespace
Parent: 1e5bab7
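
The change is purely mechanical: every touched line ends in whitespace that the commit deletes, which is why the added and removed line counts match exactly. For reference, a cleanup pass of this kind can be scripted in a few lines of Python — a minimal sketch, not the tool actually used for this commit, and the suffix list is an assumption:

```python
#!/usr/bin/env python3
# Minimal sketch of a trailing-whitespace cleanup pass, similar in spirit
# to this commit. NOT the tool used here; the suffix list is an assumption.

import pathlib

def strip_trailing_whitespace(root='.', suffixes=('.py', '.cpp', '.hpp', '.md', '.txt')):
    for path in pathlib.Path(root).rglob('*'):
        if not path.is_file() or path.suffix not in suffixes:
            continue
        text = path.read_text(encoding='utf-8')
        # rstrip() each line; preserve a trailing newline if the file had one
        cleaned = '\n'.join(line.rstrip() for line in text.splitlines())
        if text.endswith('\n'):
            cleaned += '\n'
        if cleaned != text:
            path.write_text(cleaned, encoding='utf-8')

if __name__ == '__main__':
    strip_trailing_whitespace()
```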

File tree: 61 files changed, +320 −320 lines

In every hunk below, the removed (-) and re-added (+) lines look identical: the only difference is the invisible trailing whitespace deleted by this commit.


ci/update-requirements.py (+1 −1)

```diff
@@ -65,7 +65,7 @@ def pc(target, *sources):
 pc('ci/requirements-ac.txt',
     'tools/accuracy_checker/requirements-core.in', 'tools/accuracy_checker/requirements.in')
 pc('ci/requirements-ac-test.txt',
-    'tools/accuracy_checker/requirements.in', 'tools/accuracy_checker/requirements-test.in',
+    'tools/accuracy_checker/requirements.in', 'tools/accuracy_checker/requirements-test.in',
     'tools/accuracy_checker/requirements-core.in')
 pc('ci/requirements-conversion.txt',
     'tools/downloader/requirements-pytorch.in', 'tools/downloader/requirements-caffe2.in',
```
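
For context, this hunk sits among calls to `pc(target, *sources)`, which pins a requirements file from the listed `*.in` inputs. A plausible shape of that helper, assuming it simply wraps pip-tools' `pip-compile` — a hypothetical reconstruction, not the repository's actual code:

```python
# Hypothetical reconstruction of pc() from ci/update-requirements.py;
# assumes pip-tools' pip-compile is installed and on PATH.

import subprocess

def pc(target, *sources):
    # Compile the pinned requirements file `target` from the given *.in sources.
    subprocess.run(['pip-compile', '--output-file', target, *sources], check=True)
```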

demos/classification_demo/grid_mat.hpp (+7 −7)

```diff
@@ -51,7 +51,7 @@ class GridMat {
         outImg.create((cellSize.height * size.height) + presenter.graphSize.height,
                       cellSize.width * size.width, CV_8UC3);
         outImg.setTo(0);
-
+
         textSize = cv::getTextSize("", fontType, fontScale, thickness, &baseline);
         accuracyMessageSize = cv::getTextSize("Accuracy (top 0): 0.000", fontType, fontScale, thickness, &baseline);
         testMessageSize = cv::getTextSize(testMessage, fontType, fontScale, thickness, &baseline);
@@ -65,7 +65,7 @@ class GridMat {
                       cv::Scalar(0, 0, 0), cv::FILLED);

         presenter.drawGraphs(outImg);
-
+
         cv::Scalar textColor = cv::Scalar(255, 255, 255);
         int textPadding = 10;

@@ -77,7 +77,7 @@ class GridMat {
                     cv::format("Latency: %dms", static_cast<int>(avgLatency * 1000)),
                     cv::Point(textPadding, (textSize.height + textPadding) * 2),
                     fontType, fontScale, textColor, thickness);
-
+
         if (showAccuracy) {
             cv::putText(outImg,
                         cv::format("Accuracy (top %d): %.3f", FLAGS_nt, accuracy),
@@ -131,13 +131,13 @@ class GridMat {
                     imageInfo.label,
                     cv::Point(labelThickness, cellSize.height - labelThickness - labelTextSize.height),
                     fontType, labelFontScale, textColor, 2);
-
+
         prevImgs.push(frame);

-        cv::Mat cell = outImg(cv::Rect(points[currSourceId], cellSize));
-        frame.copyTo(cell);
+        cv::Mat cell = outImg(cv::Rect(points[currSourceId], cellSize));
+        frame.copyTo(cell);
         cv::rectangle(cell, {0, 0}, {frame.cols, frame.rows}, {255, 50, 50}, labelThickness); // draw a border
-
+
         if (currSourceId == points.size() - 1) {
             currSourceId = 0;
         } else {
```
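
The last hunk above is GridMat's cell-placement code: `outImg(cv::Rect(points[currSourceId], cellSize))` selects a sub-view of the output mosaic, and `frame.copyTo(cell)` writes the frame into it in place. A rough NumPy illustration of the same ROI-copy idea — illustrative only, since the demo itself is C++:

```python
# Illustrative NumPy equivalent of the cell copy in grid_mat.hpp.
# A slice of the output image is a view, so assigning into it edits
# the mosaic in place, like writing through a cv::Mat ROI.

import numpy as np

def paste_into_grid(out_img, frame, top_left, cell_size):
    x, y = top_left
    w, h = cell_size
    out_img[y:y + h, x:x + w] = frame  # frame must already be h x w

grid = np.zeros((480, 640, 3), dtype=np.uint8)
frame = np.full((240, 320, 3), 255, dtype=np.uint8)
paste_into_grid(grid, frame, top_left=(320, 240), cell_size=(320, 240))
```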

demos/classification_demo/main.cpp (+15 −15)

```diff
@@ -86,7 +86,7 @@ std::vector<std::vector<unsigned>> topResults(Blob& inputBlob, unsigned numTop)
     TBlob<float>& tblob = dynamic_cast<TBlob<float>&>(inputBlob);
     size_t batchSize = tblob.getTensorDesc().getDims()[0];
     numTop = static_cast<unsigned>(std::min<size_t>(size_t(numTop), tblob.size()));
-
+
     std::vector<std::vector<unsigned>> output(batchSize);
     for (size_t i = 0; i < batchSize; i++) {
         size_t offset = i * (tblob.size() / batchSize);
@@ -100,7 +100,7 @@ std::vector<std::vector<unsigned>> topResults(Blob& inputBlob, unsigned numTop)

         output[i].assign(indices.begin(), indices.begin() + numTop);
     }
-
+
     return output;
 }

@@ -143,7 +143,7 @@ int main(int argc, char *argv[]) {
         std::map<std::string, unsigned> classIndicesMap;
         std::ifstream inputGtFile(FLAGS_gt);
         if (!inputGtFile.is_open()) throw std::runtime_error("Can't open the ground truth file.");
-
+
         std::string line;
         while (std::getline(inputGtFile, line))
         {
@@ -218,7 +218,7 @@ int main(int argc, char *argv[]) {
         }
         if (inputShape[2] != inputShape[3]) {
             throw std::logic_error("Model input has incorrect image shape. Must be NxN square."
-                                   " Got " + std::to_string(inputShape[2]) +
+                                   " Got " + std::to_string(inputShape[2]) +
                                    "x" + std::to_string(inputShape[3]) + ".");
         }
         int modelInputResolution = inputShape[2];
@@ -324,12 +324,12 @@ int main(int argc, char *argv[]) {
             inferRequests.push_back(executableNetwork.CreateInferRequest());
         }
         // ---------------------------------------------------------------------------------------------------
-
+
         // ----------------------------------------Create output info-----------------------------------------
         Presenter presenter(FLAGS_u, 0);
         int width;
         int height;
-        std::vector<std::string> gridMatRowsCols = split(FLAGS_res, 'x');
+        std::vector<std::string> gridMatRowsCols = split(FLAGS_res, 'x');
         if (gridMatRowsCols.size() != 2) {
             throw std::runtime_error("The value of GridMat resolution flag is not valid.");
         } else {
@@ -338,7 +338,7 @@ int main(int argc, char *argv[]) {
         }
         GridMat gridMat(presenter, cv::Size(width, height));
         // ---------------------------------------------------------------------------------------------------
-
+
         // -----------------------------Prepare variables and data for main loop------------------------------
         typedef std::chrono::duration<double, std::chrono::seconds::period> Sec;
         double avgFPS = 0;
@@ -353,7 +353,7 @@ int main(int argc, char *argv[]) {
         std::condition_variable condVar;
         std::mutex mutex;
         std::exception_ptr irCallbackException;
-
+
         std::queue<InferRequestInfo> emptyInferRequests;
         std::queue<InferRequestInfo> completedInferRequests;
         for (std::size_t i = 0; i < inferRequests.size(); i++) {
@@ -363,7 +363,7 @@ int main(int argc, char *argv[]) {
         auto startTime = std::chrono::steady_clock::now();
         auto elapsedSeconds = std::chrono::steady_clock::duration{0};
         // ---------------------------------------------------------------------------------------------------
-
+
         // -------------------------------------Processing infer requests-------------------------------------
         int framesNumOnCalculationStart = 0;
         auto testDuration = std::chrono::seconds{3};
@@ -396,7 +396,7 @@ int main(int argc, char *argv[]) {
                 }
             }
             if (completedInferRequestInfo) {
-                emptyInferRequests.push({completedInferRequestInfo->inferRequest,
+                emptyInferRequests.push({completedInferRequestInfo->inferRequest,
                                          std::vector<InferRequestInfo::InferRequestImage>()});

                 std::vector<unsigned> correctClasses = {};
@@ -433,7 +433,7 @@ int main(int argc, char *argv[]) {
                 }

                 framesNum += FLAGS_b;
-
+
                 avgFPS = framesNum / std::chrono::duration_cast<Sec>(
                     std::chrono::steady_clock::now() - startTime).count();
                 gridMat.updateMat(shownImagesInfo);
@@ -444,7 +444,7 @@ int main(int argc, char *argv[]) {
                     avgLatency = std::chrono::duration_cast<Sec>(latencySum).count() / framesNum;
                     accuracy = static_cast<double>(correctPredictionsCount) / framesNum;
                     gridMat.textUpdate(avgFPS, avgLatency, accuracy, isTestMode, !FLAGS_gt.empty(), presenter);
-
+
                     if (!FLAGS_no_show) {
                         cv::imshow("classification_demo", gridMat.outImg);
                         key = static_cast<char>(cv::waitKey(1));
@@ -474,7 +474,7 @@ int main(int argc, char *argv[]) {
                                 &irCallbackException] {
                     {
                         std::lock_guard<std::mutex> callbackLock(mutex);
-
+
                         try {
                             completedInferRequests.push(emptyInferRequest);
                         }
@@ -486,7 +486,7 @@ int main(int argc, char *argv[]) {
                     }
                     condVar.notify_one();
                 });
-
+
                 auto inputBlob = emptyInferRequest.inferRequest.GetBlob(inputBlobName);
                 for (unsigned i = 0; i < FLAGS_b; i++) {
                     matU8ToBlob<uint8_t>(emptyInferRequest.images[i].mat, inputBlob, i);
@@ -515,7 +515,7 @@ int main(int argc, char *argv[]) {
         }
         std::cout << presenter.reportMeans() << std::endl;
         // ---------------------------------------------------------------------------------------------------
-
+
         // ------------------------------------Wait for all infer requests------------------------------------
         for (InferRequest& inferRequest : inferRequests)
             inferRequest.Wait(IInferRequest::WaitMode::RESULT_READY);
```
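
The first two hunks above fall inside `topResults()`, which returns the `numTop` highest-scoring class indices for each image in the batch. The same per-row top-k selection sketched in Python — illustrative, not the demo's code:

```python
# Illustrative per-batch top-k, mirroring topResults() in main.cpp:
# for each row of a (batch, classes) score array, return the indices
# of the num_top largest scores, best first.

import numpy as np

def top_results(scores, num_top):
    num_top = min(num_top, scores.shape[1])
    # argsort ascending, take the tail, reverse for descending order
    return [row.argsort()[-num_top:][::-1].tolist() for row in scores]

batch_scores = np.array([[0.1, 0.7, 0.2],
                         [0.5, 0.4, 0.1]])
print(top_results(batch_scores, 2))  # [[1, 2], [0, 1]]
```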

demos/common/src/performance_metrics.cpp (+3 −3)

```diff
@@ -25,7 +25,7 @@ void PerformanceMetrics::update(TimePoint lastRequestStartTime,
         firstFrameProcessed = true;
         return;
     }
-
+
     currentMovingStatistic.latency += currentTime - lastRequestStartTime;
     currentMovingStatistic.period = currentTime - lastUpdateTime;
     currentMovingStatistic.frameCount++;
@@ -40,7 +40,7 @@ void PerformanceMetrics::update(TimePoint lastRequestStartTime,

     // Draw performance stats over frame
     Metrics metrics = getLast();
-
+
     std::ostringstream out;
     if (!std::isnan(metrics.latency)) {
         out << "Latency: " << std::fixed << std::setprecision(1) << metrics.latency << " ms";
@@ -65,7 +65,7 @@ PerformanceMetrics::Metrics PerformanceMetrics::getLast() const {
                          ? lastMovingStatistic.frameCount
                                / std::chrono::duration_cast<Sec>(lastMovingStatistic.period).count()
                          : std::numeric_limits<double>::signaling_NaN();
-
+
     return metrics;
 }

```
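
All three hunks sit in PerformanceMetrics, which accumulates per-request latency and frame counts over a moving window and derives FPS as the frame count divided by the window period. A minimal Python sketch of that bookkeeping, simplified to a single window — field and method names are assumptions, not the demo's API:

```python
# Minimal sketch of the moving-window metric idea in performance_metrics.cpp:
# accumulate per-request latency and a frame count, then report average
# latency (ms) and FPS = frames / elapsed period. Names are illustrative.

import time

class MovingMetrics:
    def __init__(self):
        self.latency_sum = 0.0
        self.frame_count = 0
        self.window_start = time.perf_counter()

    def update(self, request_start_time):
        now = time.perf_counter()
        self.latency_sum += now - request_start_time
        self.frame_count += 1

    def get_last(self):
        period = time.perf_counter() - self.window_start
        latency_ms = 1000 * self.latency_sum / self.frame_count if self.frame_count else float('nan')
        fps = self.frame_count / period if period > 0 else float('nan')
        return latency_ms, fps
```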

demos/crossroad_camera_demo/README.md (+1 −1)

```diff
@@ -89,7 +89,7 @@ If Person Attributes Recognition or Person Reidentification Retail are enabled,
 * **Person Attributes Recognition time** - Inference time of Person Attributes Recognition averaged by the number of detected persons.
 * **Person Reidentification time** - Inference time of Person Reidentification averaged by the number of detected persons.

-> **NOTE**: On VPU devices (Intel® Movidius™ Neural Compute Stick, Intel® Neural Compute Stick 2, and Intel® Vision Accelerator Design with Intel® Movidius™ VPUs) this demo has been tested on the following Model Downloader available topologies:
+> **NOTE**: On VPU devices (Intel® Movidius™ Neural Compute Stick, Intel® Neural Compute Stick 2, and Intel® Vision Accelerator Design with Intel® Movidius™ VPUs) this demo has been tested on the following Model Downloader available topologies:
 > * `person-attributes-recognition-crossroad-0230`
 > * `person-reidentification-retail-0031`
 > * `person-vehicle-bike-detection-crossroad-0078`
```

demos/gaze_estimation_demo/CMakeLists.txt (+1 −1)

```diff
@@ -10,4 +10,4 @@ ie_add_sample(NAME gaze_estimation_demo
               HEADERS ${HEADERS}
               INCLUDE_DIRECTORIES "${CMAKE_CURRENT_SOURCE_DIR}/include"
               DEPENDENCIES monitors
-              OPENCV_DEPENDENCIES highgui)
+              OPENCV_DEPENDENCIES highgui)
```

demos/gaze_estimation_demo/README.md (+4 −4)

```diff
@@ -7,7 +7,7 @@ The demo also relies on the following auxiliary networks:
 * `face-detection-retail-0004` or `face-detection-adas-0001` detection networks for finding faces
 * `head-pose-estimation-adas-0001`, which estimates head pose in Tait-Bryan angles, serving as an input for gaze estimation model
 * `facial-landmarks-35-adas-0002`, which estimates coordinates of facial landmarks for detected faces. The keypoints at the corners of eyes are used to locate eyes regions required for the gaze estimation model
-* `open-closed-eye-0001`, which estimates eyes state of detected faces.
+* `open-closed-eye-0001`, which estimates eyes state of detected faces.

 For more information about the pre-trained models, refer to the [model documentation](../../models/intel/index.md).

@@ -72,7 +72,7 @@ For example, to do inference on a CPU, run the following command:

 ## Demo Output

-The demo uses OpenCV to display the resulting frame with marked gaze vectors, text reports of **FPS** (frames per second performance) for the demo, and, optionally, marked facial landmarks, head pose angles, and face bounding boxes.
+The demo uses OpenCV to display the resulting frame with marked gaze vectors, text reports of **FPS** (frames per second performance) for the demo, and, optionally, marked facial landmarks, head pose angles, and face bounding boxes.
 By default, it shows only gaze estimation results. To see inference results of auxiliary networks, use run-time control keys.

 ### Run-Time Control Keys
@@ -82,14 +82,14 @@ The following keys are supported:
 * G - to toggle displaying gaze vector
 * B - to toggle displaying face detector bounding boxes
 * O - to toggle displaying head pose information
-* L - to toggle displaying facial landmarks
+* L - to toggle displaying facial landmarks
 * E - to toggle displaying eyes state
 * A - to switch on displaying all inference results
 * N - to switch off displaying all inference results
 * F - to flip frames horizontally
 * Esc - to quit the demo

-> **NOTE**: On VPU devices (Intel® Movidius™ Neural Compute Stick, Intel® Neural Compute Stick 2, and Intel® Vision Accelerator Design with Intel® Movidius™ VPUs) this demo has been tested on the following Model Downloader available topologies:
+> **NOTE**: On VPU devices (Intel® Movidius™ Neural Compute Stick, Intel® Neural Compute Stick 2, and Intel® Vision Accelerator Design with Intel® Movidius™ VPUs) this demo has been tested on the following Model Downloader available topologies:
 >* `face-detection-adas-0001`
 >* `face-detection-retail-0004`
 >* `facial-landmarks-35-adas-0002`
```

demos/gaze_estimation_demo/include/gaze_estimator.hpp (+1 −1)

```diff
@@ -28,7 +28,7 @@ class GazeEstimator: public BaseEstimator {
     IEWrapper ieWrapper;
     std::string outputBlobName;
     bool rollAlign;
-
+
     void rotateImageAroundCenter(const cv::Mat& srcImage, cv::Mat& dstImage, float angle) const;
 };
 }  // namespace gaze_estimation
```

demos/gaze_estimation_demo/src/ie_wrapper.cpp (+1 −1)

```diff
@@ -99,7 +99,7 @@ void IEWrapper::getOutputBlob(const std::string& blobName,
     for (auto dim : blobDims) {
         dataSize *= dim;
     }
-
+
     LockedMemory<const void> blobMapped = as<MemoryBlob>(request.GetBlob(blobName))->rmap();
     auto buffer = blobMapped.as<float *>();

```
demos/human_pose_estimation_demo/README.md

+1-1
Original file line numberDiff line numberDiff line change
@@ -56,7 +56,7 @@ For example, to do inference on a CPU, run the following command:
5656
## Demo Output
5757

5858
The demo uses OpenCV to display the resulting frame with estimated poses and text report of **FPS** - frames per second performance for the human pose estimation demo.
59-
> **NOTE**: On VPU devices (Intel® Movidius™ Neural Compute Stick, Intel® Neural Compute Stick 2, and Intel® Vision Accelerator Design with Intel® Movidius™ VPUs) this demo has been tested on the following Model Downloader available topologies:
59+
> **NOTE**: On VPU devices (Intel® Movidius™ Neural Compute Stick, Intel® Neural Compute Stick 2, and Intel® Vision Accelerator Design with Intel® Movidius™ VPUs) this demo has been tested on the following Model Downloader available topologies:
6060
>* `human-pose-estimation-0001`
6161
> Other models may produce unexpected results on these devices.
6262

demos/interactive_face_detection_demo/README.md (+1 −1)

```diff
@@ -101,7 +101,7 @@ For example, to do inference on a GPU with the OpenVINO&trade; toolkit pre-train
 The demo uses OpenCV to display the resulting frame with detections (rendered as bounding boxes and labels, if provided).
 The demo reports total image throughput which includes frame decoding time, inference time, time to render bounding boxes and labels, and time to display the results.

-> **NOTE**: On VPU devices (Intel® Movidius™ Neural Compute Stick, Intel® Neural Compute Stick 2, and Intel® Vision Accelerator Design with Intel® Movidius™ VPUs) this demo has been tested on the following Model Downloader available topologies:
+> **NOTE**: On VPU devices (Intel® Movidius™ Neural Compute Stick, Intel® Neural Compute Stick 2, and Intel® Vision Accelerator Design with Intel® Movidius™ VPUs) this demo has been tested on the following Model Downloader available topologies:
 >* `age-gender-recognition-retail-0013`
 >* `emotions-recognition-retail-0003`
 >* `face-detection-adas-0001`
```

demos/multi_channel/object_detection_demo_yolov3/README.md (+3 −3)

````diff
@@ -3,7 +3,7 @@
 This demo provides an inference pipeline for multi-channel yolo v3. The demo uses Yolo v3 Object Detection network. You can follow [this](https://docs.openvinotoolkit.org/latest/_docs_MO_DG_prepare_model_convert_model_tf_specific_Convert_YOLO_From_Tensorflow.html) page convert the YOLO V3 and tiny YOLO V3 into IR model and execute this demo with converted IR model.

 > **NOTES**:
-> If you don't use [this](https://docs.openvinotoolkit.org/latest/_docs_MO_DG_prepare_model_convert_model_tf_specific_Convert_YOLO_From_Tensorflow.html) page to convert the model, it may not work.
+> If you don't use [this](https://docs.openvinotoolkit.org/latest/_docs_MO_DG_prepare_model_convert_model_tf_specific_Convert_YOLO_From_Tensorflow.html) page to convert the model, it may not work.

 Other demo objectives are:

@@ -49,7 +49,7 @@ Options:
     -u                         Optional. List of monitors to show initially.
 ```

-To run the demo, you can use public pre-train model and follow [this](https://docs.openvinotoolkit.org/latest/_docs_MO_DG_prepare_model_convert_model_tf_specific_Convert_YOLO_From_Tensorflow.html) page for instruction of how to convert it to IR model.
+To run the demo, you can use public pre-train model and follow [this](https://docs.openvinotoolkit.org/latest/_docs_MO_DG_prepare_model_convert_model_tf_specific_Convert_YOLO_From_Tensorflow.html) page for instruction of how to convert it to IR model.

 > **NOTE**: Before running the demo with a trained model, make sure the model is converted to the Inference Engine format (\*.xml + \*.bin) using the [Model Optimizer tool](https://docs.openvinotoolkit.org/latest/_docs_MO_DG_Deep_Learning_Model_Optimizer_DevGuide.html).

@@ -67,7 +67,7 @@ Video files will be processed repeatedly.
 To achieve 100% utilization of one Myriad X, the thumb rule is to run 4 infer requests on each Myriad X. Option `-nireq 32` can be added to above command to use 100% of HDDL-R card. The 32 here is 8 (Myriad X on HDDL-R card) x 4 (infer requests), such as following command:

 ```sh
-./multi_channel_object_detection_demo_yolov3 -m $PATH_OF_YOLO_V3_MODEL -d HDDL
+./multi_channel_object_detection_demo_yolov3 -m $PATH_OF_YOLO_V3_MODEL -d HDDL
 -i /path/to/file1 /path/to/file2 /path/to/file3 /path/to/file4 -nireq 32
 ```

````
