
Commit 6ea8ae2

Author: zhangdanfeng

reformat

Signed-off-by: zhangdanfeng <[email protected]>

1 parent: 0f44496
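The commit message only says "reformat" and does not name the tool used. For orientation, the style visible in the diffs below (attached braces, 2-space indentation, ~80-column wrapping, two spaces before trailing comments, pointers bound to the variable name) is what clang-format produces from an LLVM-based configuration; a hypothetical .clang-format that would approximate it, not part of this commit:

# Hypothetical configuration; the repository may use different settings.
BasedOnStyle: LLVM
IndentWidth: 2
ColumnLimit: 80
SpacesBeforeTrailingComments: 2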

File tree

4 files changed: +91 / -107 lines

  model_utils.cc
  model_utils.h
  utils.cc
  utils.h


model_utils.cc

Lines changed: 70 additions & 87 deletions
@@ -27,103 +27,86 @@
 #include "tensorflow/lite/builtin_op_data.h"
 #include "tensorflow/lite/kernels/register.h"
 
-namespace tflite_example
-{
+namespace tflite_example {
 
-std::unique_ptr<tflite::Interpreter> BuildTfliteInterpreter(
-    const tflite::FlatBufferModel &model, int num_threads)
-{
-  tflite::ops::builtin::BuiltinOpResolver resolver;
+std::unique_ptr<tflite::Interpreter>
+BuildTfliteInterpreter(const tflite::FlatBufferModel &model, int num_threads) {
+  tflite::ops::builtin::BuiltinOpResolver resolver;
 #ifdef TIDL_OFFLOAD
-  resolver.AddCustom(tidl::kTidlSubgraphOp, tidl::RegisterTidlSubgraphOp());
+  resolver.AddCustom(tidl::kTidlSubgraphOp, tidl::RegisterTidlSubgraphOp());
 #endif
 
-  std::unique_ptr<tflite::Interpreter> interpreter;
-  if (tflite::InterpreterBuilder(model, resolver)(&interpreter) != kTfLiteOk)
-  {
-    std::cerr << "Failed to build interpreter." << std::endl;
-  }
-  interpreter->SetNumThreads(num_threads);
-  if (interpreter->AllocateTensors() != kTfLiteOk)
-  {
-    std::cerr << "Failed to allocate tensors." << std::endl;
-  }
-  return interpreter;
+  std::unique_ptr<tflite::Interpreter> interpreter;
+  if (tflite::InterpreterBuilder(model, resolver)(&interpreter) != kTfLiteOk) {
+    std::cerr << "Failed to build interpreter." << std::endl;
   }
-
-/*
- * Get time in us
- */
-double get_us(struct timeval t)
-{
-  return (t.tv_sec * 1000000 + t.tv_usec);
+  interpreter->SetNumThreads(num_threads);
+  if (interpreter->AllocateTensors() != kTfLiteOk) {
+    std::cerr << "Failed to allocate tensors." << std::endl;
   }
+  return interpreter;
+}
 
-std::vector<float> RunInference(tflite::Interpreter *interpreter, double &inference_time_ms)
-{
-  std::vector<float> output_data;
-
-  struct timeval start_time, stop_time;
-  gettimeofday(&start_time, nullptr);
-
-  // Running the inference
-  interpreter->Invoke();
-
-  gettimeofday(&stop_time, nullptr);
-  inference_time_ms = (get_us(stop_time) - get_us(start_time)) / 1000;
-
-  const auto &output_indices = interpreter->outputs();
-  const int num_outputs = output_indices.size();
-  int out_idx = 0;
-  for (int i = 0; i < num_outputs; ++i)
-  {
-    const auto *out_tensor = interpreter->tensor(output_indices[i]);
-    assert(out_tensor != nullptr);
-    if (out_tensor->type == kTfLiteUInt8)
-    {
-      const int num_values = out_tensor->bytes;
-      output_data.resize(out_idx + num_values);
-      const uint8_t *output = interpreter->typed_output_tensor<uint8_t>(i);
-      for (int j = 0; j < num_values; ++j)
-      {
-        output_data[out_idx++] = (output[j] - out_tensor->params.zero_point) *
-                                 out_tensor->params.scale;
-      }
-    }
-    else if (out_tensor->type == kTfLiteFloat32)
-    {
-      const int num_values = out_tensor->bytes / sizeof(float);
-      output_data.resize(out_idx + num_values);
-      const float *output = interpreter->typed_output_tensor<float>(i);
-      for (int j = 0; j < num_values; ++j)
-      {
-        output_data[out_idx++] = output[j];
-      }
+/*
+ * Get time in us
+ */
+double get_us(struct timeval t) { return (t.tv_sec * 1000000 + t.tv_usec); }
+
+std::vector<float> RunInference(tflite::Interpreter *interpreter,
+                                double &inference_time_ms) {
+  std::vector<float> output_data;
+
+  struct timeval start_time, stop_time;
+  gettimeofday(&start_time, nullptr);
+
+  // Running the inference
+  interpreter->Invoke();
+
+  gettimeofday(&stop_time, nullptr);
+  inference_time_ms = (get_us(stop_time) - get_us(start_time)) / 1000;
+
+  const auto &output_indices = interpreter->outputs();
+  const int num_outputs = output_indices.size();
+  int out_idx = 0;
+  for (int i = 0; i < num_outputs; ++i) {
+    const auto *out_tensor = interpreter->tensor(output_indices[i]);
+    assert(out_tensor != nullptr);
+    if (out_tensor->type == kTfLiteUInt8) {
+      const int num_values = out_tensor->bytes;
+      output_data.resize(out_idx + num_values);
+      const uint8_t *output = interpreter->typed_output_tensor<uint8_t>(i);
+      for (int j = 0; j < num_values; ++j) {
+        output_data[out_idx++] = (output[j] - out_tensor->params.zero_point) *
+                                 out_tensor->params.scale;
       }
-    else
-    {
-      std::cerr << "Tensor " << out_tensor->name
-                << " has unsupported output type: " << out_tensor->type
-                << std::endl;
+    } else if (out_tensor->type == kTfLiteFloat32) {
+      const int num_values = out_tensor->bytes / sizeof(float);
+      output_data.resize(out_idx + num_values);
+      const float *output = interpreter->typed_output_tensor<float>(i);
+      for (int j = 0; j < num_values; ++j) {
+        output_data[out_idx++] = output[j];
       }
+    } else {
+      std::cerr << "Tensor " << out_tensor->name
+                << " has unsupported output type: " << out_tensor->type
+                << std::endl;
     }
-  return output_data;
-}
-
-std::array<int, 3> GetInputShape(const tflite::Interpreter &interpreter,
-                                 int index)
-{
-  const int tensor_index = interpreter.inputs()[index];
-  const TfLiteIntArray *dims = interpreter.tensor(tensor_index)->dims;
-  return std::array<int, 3>{dims->data[1], dims->data[2], dims->data[3]};
-}
-
-std::array<int, 3> GetOutputShape(const tflite::Interpreter &interpreter,
-                                  int index)
-{
-  const int tensor_index = interpreter.outputs()[index];
-  const TfLiteIntArray *dims = interpreter.tensor(tensor_index)->dims;
-  return std::array<int, 3>{dims->data[1], dims->data[2], dims->data[3]};
   }
+  return output_data;
+}
+
+std::array<int, 3> GetInputShape(const tflite::Interpreter &interpreter,
+                                 int index) {
+  const int tensor_index = interpreter.inputs()[index];
+  const TfLiteIntArray *dims = interpreter.tensor(tensor_index)->dims;
+  return std::array<int, 3>{dims->data[1], dims->data[2], dims->data[3]};
+}
+
+std::array<int, 3> GetOutputShape(const tflite::Interpreter &interpreter,
+                                  int index) {
+  const int tensor_index = interpreter.outputs()[index];
+  const TfLiteIntArray *dims = interpreter.tensor(tensor_index)->dims;
+  return std::array<int, 3>{dims->data[1], dims->data[2], dims->data[3]};
+}
 
 } // namespace tflite_example
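In the kTfLiteUInt8 branch of RunInference above, each quantized output value is mapped back to a float with the usual affine dequantization, real = (quantized - zero_point) * scale. A tiny stand-alone illustration of that formula; the scale and zero_point values here are made up:

#include <cstdint>
#include <iostream>

int main() {
  const float scale = 0.00390625f;  // hypothetical quantization scale (1 / 256)
  const int zero_point = 0;         // hypothetical zero point
  const uint8_t q = 128;            // one raw uint8 output value
  const float real_value = (q - zero_point) * scale;  // 128 * 1/256 = 0.5
  std::cout << real_value << std::endl;
  return 0;
}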

model_utils.h

Lines changed: 13 additions & 13 deletions
@@ -27,23 +27,23 @@
 #include "tensorflow/lite/interpreter.h"
 #include "tensorflow/lite/model.h"
 
-namespace tflite_example
-{
+namespace tflite_example {
 
-// Builds tflite Interpreter.
-std::unique_ptr<tflite::Interpreter> BuildTfliteInterpreter(
-    const tflite::FlatBufferModel &model, int num_threads);
+// Builds tflite Interpreter.
+std::unique_ptr<tflite::Interpreter>
+BuildTfliteInterpreter(const tflite::FlatBufferModel &model, int num_threads);
 
-// Runs inference using given `interpreter`
-std::vector<float> RunInference(tflite::Interpreter *interpreter, double &inference_time_ms);
+// Runs inference using given `interpreter`
+std::vector<float> RunInference(tflite::Interpreter *interpreter,
+                                double &inference_time_ms);
 
-// Returns input tensor shape in the form {height, width, channels}.
-std::array<int, 3> GetInputShape(const tflite::Interpreter &interpreter,
-                                 int index);
+// Returns input tensor shape in the form {height, width, channels}.
+std::array<int, 3> GetInputShape(const tflite::Interpreter &interpreter,
+                                 int index);
 
-// Returns output tensor shape in the form {height, width, channels}.
-std::array<int, 3> GetOutputShape(const tflite::Interpreter &interpreter,
-                                  int index);
+// Returns output tensor shape in the form {height, width, channels}.
+std::array<int, 3> GetOutputShape(const tflite::Interpreter &interpreter,
+                                  int index);
 
 } // namespace tflite_example
 #endif // MODEL_UTILS_H_
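For orientation, the four helpers declared above are normally combined by the example's main program. A minimal hypothetical caller, not part of this commit; the model path, thread count, and the omitted input-filling step are placeholders:

#include <iostream>
#include <vector>

#include "model_utils.h"

int main() {
  // Placeholder model path; any .tflite flatbuffer would do.
  auto model = tflite::FlatBufferModel::BuildFromFile("model.tflite");
  if (!model) {
    std::cerr << "Failed to load model." << std::endl;
    return 1;
  }

  auto interpreter =
      tflite_example::BuildTfliteInterpreter(*model, /*num_threads=*/2);

  // {height, width, channels} of input tensor 0.
  auto in_shape = tflite_example::GetInputShape(*interpreter, 0);
  std::cout << "Input: " << in_shape[0] << "x" << in_shape[1] << "x"
            << in_shape[2] << std::endl;

  // ... copy preprocessed pixels into the input tensor here ...

  double inference_time_ms = 0.0;
  std::vector<float> output =
      tflite_example::RunInference(interpreter.get(), inference_time_ms);
  std::cout << output.size() << " output values in " << inference_time_ms
            << " ms" << std::endl;
  return 0;
}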

utils.cc

Lines changed: 7 additions & 6 deletions
@@ -29,7 +29,7 @@ bool SetupInput(eInputType input_source, string input_path, VideoCapture &cap,
   if (input_source == INPUT_Image) {
     // Read image input
     input_image = imread(input_path);
-    if (!input_image.data) // Check for invalid input
+    if (!input_image.data)  // Check for invalid input
     {
       cout << "Could not open or find the image" << std::endl;
       return false;
@@ -84,12 +84,12 @@ void CollectFrames(std::vector<uint8_t> &output, int input_source,
   if (input_source != INPUT_Image) {
     if (cap.grab()) {
       if (cap.retrieve(in_image)) {
-        if (input_source == INPUT_Camera) { // Crop central square portion
+        if (input_source == INPUT_Camera) {  // Crop central square portion
           int loc_xmin = (in_image.size().width - in_image.size().height) /
-                         2; // Central position
+                         2;  // Central position
           int loc_ymin = 0;
-          int loc_w = in_image.size().height; // Set the width to height to get
-                                              // the square cropping
+          int loc_w = in_image.size().height;  // Set the width to height to get
+                                               // the square cropping
           int loc_h = in_image.size().height;
           // do cropping and resize
           cv::resize(in_image(Rect(loc_xmin, loc_ymin, loc_w, loc_h)), image,
@@ -110,7 +110,8 @@ void CollectFrames(std::vector<uint8_t> &output, int input_source,
     frame_retrieved = true;
   }
 
-  if (!frame_retrieved) return;
+  if (!frame_retrieved)
+    return;
 
   Mat *spl = new Mat[channels];
   split(image, spl);
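The camera path in CollectFrames above crops the largest centered square out of each frame before resizing it for the network. A stand-alone sketch of that step, not from the repository; the input path and the 300x300 target size are placeholders, and like the original it assumes the frame is wider than it is tall:

#include <opencv2/opencv.hpp>

int main() {
  cv::Mat in_image = cv::imread("frame.jpg");  // placeholder input frame
  if (in_image.empty())
    return 1;

  // Central square: width equals the frame height, anchored at the middle.
  int loc_xmin = (in_image.size().width - in_image.size().height) / 2;
  int loc_ymin = 0;
  int loc_w = in_image.size().height;
  int loc_h = in_image.size().height;

  cv::Mat image;
  cv::resize(in_image(cv::Rect(loc_xmin, loc_ymin, loc_w, loc_h)), image,
             cv::Size(300, 300));  // placeholder network input size
  return 0;
}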

utils.h

Lines changed: 1 addition & 1 deletion
@@ -47,4 +47,4 @@ void PrepareInput(T *out, std::vector<uint8_t> &input,
   }
 }
 
-#endif // UTILS_H_
+#endif  // UTILS_H_
