 27 |  27 | #include "tensorflow/lite/builtin_op_data.h"
 28 |  28 | #include "tensorflow/lite/kernels/register.h"
 29 |  29 |
 30 |     | -namespace tflite_example
 31 |     | -{
    |  30 | +namespace tflite_example {
 32 |  31 |
 33 |     | -  std::unique_ptr<tflite::Interpreter> BuildTfliteInterpreter(
 34 |     | -      const tflite::FlatBufferModel &model, int num_threads)
 35 |     | -  {
 36 |     | -    tflite::ops::builtin::BuiltinOpResolver resolver;
    |  32 | +std::unique_ptr<tflite::Interpreter>
    |  33 | +BuildTfliteInterpreter(const tflite::FlatBufferModel &model, int num_threads) {
    |  34 | +  tflite::ops::builtin::BuiltinOpResolver resolver;
 37 |  35 | #ifdef TIDL_OFFLOAD
 38 |     | -    resolver.AddCustom(tidl::kTidlSubgraphOp, tidl::RegisterTidlSubgraphOp());
    |  36 | +  resolver.AddCustom(tidl::kTidlSubgraphOp, tidl::RegisterTidlSubgraphOp());
 39 |  37 | #endif
 40 |  38 |
 41 |     | -    std::unique_ptr<tflite::Interpreter> interpreter;
 42 |     | -    if (tflite::InterpreterBuilder(model, resolver)(&interpreter) != kTfLiteOk)
 43 |     | -    {
 44 |     | -      std::cerr << "Failed to build interpreter." << std::endl;
 45 |     | -    }
 46 |     | -    interpreter->SetNumThreads(num_threads);
 47 |     | -    if (interpreter->AllocateTensors() != kTfLiteOk)
 48 |     | -    {
 49 |     | -      std::cerr << "Failed to allocate tensors." << std::endl;
 50 |     | -    }
 51 |     | -    return interpreter;
    |  39 | +  std::unique_ptr<tflite::Interpreter> interpreter;
    |  40 | +  if (tflite::InterpreterBuilder(model, resolver)(&interpreter) != kTfLiteOk) {
    |  41 | +    std::cerr << "Failed to build interpreter." << std::endl;
 52 |  42 |   }
 53 |     | -
 54 |     | -  /*
 55 |     | -   * Get time in us
 56 |     | -   */
 57 |     | -  double get_us(struct timeval t)
 58 |     | -  {
 59 |     | -    return (t.tv_sec * 1000000 + t.tv_usec);
    |  43 | +  interpreter->SetNumThreads(num_threads);
    |  44 | +  if (interpreter->AllocateTensors() != kTfLiteOk) {
    |  45 | +    std::cerr << "Failed to allocate tensors." << std::endl;
 60 |  46 |   }
    |  47 | +  return interpreter;
    |  48 | +}
 61 |  49 |
 62 |     | -  std::vector<float> RunInference(tflite::Interpreter *interpreter, double &inference_time_ms)
 63 |     | -  {
 64 |     | -    std::vector<float> output_data;
 65 |     | -
 66 |     | -    struct timeval start_time, stop_time;
 67 |     | -    gettimeofday(&start_time, nullptr);
 68 |     | -
 69 |     | -    // Running the inference
 70 |     | -    interpreter->Invoke();
 71 |     | -
 72 |     | -    gettimeofday(&stop_time, nullptr);
 73 |     | -    inference_time_ms = (get_us(stop_time) - get_us(start_time)) / 1000;
 74 |     | -
 75 |     | -    const auto &output_indices = interpreter->outputs();
 76 |     | -    const int num_outputs = output_indices.size();
 77 |     | -    int out_idx = 0;
 78 |     | -    for (int i = 0; i < num_outputs; ++i)
 79 |     | -    {
 80 |     | -      const auto *out_tensor = interpreter->tensor(output_indices[i]);
 81 |     | -      assert(out_tensor != nullptr);
 82 |     | -      if (out_tensor->type == kTfLiteUInt8)
 83 |     | -      {
 84 |     | -        const int num_values = out_tensor->bytes;
 85 |     | -        output_data.resize(out_idx + num_values);
 86 |     | -        const uint8_t *output = interpreter->typed_output_tensor<uint8_t>(i);
 87 |     | -        for (int j = 0; j < num_values; ++j)
 88 |     | -        {
 89 |     | -          output_data[out_idx++] = (output[j] - out_tensor->params.zero_point) *
 90 |     | -                                   out_tensor->params.scale;
 91 |     | -        }
 92 |     | -      }
 93 |     | -      else if (out_tensor->type == kTfLiteFloat32)
 94 |     | -      {
 95 |     | -        const int num_values = out_tensor->bytes / sizeof(float);
 96 |     | -        output_data.resize(out_idx + num_values);
 97 |     | -        const float *output = interpreter->typed_output_tensor<float>(i);
 98 |     | -        for (int j = 0; j < num_values; ++j)
 99 |     | -        {
100 |     | -          output_data[out_idx++] = output[j];
101 |     | -        }
    |  50 | +/*
    |  51 | + * Get time in us
    |  52 | + */
    |  53 | +double get_us(struct timeval t) { return (t.tv_sec * 1000000 + t.tv_usec); }
    |  54 | +
    |  55 | +std::vector<float> RunInference(tflite::Interpreter *interpreter,
    |  56 | +                                double &inference_time_ms) {
    |  57 | +  std::vector<float> output_data;
    |  58 | +
    |  59 | +  struct timeval start_time, stop_time;
    |  60 | +  gettimeofday(&start_time, nullptr);
    |  61 | +
    |  62 | +  // Running the inference
    |  63 | +  interpreter->Invoke();
    |  64 | +
    |  65 | +  gettimeofday(&stop_time, nullptr);
    |  66 | +  inference_time_ms = (get_us(stop_time) - get_us(start_time)) / 1000;
    |  67 | +
    |  68 | +  const auto &output_indices = interpreter->outputs();
    |  69 | +  const int num_outputs = output_indices.size();
    |  70 | +  int out_idx = 0;
    |  71 | +  for (int i = 0; i < num_outputs; ++i) {
    |  72 | +    const auto *out_tensor = interpreter->tensor(output_indices[i]);
    |  73 | +    assert(out_tensor != nullptr);
    |  74 | +    if (out_tensor->type == kTfLiteUInt8) {
    |  75 | +      const int num_values = out_tensor->bytes;
    |  76 | +      output_data.resize(out_idx + num_values);
    |  77 | +      const uint8_t *output = interpreter->typed_output_tensor<uint8_t>(i);
    |  78 | +      for (int j = 0; j < num_values; ++j) {
    |  79 | +        output_data[out_idx++] = (output[j] - out_tensor->params.zero_point) *
    |  80 | +                                 out_tensor->params.scale;
102 |  81 |       }
103 |     | -      else
104 |     | -      {
105 |     | -        std::cerr << "Tensor " << out_tensor->name
106 |     | -                  << " has unsupported output type: " << out_tensor->type
107 |     | -                  << std::endl;
    |  82 | +    } else if (out_tensor->type == kTfLiteFloat32) {
    |  83 | +      const int num_values = out_tensor->bytes / sizeof(float);
    |  84 | +      output_data.resize(out_idx + num_values);
    |  85 | +      const float *output = interpreter->typed_output_tensor<float>(i);
    |  86 | +      for (int j = 0; j < num_values; ++j) {
    |  87 | +        output_data[out_idx++] = output[j];
108 |  88 |       }
    |  89 | +    } else {
    |  90 | +      std::cerr << "Tensor " << out_tensor->name
    |  91 | +                << " has unsupported output type: " << out_tensor->type
    |  92 | +                << std::endl;
109 |  93 |     }
110 |     | -    return output_data;
111 |     | -  }
112 |     | -
113 |     | -  std::array<int, 3> GetInputShape(const tflite::Interpreter &interpreter,
114 |     | -                                   int index)
115 |     | -  {
116 |     | -    const int tensor_index = interpreter.inputs()[index];
117 |     | -    const TfLiteIntArray *dims = interpreter.tensor(tensor_index)->dims;
118 |     | -    return std::array<int, 3>{dims->data[1], dims->data[2], dims->data[3]};
119 |     | -  }
120 |     | -
121 |     | -  std::array<int, 3> GetOutputShape(const tflite::Interpreter &interpreter,
122 |     | -                                    int index)
123 |     | -  {
124 |     | -    const int tensor_index = interpreter.outputs()[index];
125 |     | -    const TfLiteIntArray *dims = interpreter.tensor(tensor_index)->dims;
126 |     | -    return std::array<int, 3>{dims->data[1], dims->data[2], dims->data[3]};
127 |  94 |   }
    |  95 | +  return output_data;
    |  96 | +}
    |  97 | +
    |  98 | +std::array<int, 3> GetInputShape(const tflite::Interpreter &interpreter,
    |  99 | +                                 int index) {
    | 100 | +  const int tensor_index = interpreter.inputs()[index];
    | 101 | +  const TfLiteIntArray *dims = interpreter.tensor(tensor_index)->dims;
    | 102 | +  return std::array<int, 3>{dims->data[1], dims->data[2], dims->data[3]};
    | 103 | +}
    | 104 | +
    | 105 | +std::array<int, 3> GetOutputShape(const tflite::Interpreter &interpreter,
    | 106 | +                                  int index) {
    | 107 | +  const int tensor_index = interpreter.outputs()[index];
    | 108 | +  const TfLiteIntArray *dims = interpreter.tensor(tensor_index)->dims;
    | 109 | +  return std::array<int, 3>{dims->data[1], dims->data[2], dims->data[3]};
    | 110 | +}
128 | 111 |
129 | 112 | } // namespace tflite_example
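
For reference, the diff above only reformats the helpers; a minimal sketch of how they fit together is shown below. It is not part of the commit: the header name "tflite_example.h", the model path "model.tflite", and the thread count are placeholder assumptions, while BuildTfliteInterpreter, RunInference, and GetInputShape come straight from this file.

// Minimal usage sketch (assumptions: a hypothetical header "tflite_example.h"
// declaring the helpers in this diff, a placeholder model path, 2 threads).
#include <iostream>
#include <memory>
#include <vector>

#include "tensorflow/lite/model.h"
#include "tflite_example.h"  // hypothetical header for the helpers above

int main() {
  // Load a flatbuffer model from disk; the path is a placeholder.
  auto model = tflite::FlatBufferModel::BuildFromFile("model.tflite");
  if (model == nullptr) {
    std::cerr << "Failed to load model." << std::endl;
    return 1;
  }

  // Build the interpreter; the TIDL custom op is registered when the
  // library was built with TIDL_OFFLOAD defined.
  auto interpreter =
      tflite_example::BuildTfliteInterpreter(*model, /*num_threads=*/2);

  // Input shape of the first input tensor as {height, width, channels}.
  const auto in_shape = tflite_example::GetInputShape(*interpreter, 0);
  std::cout << "Input: " << in_shape[0] << "x" << in_shape[1] << "x"
            << in_shape[2] << std::endl;

  // ... fill the input tensor here, then run and time the inference.
  double inference_time_ms = 0.0;
  const std::vector<float> output =
      tflite_example::RunInference(interpreter.get(), inference_time_ms);
  std::cout << "Inference took " << inference_time_ms << " ms and produced "
            << output.size() << " output values." << std::endl;
  return 0;
}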