undefined reference to `DebugLog' in micro_error_reporter.cpp #35
Comments
I met the same error, not solved yet...
I get the same error when using […]
The problem comes from the missing `DebugLog` implementation: `DebugLog` is only declared in `debug_log.h`, and it is used in `micro_error_reporter.cpp`. The implementation is platform-specific and is not present in the precompiled archive, so the linker cannot resolve the symbol.
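For reference, the declaration the linker is trying to resolve looks like this in the 2.4-era tflite-micro sources (paraphrased from `tensorflow/lite/micro/debug_log.h`; treat the exact path within this particular library build as an assumption):

```cpp
// tensorflow/lite/micro/debug_log.h (paraphrased, 2.4-era)
// Each port is expected to supply its own definition of DebugLog; only the
// declaration lives here, which is why a missing port surfaces at link time.
#ifdef __cplusplus
extern "C" {
#endif

void DebugLog(const char* s);

#ifdef __cplusplus
}
#endif
```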
I found a solution, although it is not fully tested yet. In my version of the library, the `DebugLog` definition did not match the declaration in `debug_log.h`. After correcting the definition so that it matches the declaration, the undefined reference went away.
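In the same spirit, one workaround is to supply the missing definition from the sketch itself. This is a minimal sketch, assuming the `void DebugLog(const char*)` declaration shown above and an Arduino core where `Serial` is available; it is not the library's own implementation:

```cpp
#include <Arduino.h>

// Minimal DebugLog definition with C linkage, matching the declaration in
// debug_log.h. The linker resolves the undefined reference against this
// instead of the platform port missing from the precompiled archive.
extern "C" void DebugLog(const char* s) {
  Serial.print(s);
}
```

Placed at the top of the sketch, this routes the library's error messages to the serial monitor (once `Serial.begin()` has run in `setup()`).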
I am working with the Arduino_TensorFlowLite-2.4.0-ALPHA-precompiled library and trying to compile my Arduino sketch, but I keep getting this error:
```
Library Arduino_TensorFlowLite has been declared precompiled:
Using precompiled library in C:\Users\prane\Documents\Arduino\libraries\Arduino_TensorFlowLite-2.4.0-ALPHA-precompiled\src\cortex-m4\fpv4-sp-d16-softfp
C:\Users\prane\Documents\Arduino\libraries\Arduino_TensorFlowLite-2.4.0-ALPHA-precompiled\src\cortex-m4\fpv4-sp-d16-softfp\libtensorflowlite.a(micro_error_reporter.cpp.o): In function `tflite::MicroErrorReporter::Report(char const*, std::__va_list)':
/home/arduino/workspace/Libraries-Google-Tensorflow-scraper/Arduino/libraries/tensorflow_lite_mirror/src/tensorflow/lite/micro/micro_error_reporter.cpp:35: undefined reference to `DebugLog'
/home/arduino/workspace/Libraries-Google-Tensorflow-scraper/Arduino/libraries/tensorflow_lite_mirror/src/tensorflow/lite/micro/micro_error_reporter.cpp:36: undefined reference to `DebugLog'
collect2.exe: error: ld returned 1 exit status

exit status 1
Compilation error: exit status 1
```
I've included my sketch below. Any help would be greatly appreciated. Thanks.
```cpp
#include "TensorFlowLite.h"

#include "tensorflow/lite/micro/all_ops_resolver.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_interpreter.h"
// #include "tensorflow/lite/micro/system_setup.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/version.h"

#include "image_data.h"
#include "model_data.h"

const int kInputTensorSize = 1 * 28 * 28 * 1;
const int kNumClasses = 10;

namespace {
tflite::ErrorReporter* error_reporter = nullptr;
const tflite::Model* model = nullptr;
tflite::MicroInterpreter* interpreter = nullptr;
TfLiteTensor* input = nullptr;
TfLiteTensor* output = nullptr;
int inference_count = 0;

constexpr int kTensorArenaSize = 2 * 1024;
uint8_t tensor_arena[kTensorArenaSize];
}  // namespace

void setup() {
  Serial.begin(115200);
  // tflite::InitializeTarget();
  // memset(tensor_arena, 0, kTensorArenaSize * sizeof(uint8_t));

  // Set up logging.
  static tflite::MicroErrorReporter micro_error_reporter;
  error_reporter = &micro_error_reporter;

  model = tflite::GetModel(model_data);
  if (model->version() != TFLITE_SCHEMA_VERSION) {
    Serial.println("Model provided is schema version "
                   + String(model->version()) + " not equal "
                   + "to supported version "
                   + String(TFLITE_SCHEMA_VERSION));
    return;
  } else {
    Serial.println("Model version: " + String(model->version()));
  }

  // This pulls in all the operation implementations we need.
  static tflite::AllOpsResolver resolver;

  // Build an interpreter to run the model with.
  static tflite::MicroInterpreter static_interpreter(
      model, resolver, tensor_arena, kTensorArenaSize, error_reporter);
  interpreter = &static_interpreter;

  // Alternative: build the interpreter on the heap instead.
  // tflite::MicroInterpreter* static_interpreter_ptr = new tflite::MicroInterpreter(
  //     model, resolver, tensor_arena, kTensorArenaSize, error_reporter);
  // interpreter = static_interpreter_ptr;

  // Allocate memory from the tensor_arena for the model's tensors.
  TfLiteStatus allocate_status = interpreter->AllocateTensors();
  if (allocate_status != kTfLiteOk) {
    Serial.println("AllocateTensors() failed");
    return;
  } else {
    Serial.println("AllocateTensors() succeeded");
  }

  size_t used_size = interpreter->arena_used_bytes();
  Serial.println("Arena used bytes: " + String(used_size));

  input = interpreter->input(0);
  output = interpreter->output(0);

  // Check the input tensor type.
  if (input->type != kTfLiteFloat32) {
    Serial.println("Input type mismatch: expected input type is float32");
    return;
  } else {
    Serial.println("Input type is float32");
  }

  Serial.println("Model input:");
  Serial.println("input->type: " + String(input->type));
  Serial.println("dims->size: " + String(input->dims->size));
  for (int n = 0; n < input->dims->size; ++n) {
    Serial.println("dims->data[n]: " + String(input->dims->data[n]));
  }

  Serial.println("Model output:");
  Serial.println("dims->size: " + String(output->dims->size));
  for (int n = 0; n < output->dims->size; ++n) {
    Serial.println("dims->data[n]: " + String(output->dims->data[n]));
  }
}

void loop() {
  // Copy the image data into a local input buffer.
  const uint8_t* kImageDataPtr = kImageData;  // Pointer to start of image data
  uint8_t input_image[kInputTensorSize];
  for (int i = 0; i < kInputTensorSize; i++) {
    input_image[i] = *(kImageDataPtr++);
  }

  // Normalize pixel values to [0, 1] and fill the input tensor.
  for (int i = 0; i < kInputTensorSize; i++) {
    input->data.f[i] = (float)input_image[i] / 255.0f;
  }

  // Run inference.
  interpreter->Invoke();

  // Print the predicted class (argmax over the output scores).
  int predicted_class = -1;
  float max_score = -1;
  for (int i = 0; i < kNumClasses; i++) {
    float score = output->data.f[i];
    if (score > max_score) {
      predicted_class = i;
      max_score = score;
    }
  }
  Serial.println(predicted_class);
}
```
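Separately from the linker error: with a 1x28x28x1 float32 input, the input tensor alone needs 784 * 4 = 3136 bytes, which is already larger than the 2 * 1024 = 2048-byte `tensor_arena` above, so `AllocateTensors()` can be expected to fail even once the sketch links. A quick sanity check of that arithmetic (my own check, not from the original post):

```cpp
// Size of the model's float32 input vs. the arena declared in the sketch.
constexpr int kInputBytes = 1 * 28 * 28 * 1 * sizeof(float);  // 3136 bytes
constexpr int kArenaBytes = 2 * 1024;                         // 2048 bytes
static_assert(kInputBytes > kArenaBytes,
              "the input tensor alone is bigger than the whole arena");
```

Bumping `kTensorArenaSize` (e.g. into the tens of kilobytes, depending on the model) avoids that failure.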