Running the Model on an ESP32 Microcontroller
TensorFlow Lite for Microcontrollers is designed to run machine learning models on a variety of microcontroller devices with
only a few kilobytes of memory. The ESP32 is just one of
the platforms supported by the framework. Here I have used Visual Studio Code with the PlatformIO
IDE extension to build an Arduino project that targets the ESP32 microcontroller.
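For reference, a minimal platformio.ini along these lines sets up such a project. The esp32dev board name is an assumption, and the TensorFlow Lite for Microcontrollers sources are assumed to be brought in separately (as project files or a registry library), so no lib_deps entry is shown.

; platformio.ini - minimal sketch (board name is an assumption)
[env:esp32dev]
platform = espressif32
board = esp32dev
framework = arduino
; match Serial.begin(115200) in main.cpp
monitor_speed = 115200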
The C++ code listed in the XorNN.h and XorNN.cpp files defines a XorNeuralNetwork
wrapper class
that encapsulates the neural network model and the calls made to it. The main.cpp
listing shows that the model is instantiated when the XorNeuralNetwork
class is constructed, and that
each combination of inputs to the XOR function is then computed by calling the RunModel
method.
main.cpp
// XOR Neural Network Problem.
// Guy Fernando (2022)
#include "XorNN.h"
#include "Arduino.h"
XorNeuralNetwork* xorNeuralNetwork;
void setup()
{
    Serial.begin(115200);

    xorNeuralNetwork = new XorNeuralNetwork();
    Serial.printf("\nArena used bytes = %d\n\n", xorNeuralNetwork->GetArenaUsedBytes());
}
void loop()
{
    Serial.printf("XOR(0, 0) = %1.0f\n", xorNeuralNetwork->RunModel(0., 0.));
    Serial.printf("XOR(0, 1) = %1.0f\n", xorNeuralNetwork->RunModel(0., 1.));
    Serial.printf("XOR(1, 0) = %1.0f\n", xorNeuralNetwork->RunModel(1., 0.));
    Serial.printf("XOR(1, 1) = %1.0f\n", xorNeuralNetwork->RunModel(1., 1.));
    Serial.println("");

    // Enter deep sleep so the results are printed only once.
    esp_deep_sleep_start();
}
Before the neural network can be built for an ESP32 microcontroller, the previously saved .tflite FlatBuffer model file
is converted to a C byte array definition using the UNIX xxd
command, and the resulting header is added to the project along with
XorNN.h, XorNN.cpp and main.cpp.
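Assuming the model was saved as xor_model.tflite (the file name here is an assumption; xxd derives the xor_model_tflite array name from it), the conversion is a one-liner:

xxd -i xor_model.tflite > XorModel.h

The generated XorModel.h contains an unsigned char xor_model_tflite[] array holding the FlatBuffer bytes, together with an unsigned int xor_model_tflite_len giving its size.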
After the project is built and uploaded to the ESP32 microcontroller, the PlatformIO serial monitor is used to observe the program output.
The reported tensor arena memory used by the neural network model is 612 bytes, and the XOR function is calculated correctly.
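Given the printf calls in main.cpp, the serial monitor output should read along these lines (the arena figure being the 612 bytes reported above):

Arena used bytes = 612

XOR(0, 0) = 0
XOR(0, 1) = 1
XOR(1, 0) = 1
XOR(1, 1) = 0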
XorNN.h
// XOR Neural Network Problem.
// Guy Fernando (2022)
#ifndef __XOR_NN_H__
#define __XOR_NN_H__
// Include the library headers.
// To use the TensorFlow Lite for Microcontrollers library, we must include the following header files.
#include "tensorflow/lite/micro/all_ops_resolver.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_interpreter.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/version.h"
using namespace tflite;
class XorNeuralNetwork
{
public:
    XorNeuralNetwork(void);
    float RunModel(float x1, float x2);
    size_t GetArenaUsedBytes(void) const;

protected:
    ~XorNeuralNetwork(void);

private:
    void AssertModelShape(void);

private:
    uint8_t* tensor_arena;
    MicroInterpreter* interpreter;
    MicroErrorReporter micro_error_reporter;
    ErrorReporter* error_reporter = &micro_error_reporter;
};
#endif // __XOR_NN_H__
XorNN.cpp
// XOR Neural Network Problem.
// Guy Fernando (2022)
#include "XorNN.h"
// Include the model header.
#include "XorModel.h"
// Include the unit test framework header.
#include "tensorflow/lite/micro/testing/micro_test.h"
TF_LITE_MICRO_TESTS_BEGIN
// No unit tests defined.
TF_LITE_MICRO_TESTS_END
XorNeuralNetwork::XorNeuralNetwork(void)
{
    // Load the model.
    const Model* model = GetModel(xor_model_tflite);
    if (model->version() != TFLITE_SCHEMA_VERSION)
    {
        TF_LITE_REPORT_ERROR(error_reporter,
            "Model provided is schema version %d not equal to supported version %d.\n",
            model->version(), TFLITE_SCHEMA_VERSION);
    }
    // Instantiate the operations resolver. The interpreter keeps a reference
    // to the resolver, so it is made static to outlive this constructor.
    static AllOpsResolver resolver;
    // Allocate memory for the tensor arena.
    const int tensor_arena_size = 1000;
    this->tensor_arena = (uint8_t*) malloc(tensor_arena_size);
    if (!this->tensor_arena)
    {
        TF_LITE_REPORT_ERROR(error_reporter, "Could not allocate arena");
    }
    // Instantiate the interpreter.
    this->interpreter = new MicroInterpreter(
        model, resolver, tensor_arena, tensor_arena_size, error_reporter);

    // Allocate tensors from the arena, checking that the arena is large enough.
    if (this->interpreter->AllocateTensors() != kTfLiteOk)
    {
        TF_LITE_REPORT_ERROR(error_reporter, "AllocateTensors() failed");
    }

    // Validate the model shape.
    AssertModelShape();
}
XorNeuralNetwork::~XorNeuralNetwork(void)
{
    // Clean up the interpreter and the arena. The arena was allocated with
    // malloc, so it is released with free rather than delete.
    delete this->interpreter;
    this->interpreter = nullptr;
    free(this->tensor_arena);
    this->tensor_arena = nullptr;
}
float XorNeuralNetwork::RunModel(float x1, float x2)
{
    // Provide the input.
    TfLiteTensor* input = this->interpreter->input(0);
    input->data.f[0] = x1;
    input->data.f[1] = x2;

    // Run the model.
    TfLiteStatus invoke_status = this->interpreter->Invoke();
    if (invoke_status != kTfLiteOk)
    {
        TF_LITE_REPORT_ERROR(this->error_reporter, "Invoke failed\n");
    }

    // Obtain the output.
    TfLiteTensor* output = this->interpreter->output(0);
    float value = output->data.f[0];

    return value;
}
size_t XorNeuralNetwork::GetArenaUsedBytes(void) const
{
    return this->interpreter->arena_used_bytes();
}
void XorNeuralNetwork::AssertModelShape(void)
{
    TfLiteTensor* input = this->interpreter->input(0);
    TF_LITE_MICRO_EXPECT_NE(nullptr, input);
    TF_LITE_MICRO_EXPECT_EQ(2, input->dims->size);
    TF_LITE_MICRO_EXPECT_EQ(1, input->dims->data[0]);
    TF_LITE_MICRO_EXPECT_EQ(kTfLiteFloat32, input->type);

    TfLiteTensor* output = this->interpreter->output(0);
    TF_LITE_MICRO_EXPECT_EQ(kTfLiteFloat32, output->type);
}