Problem with BLE in Edge Impulse code - Arduino

→ I have this code for a trained voice-recognition model, downloaded from Edge Impulse:

/* Edge Impulse ingestion SDK
 * Copyright (c) 2022 EdgeImpulse Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
// If your target is limited in memory remove this macro to save 10K RAM
#define EIDSP_QUANTIZE_FILTERBANK   0

/*
 ** NOTE: If you run into TFLite arena allocation issue.
 **
 ** This may be due to dynamic memory fragmentation.
 ** Try defining "-DEI_CLASSIFIER_ALLOCATION_STATIC" in boards.local.txt (create
 ** if it doesn't exist) and copy this file to
 ** `<ARDUINO_CORE_INSTALL_PATH>/arduino/hardware/<mbed_core>/<core_version>/`.
 **
 ** See
 ** (https://support.arduino.cc/hc/en-us/articles/360012076960-Where-are-the-installed-cores-located-)
 ** to find where Arduino installs cores on your machine.
 **
 ** If the problem persists then there's not enough memory for this model and application.
 */

/* Includes ---------------------------------------------------------------- */
#include <PDM.h>
#include <prueba_inferencing.h>

#define RED 22     
#define BLUE 24     
#define GREEN 23
#define BOTON 3

int val;
/** Audio buffers, pointers and selectors */
typedef struct {
    int16_t *buffer;
    uint8_t buf_ready;
    uint32_t buf_count;
    uint32_t n_samples;
} inference_t;

static inference_t inference;
static signed short sampleBuffer[2048];
static bool debug_nn = false; // Set this to true to see e.g. features generated from the raw signal

/**
 * @brief      Arduino setup function
 */
void setup()
{
    pinMode(RED, OUTPUT);
    pinMode(BLUE, OUTPUT);
    pinMode(GREEN, OUTPUT);
    pinMode(BOTON, INPUT_PULLUP);

    // put your setup code here, to run once:
    Serial.begin(115200);
    // comment out the below line to cancel the wait for USB connection (needed for native USB)
    while (!Serial);
    Serial.println("Edge Impulse Inferencing Demo");

    // summary of inferencing settings (from model_metadata.h)
    ei_printf("Inferencing settings:\n");
    ei_printf("\tInterval: %.2f ms.\n", (float)EI_CLASSIFIER_INTERVAL_MS);
    ei_printf("\tFrame size: %d\n", EI_CLASSIFIER_DSP_INPUT_FRAME_SIZE);
    ei_printf("\tSample length: %d ms.\n", EI_CLASSIFIER_RAW_SAMPLE_COUNT / 16);
    ei_printf("\tNo. of classes: %d\n", sizeof(ei_classifier_inferencing_categories) / sizeof(ei_classifier_inferencing_categories[0]));

    if (microphone_inference_start(EI_CLASSIFIER_RAW_SAMPLE_COUNT) == false) {
        ei_printf("ERR: Could not allocate audio buffer (size %d), this could be due to the window length of your model\r\n", EI_CLASSIFIER_RAW_SAMPLE_COUNT);
        return;
    }
}

/**
 * @brief      Arduino main function. Runs the inferencing loop.
 */
void loop()
{
    if (digitalRead(BOTON) == LOW) {
        ei_printf("Starting inferencing in 2 seconds...\n");

        delay(2000);

        ei_printf("Recording...\n");

        bool m = microphone_inference_record();
        if (!m) {
            ei_printf("ERR: Failed to record audio...\n");
            return;
        }

        ei_printf("Recording done\n");

        signal_t signal;
        signal.total_length = EI_CLASSIFIER_RAW_SAMPLE_COUNT;
        signal.get_data = &microphone_audio_signal_get_data;
        ei_impulse_result_t result = { 0 };

        EI_IMPULSE_ERROR r = run_classifier(&signal, &result, debug_nn);
        if (r != EI_IMPULSE_OK) {
            ei_printf("ERR: Failed to run classifier (%d)\n", r);
            return;
        }

        // CODE FOR FLASHING THE NANO 33 BLE SENSE BOARD RGB LED
        // (the on-board RGB LED is active-low: LOW turns a colour on)
        if (result.classification[0].value >= 0.00) {
            digitalWrite(BLUE, LOW);
        } else {
            digitalWrite(BLUE, HIGH);
        }

        if (result.classification[1].value >= 0.65) {
            digitalWrite(RED, LOW);
        } else {
            digitalWrite(RED, HIGH);
        }

        if (result.classification[3].value >= 0.65) {
            digitalWrite(GREEN, LOW);
        } else {
            digitalWrite(GREEN, HIGH);
        }

        // print the predictions
        ei_printf("Predictions ");
        ei_printf("(DSP: %d ms., Classification: %d ms., Anomaly: %d ms.)",
            result.timing.dsp, result.timing.classification, result.timing.anomaly);
        ei_printf(": \n");
        for (size_t ix = 0; ix < EI_CLASSIFIER_LABEL_COUNT; ix++) {
            ei_printf("    %s: %.5f\n", result.classification[ix].label, result.classification[ix].value);
        }
#if EI_CLASSIFIER_HAS_ANOMALY == 1
        ei_printf("    anomaly score: %.3f\n", result.anomaly);
#endif
    }
}

/**
 * @brief      PDM buffer full callback
 *             Get data and call audio thread callback
 */
static void pdm_data_ready_inference_callback(void)
{
    int bytesAvailable = PDM.available();

    // read into the sample buffer
    int bytesRead = PDM.read((char *)&sampleBuffer[0], bytesAvailable);

    if (inference.buf_ready == 0) {
        for(int i = 0; i < bytesRead>>1; i++) {
            inference.buffer[inference.buf_count++] = sampleBuffer[i];

            if(inference.buf_count >= inference.n_samples) {
                inference.buf_count = 0;
                inference.buf_ready = 1;
                break;
            }
        }
    }
}

/**
 * @brief      Init inferencing struct and setup/start PDM
 *
 * @param[in]  n_samples  The n samples
 *
 * @return     { description_of_the_return_value }
 */
static bool microphone_inference_start(uint32_t n_samples)
{
    inference.buffer = (int16_t *)malloc(n_samples * sizeof(int16_t));

    if(inference.buffer == NULL) {
        return false;
    }

    inference.buf_count  = 0;
    inference.n_samples  = n_samples;
    inference.buf_ready  = 0;

    // configure the data receive callback
    PDM.onReceive(&pdm_data_ready_inference_callback);

    PDM.setBufferSize(4096);

    // initialize PDM with:
    // - one channel (mono mode)
    // - a 16 kHz sample rate
    if (!PDM.begin(1, EI_CLASSIFIER_FREQUENCY)) {
        ei_printf("Failed to start PDM!");
        microphone_inference_end();

        return false;
    }

    // set the gain, defaults to 20
    PDM.setGain(127);

    return true;
}

/**
 * @brief      Wait on new data
 *
 * @return     True when finished
 */
static bool microphone_inference_record(void)
{
    inference.buf_ready = 0;
    inference.buf_count = 0;

    while(inference.buf_ready == 0) {
        delay(10);
    }

    return true;
}

/**
 * Get raw audio signal data
 */
static int microphone_audio_signal_get_data(size_t offset, size_t length, float *out_ptr)
{
    numpy::int16_to_float(&inference.buffer[offset], out_ptr, length);

    return 0;
}

/**
 * @brief      Stop PDM and release buffers
 */
static void microphone_inference_end(void)
{
    PDM.end();
    free(inference.buffer);
}

#if !defined(EI_CLASSIFIER_SENSOR) || EI_CLASSIFIER_SENSOR != EI_CLASSIFIER_SENSOR_MICROPHONE
#error "Invalid model for current sensor."
#endif

→ I want to connect the Arduino Nano 33 BLE to a Raspberry Pi 3 over BLE. I have added this to my code to set up the connection:

#include <ArduinoBLE.h>

BLEService environmentService("984bce84-963e-11ed-a1eb-0242ac120002"); // Custom 128-bit service UUID

BLEIntCharacteristic ComandosCharacteristic("b3ec45b4-9586-11ed-a1eb-0242ac120002", // Custom 128-bit characteristic UUID
 BLERead | BLENotify); // Remote clients can read the value and subscribe to notifications

void setup(){
    BLE.begin();

    BLE.setLocalName("Nano33BLESENSE"); // Set name for connection
    BLE.setAdvertisedService(environmentService); // Advertise environment service
    environmentService.addCharacteristic(ComandosCharacteristic); 
    BLE.addService(environmentService); // Add environment service
    ComandosCharacteristic.setValue(0); // Set initial characteristic value ....
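
→ That snippet is cut off; right after it, setup() ends with BLE.advertise() so the Raspberry Pi can discover the board, and the idea is to publish the winning class after each inference with a small helper, roughly like this (ble_publish_top_class and top_ix are illustrative names, not part of the Edge Impulse example):

    BLE.advertise();   // last line of setup(): start advertising
}

// Illustrative helper: send the index of the top-scoring class over BLE.
// Called from loop() once run_classifier() has filled `result`.
static void ble_publish_top_class(const ei_impulse_result_t &result)
{
    size_t top_ix = 0;
    for (size_t ix = 1; ix < EI_CLASSIFIER_LABEL_COUNT; ix++) {
        if (result.classification[ix].value > result.classification[top_ix].value) {
            top_ix = ix;
        }
    }
    ComandosCharacteristic.writeValue((int)top_ix);   // notifies a subscribed central
}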

→ But as soon as I add BLE.begin() to my code, the program no longer works correctly and the voice recognition isn't performed.

→ This appears in the serial monitor:
Recording done
ERR: MFCC failed (-1002)
ERR: Failed to run DSP process (-1002)
ERR: Failed to run classifier (-5)

→ How can I solve this problem?

Thanks.

Hi @Txomin,

Unfortunately, the “-1002” error means that you have run out of RAM, so the processing and inferencing code failed. In my experience, the keyword spotting model (or any sound-classification program) takes up nearly all of the resources (RAM, flash, and processing time) on the nRF52840. As a result, I would not expect to be able to run much else, including BLE services (which can also be resource-intensive).

There are a few options here:

  • Move to a more powerful processor
  • Move to a dual-core processor where you can offload the Bluetooth functionality (e.g. ESP32 or nRF5340)
  • Use a co-processor to handle the Bluetooth requests. For example, use one Arduino Nano to perform keyword spotting and send a signal (e.g. over UART or I2C) to a second Arduino Nano that performs the Bluetooth functions (see the sketch below)
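
For the last option, the hand-off itself can be tiny. Here is a minimal sketch of the sender side, assuming the keyword-spotting Nano uses its hardware UART (Serial1, pins D1/TX and D0/RX) and a simple newline-terminated label:score format that the BLE board parses and maps onto its own characteristic (uart_link_begin and uart_send_result are illustrative names):

// Sender side on the keyword-spotting Nano (illustrative sketch).
// Call uart_link_begin() once from setup() and uart_send_result() after each
// successful run_classifier(); the BLE co-processor reads the lines and
// updates its own characteristic.
static void uart_link_begin() {
    Serial1.begin(115200);      // hardware UART on pins D1 (TX) / D0 (RX)
}

static void uart_send_result(const ei_impulse_result_t &result) {
    for (size_t ix = 0; ix < EI_CLASSIFIER_LABEL_COUNT; ix++) {
        Serial1.print(result.classification[ix].label);
        Serial1.print(':');
        Serial1.println(result.classification[ix].value, 5);
    }
    Serial1.println("---");     // end-of-frame marker for the receiving board
}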

Hope that helps!

Hi,
But my model's RAM usage is 7.6 kB and the Arduino Nano 33 BLE has 256 kB of RAM, so the difference is big. Why can't I use BLE?

Hi @Txomin,

I recommend monitoring the RAM usage, if you can, as you enable the BLE functions, and then checking whether enough is left to run inference. Here is a good post on how to do that on the Arduino Nano 33 BLE Sense: [SOLVED] Measure free SRAM of Nano 33 BLE Sense at runtime - Nano 33 BLE Sense - Arduino Forum.
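
If you just want a quick number, something like newlib's mallinfo() can also give a rough view of heap usage at runtime, assuming the mbed-based core links it in (it only covers the heap, not the stack, and print_heap_stats is an illustrative name):

#include <malloc.h>   // newlib's mallinfo()

// Rough heap report: call it before and after BLE.begin(), and again right
// before run_classifier(), to see how much headroom is left.
void print_heap_stats(const char *tag) {
    struct mallinfo mi = mallinfo();
    Serial.print(tag);
    Serial.print(" - heap in use: ");
    Serial.print(mi.uordblks);    // bytes currently allocated from the heap
    Serial.print(" B, free in heap arena: ");
    Serial.print(mi.fordblks);    // free bytes still available inside the arena
    Serial.println(" B");
}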

You might be able to adjust the heap allocation in ei_malloc/ei_calloc/ei_free as described in this thread: ERR: MFCC failed (-1002)
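
For context, in the exported Arduino library those three functions sit in the SDK's porting layer and are, roughly speaking, thin wrappers around the standard allocator (the exact code varies by SDK version), so that is the one place where the classifier's allocation strategy can be changed, e.g. pointed at a static pool:

#include <stdlib.h>

// Approximate shape of the porting-layer allocators (not the verbatim SDK
// source). Much of what the DSP and classifier code allocates goes through
// these, which is why changing the strategy here affects the -1002 error.
void *ei_malloc(size_t size) {
    return malloc(size);
}

void *ei_calloc(size_t nitems, size_t size) {
    return calloc(nitems, size);
}

void ei_free(void *ptr) {
    free(ptr);
}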

Hope that helps!