Raspberry pico accuracy problem

Hello!
i am Denis and currently working with the raspberry pico 4ml (A raspberry pico with a camera module).
i trained a Model and everything is working fine! I can even classify the impuls with pasting the raw data of the classification pictures. But it seems to not work with the camera input and i dont understand why.
I think its related to this problem and i even use the code from this example.

The detection percentage is always <50 and most of the time even < 1 percent and i cant understand why.
With the raw data of a classification i get a clean ´53% detection and everything is working as predicted.
Any thoughts ? Would love to understand the problem.
i guess its a problem with converting the camera data ?

Kind Regards
Denis

My main (followed the instructions from the posts etc.)


#include "edge-impulse-sdk/classifier/ei_run_classifier.h"

#include <hardware/gpio.h>
#include <hardware/uart.h>
#include <pico/stdio_usb.h>
#include <stdio.h>

#include <LCD_st7735.h>
#include "image_provider.h"
const uint LED_PIN = 25;

/*
int PIN_CAM_SIOC = 5; // I2C0 SCL
int PIN_CAM_SIOD = 4; // I2C0 SDA
int PIN_CAM_RESETB = 2;
int PIN_CAM_XCLK = 3;
int PIN_CAM_VSYNC = 16;     //GP15 hsync  GP14 pixel clock     
int PIN_CAM_Y2_PIO_BASE = 6;   // data GPIO6*/

#define UART_ID uart0
#define BAUD_RATE 115200
#define DATA_BITS 8
#define STOP_BITS 1
#define PARITY UART_PARITY_NONE
#define UART_TX_PIN 0
#define UART_RX_PIN 1

// Globals, used for compatibility with Arduino-style sketches.
namespace {
tflite::ErrorReporter *   error_reporter = nullptr;
int8_t image[EI_CLASSIFIER_DSP_INPUT_FRAME_SIZE] = {0};
float image_output[EI_CLASSIFIER_DSP_INPUT_FRAME_SIZE] = {0};
ei_impulse_result_t result = { 0 };

static bool debug_nn = false;

}  // namespace

#ifndef DO_NOT_OUTPUT_TO_UART
// RX interrupt handler
void on_uart_rx() {
  char cameraCommand = 0;
  while (uart_is_readable(UART_ID)) {
    cameraCommand = uart_getc(UART_ID);
    // Can we send it back?
    if (uart_is_writable(UART_ID)) {
      uart_putc(UART_ID, cameraCommand);
    }
  }
}

void setup_uart() {
  // Set up our UART with the required speed.
  uint baud = uart_init(UART_ID, BAUD_RATE);
  // Set the TX and RX pins by using the function select on the GPIO
  // Set datasheet for more information on function select
  gpio_set_function(UART_TX_PIN, GPIO_FUNC_UART);
  gpio_set_function(UART_RX_PIN, GPIO_FUNC_UART);
  // Set our data format
  uart_set_format(UART_ID, DATA_BITS, STOP_BITS, PARITY);
  // Turn off FIFO's - we want to do this character by character
  uart_set_fifo_enabled(UART_ID, false);
  // Set up a RX interrupt
  // We need to set up the handler first
  // Select correct interrupt for the UART we are using
  int UART_IRQ = UART_ID == uart0 ? UART0_IRQ : UART1_IRQ;

  // And set up and enable the interrupt handlers
  irq_set_exclusive_handler(UART_IRQ, on_uart_rx);
  irq_set_enabled(UART_IRQ, true);

  // Now enable the UART to send interrupts - RX only
  uart_set_irq_enables(UART_ID, true, false);
}
#else
void setup_uart() {}
#endif


int raw_feature_get_data(size_t offset, size_t length, float *out_ptr)
{
    //size_t pixel_ix = offset * 2;
    size_t pixel_ix = offset; 
    size_t bytes_left = length;
    size_t out_ptr_ix = 0;

    // read byte for byte
    while (bytes_left != 0) {
        // grab the pixel value and convert to r/g/b
        uint16_t pixel   = ST7735_COLOR565(image[pixel_ix], image[pixel_ix], image[pixel_ix]);
         
        uint8_t r, g, b;
        r = ((pixel >> 11) & 0x1f) << 3;
        g = ((pixel >> 5) & 0x3f) << 2;
        b = (pixel & 0x1f) << 3;

        // then convert to out_ptr format
        float pixel_f = (r << 16) + (g << 8) + b;
        out_ptr[out_ptr_ix] = pixel_f;

        // and go to the next pixel
        out_ptr_ix++;
        pixel_ix++;
        bytes_left--;
    }

    // and done!
  //memcpy(out_ptr, features + offset, length * sizeof(float));
  return 0;
}

bool init_sensors() {
    //stdio_uart_init();
    static tflite::MicroErrorReporter micro_error_reporter;
  error_reporter = &micro_error_reporter;
  
  setup_uart();
  stdio_usb_init();
  
  TfLiteStatus setup_status = ScreenInit(error_reporter);
  if (setup_status != kTfLiteOk) {
    TF_LITE_REPORT_ERROR(error_reporter, "Screen Set up failed\n");
    ei_printf("Failed to init screen\n");
  }    
    ei_printf("Sensor init successful!\n");
    return true;

}

int main()
{
  setup_uart();
  stdio_usb_init();

  gpio_init(LED_PIN);
  gpio_set_dir(LED_PIN, GPIO_OUT);
  
  if (!init_sensors()) {
      while(1) {
      ei_printf("Sensor init error!\n");
      ei_sleep(1000);
      }
  }
  ei_impulse_result_t result = {nullptr};
  
  // Get image from camera.
  int kNumCols = 96;
  int kNumRows = 96;
  int kNumChannels = 1;
  
  while (1)
  {
    if (kTfLiteOk
        != GetImage(error_reporter, kNumCols, kNumRows, kNumChannels, image)) {
      TF_LITE_REPORT_ERROR(error_reporter, "Image capture failed.");
      ei_printf("Failed to get image\n");
    }
    
    // Run the model on this input and make sure it succeeds.
    // put Edge Impulse model code here 
    // Turn the raw buffer in a signal which we can the classify
    // this is where we'll write all the output
    signal_t signal;
    signal.total_length = EI_CLASSIFIER_DSP_INPUT_FRAME_SIZE;
    signal.get_data = &raw_feature_get_data;

// read through the signal buffered, like the classifier lib also does
  for (size_t ix = 0; ix < signal.total_length; ix += 1024) {
     size_t bytes_to_read = 1024;
     if (ix + bytes_to_read > signal.total_length) {
         bytes_to_read = signal.total_length - ix;
     }
     int r = signal.get_data(ix, bytes_to_read, image_output + ix);
  }
  
    // Run the classifier
    int err = run_classifier(&signal, &result, debug_nn);
    if (err != EI_IMPULSE_OK) {
       ei_printf("ERR: Failed to run classifier (%d)\n", err);
       return 0;
    }      
    
    // print the predictions
    ei_printf("Predictions ");
    for (size_t ix = 0; ix < EI_CLASSIFIER_LABEL_COUNT; ix++) {
        ei_printf("    %s: %.5f\n", result.classification[ix].label, result.classification[ix].value);
    }
    
    // Process the inference results.
    int8_t no_oak_processionary_nest    = floor(result.classification[0].value * 100);
    int8_t oak_processionary_nest      = floor(result.classification[1].value * 100);
    uint8_t header[4] = { 0x55, 0xBB, (uint8_t)oak_processionary_nest, (uint8_t)no_oak_processionary_nest };
    uart_write_blocking(uart0, header, 4);
        
    #if SCREEN
        char array[10];
        sprintf(array, "%d%%", oak_processionary_nest);
        ST7735_FillRectangle(10, 120, ST7735_WIDTH, 60, ST7735_BLACK);
        //if the poison ivy score is > 60%, change text color to red as a warning
        //otherwise, text is green
        if(oak_processionary_nest > 60)                           
        {
          ST7735_WriteString(10, 120, array, Font_16x26, ST7735_RED, ST7735_BLACK);
        }
        else
        {
          ST7735_WriteString(10, 120, array, Font_16x26, ST7735_GREEN, ST7735_BLACK);
        }
    #endif
    TF_LITE_REPORT_ERROR(error_reporter, "**********");
  }
}

Hello @Pico4ML,

I see that you are collected monochrome pictures:

// Get image from camera.
  int kNumCols = 96;
  int kNumRows = 96;
  int kNumChannels = 1;
  
  while (1)
  {
    if (kTfLiteOk
        != GetImage(error_reporter, kNumCols, kNumRows, kNumChannels, image)) {
      TF_LITE_REPORT_ERROR(error_reporter, "Image capture failed.");
      ei_printf("Failed to get image\n");
    }

But the raw_features_get_data uses RGB565:

signal.get_data = &raw_feature_get_data;

Best,

Louis

Hey Louis,

thanks a lot!
Missed that information i guess! Now i can improve the model.
You made my day !

Kind Regards,
Denis

1 Like