Hello everyone,
I am using a Sony Spresense and am having trouble deploying my model onto the board. When I run live classification with the board's camera, I get correct results. However, when I build the impulse as an Arduino library and deploy it, no bounding boxes are printed. I am essentially using FOMO to detect a rectangle and a square.
Model-Used: FOMO
Target: Sony Spresense
Environment: Arduino
Link: Symbol4 - Dashboard - Edge Impulse
This is the serial output (it repeats every loop iteration):
Timing: DSP 10 ms, inference 1167 ms, anomaly 0 ms
Object detection bounding boxes:
Edge Impulse standalone inferencing (Arduino)
run_classifier returned: 0
Timing: DSP 10 ms, inference 1167 ms, anomaly 0 ms
Object detection bounding boxes:
Edge Impulse standalone inferencing (Arduino)
run_classifier returned: 0
Should I retrain my model, or do I need to change something in the code?
I have already tried the following:
- Changed the printf for the confidence value from a float to an integer.
- Added a print("continue") statement inside the if (bb.value == 0) block. It printed "continue" multiple times, indicating that bb.value is 0 (a rough version of this debug printing is sketched right after this list).
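For reference, this is roughly the debug printing I mean, inside the bounding-box loop of the code below (a sketch only; I print the confidence as a scaled integer in case %f is not handled by the board's printf):
// count the boxes and print each confidence as a scaled integer
ei_printf("bounding_boxes_count: %u\r\n", (unsigned)result.bounding_boxes_count);
for (uint32_t i = 0; i < result.bounding_boxes_count; i++) {
    ei_impulse_result_bounding_box_t bb = result.bounding_boxes[i];
    // bb.value is a confidence between 0 and 1
    ei_printf("  box %u: %s, confidence x100 = %d\r\n",
              (unsigned)i, bb.label, (int)(bb.value * 100.0f));
}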
The model works perfectly on webcam and Spresense when using live classification.
This is the Arduino code:
/* Edge Impulse ingestion SDK
* Copyright (c) 2022 EdgeImpulse Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/* Includes ---------------------------------------------------------------- */
#include <Symbol4_inferencing.h>
static const float features[] = {
// paste the raw image features of one test sample here (copied from Edge Impulse Studio)
};
/**
* @brief Copy raw feature data in out_ptr
* Function called by inference library
*
* @param[in] offset The offset
* @param[in] length The length
* @param out_ptr The out pointer
*
* @return 0
*/
int raw_feature_get_data(size_t offset, size_t length, float *out_ptr) {
memcpy(out_ptr, features + offset, length * sizeof(float));
return 0;
}
void print_inference_result(ei_impulse_result_t result);
/**
* @brief Arduino setup function
*/
void setup()
{
// put your setup code here, to run once:
Serial.begin(115200);
// comment out the below line to cancel the wait for USB connection (needed for native USB)
while (!Serial);
Serial.println("Edge Impulse Inferencing Demo");
}
/**
* @brief Arduino main function
*/
void loop()
{
ei_printf("Edge Impulse standalone inferencing (Arduino)\n");
if (sizeof(features) / sizeof(float) != EI_CLASSIFIER_DSP_INPUT_FRAME_SIZE) {
ei_printf("The size of your 'features' array is not correct. Expected %lu items, but had %lu\n",
EI_CLASSIFIER_DSP_INPUT_FRAME_SIZE, sizeof(features) / sizeof(float));
delay(1000);
return;
}
ei_impulse_result_t result = { 0 };
// the features are stored into flash, and we don't want to load everything into RAM
signal_t features_signal;
features_signal.total_length = sizeof(features) / sizeof(features[0]);
features_signal.get_data = &raw_feature_get_data;
// invoke the impulse
EI_IMPULSE_ERROR res = run_classifier(&features_signal, &result, false /* debug */);
if (res != EI_IMPULSE_OK) {
ei_printf("ERR: Failed to run classifier (%d)\n", res);
return;
}
// print inference return code
ei_printf("run_classifier returned: %d\r\n", res);
print_inference_result(result);
delay(1000);
}
void print_inference_result(ei_impulse_result_t result) {
// Print how long it took to perform inference
ei_printf("Timing: DSP %d ms, inference %d ms, anomaly %d ms\r\n",
result.timing.dsp,
result.timing.classification,
result.timing.anomaly);
// Print the prediction results (object detection)
#if EI_CLASSIFIER_OBJECT_DETECTION == 1
ei_printf("Object detection bounding boxes:\r\n");
for (uint32_t i = 0; i < result.bounding_boxes_count; i++) {
ei_impulse_result_bounding_box_t bb = result.bounding_boxes[i];
if (bb.value == 0) {
// continue;  // commented out while debugging so that every box is printed
}
ei_printf("Printing this...");
ei_printf(" %s (%d) [ x: %u, y: %u, width: %u, height: %u ]\r\n",
bb.label,
static_cast<int>(bb.value),
bb.x,
bb.y,
bb.width,
bb.height);
}
// Print the prediction results (classification)
#else
ei_printf("Predictions:\r\n");
for (uint16_t i = 0; i < EI_CLASSIFIER_LABEL_COUNT; i++) {
ei_printf(" %s: ", ei_classifier_inferencing_categories[i]);
ei_printf("%.5f\r\n", result.classification[i].value);
}
#endif
// Print anomaly result (if it exists)
#if EI_CLASSIFIER_HAS_ANOMALY
ei_printf("Anomaly prediction: %.3f\r\n", result.anomaly);
#endif
#if EI_CLASSIFIER_HAS_VISUAL_ANOMALY
ei_printf("Visual anomalies:\r\n");
for (uint32_t i = 0; i < result.visual_ad_count; i++) {
ei_impulse_result_bounding_box_t bb = result.visual_ad_grid_cells[i];
if (bb.value == 0) {
continue;
}
ei_printf(" %s (%f) [ x: %u, y: %u, width: %u, height: %u ]\r\n",
bb.label,
bb.value,
bb.x,
bb.y,
bb.width,
bb.height);
}
#endif
}
(I have removed the image features to shorten the length of the code)
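If it helps, I can also re-run the sketch with the SDK's debug flag turned on (just flipping the existing parameter in the run_classifier call above) to get more verbose output:
// same call as in loop(), but with debug output enabled
EI_IMPULSE_ERROR res = run_classifier(&features_signal, &result, true /* debug */);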
Kindly help me out.