#include <tensorflow/lite/core/c/common.h>
#include <tensorflow/lite/micro/micro_interpreter.h>
#include <tensorflow/lite/micro/micro_mutable_op_resolver.h>

namespace micro_wake_word {
static const uint32_t STREAMING_MODEL_VARIABLE_ARENA_SIZE = 1024;
bool load_model(tflite::MicroMutableOpResolver<20> &op_resolver);
tflite::MicroResourceVariables *mrv_{nullptr};
tflite::MicroAllocator *ma_{nullptr};
WakeWordModel(const uint8_t *model_start, float probability_cutoff, size_t sliding_window_average_size,
              const std::string &wake_word, size_t tensor_arena_size);
VADModel(const uint8_t *model_start, float probability_cutoff, size_t sliding_window_size,
         size_t tensor_arena_size);
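A minimal call-site sketch for the constructors and load_model declared above. It assumes these members are publicly callable; the model data symbol, the operator set, and the numeric arguments (cutoff, window size, arena size) are placeholders rather than values taken from this component.

#include <tensorflow/lite/micro/micro_mutable_op_resolver.h>
// ... plus the header that declares micro_wake_word::WakeWordModel (path not shown in this listing).

// model_data points at a quantized streaming TFLite flatbuffer (hypothetical symbol).
bool setup_wake_word(const uint8_t *model_data) {
  micro_wake_word::WakeWordModel model(model_data,
                                       /*probability_cutoff=*/0.97f,
                                       /*sliding_window_average_size=*/10,
                                       /*wake_word=*/"hey_jarvis",
                                       /*tensor_arena_size=*/30000);

  // Register only the operators the streaming model actually uses; the exact set is model dependent.
  static tflite::MicroMutableOpResolver<20> op_resolver;
  op_resolver.AddConv2D();
  op_resolver.AddDepthwiseConv2D();
  op_resolver.AddFullyConnected();
  op_resolver.AddLogistic();

  // Allocates the tensor and variable arenas and builds the interpreter (see load_model below).
  return model.load_model(op_resolver);
}

In practice the model object would be kept alive for as long as streaming inference runs; the local variable here is only to show the argument order.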
const std::string &get_wake_word() const
tflite::MicroResourceVariables *mrv_
void unload_model()
Destroys the TFLite interpreter and frees the tensor and variable arenas' memory.
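A sketch of that teardown, assuming the enclosing class is the streaming-model base class (its name is not shown in this listing) and that tensor_arena_ and var_arena_ are hypothetical buffer members allocated in load_model with plain new[]; the real component may use a different allocator.

void StreamingModel::unload_model() {
  // Destroying the interpreter invalidates everything planned inside the tensor arena.
  this->interpreter_.reset();

  // Free both arenas and drop the pointers that were created inside the variable arena.
  delete[] this->tensor_arena_;
  this->tensor_arena_ = nullptr;
  delete[] this->var_arena_;
  this->var_arena_ = nullptr;
  this->ma_ = nullptr;
  this->mrv_ = nullptr;
}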
std::vector<uint8_t> recent_streaming_probabilities_
uint8_t current_stride_step_
float probability_cutoff_
bool load_model(tflite::MicroMutableOpResolver<20> &op_resolver)
Allocates tensor and variable arenas and sets up the model interpreter.
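A sketch of that allocation flow against the stock TFLite Micro API, assuming the same hypothetical tensor_arena_/var_arena_ members as above; the arena allocation strategy and the resource-variable count are illustrative, not taken from this component.

#include <memory>
#include <new>
#include <tensorflow/lite/micro/micro_allocator.h>
#include <tensorflow/lite/micro/micro_resource_variable.h>
#include <tensorflow/lite/schema/schema_generated.h>

bool StreamingModel::load_model(tflite::MicroMutableOpResolver<20> &op_resolver) {
  // Allocate the tensor arena once; reuse it if the model was unloaded and reloaded.
  if (this->tensor_arena_ == nullptr) {
    this->tensor_arena_ = new (std::nothrow) uint8_t[this->tensor_arena_size_];
    if (this->tensor_arena_ == nullptr)
      return false;
  }

  // The small variable arena backs the MicroAllocator and the model's resource variables.
  if (this->var_arena_ == nullptr) {
    this->var_arena_ = new (std::nothrow) uint8_t[STREAMING_MODEL_VARIABLE_ARENA_SIZE];
    if (this->var_arena_ == nullptr)
      return false;
    this->ma_ = tflite::MicroAllocator::Create(this->var_arena_, STREAMING_MODEL_VARIABLE_ARENA_SIZE);
    this->mrv_ = tflite::MicroResourceVariables::Create(this->ma_, 20);
  }

  // Map the flatbuffer and build the interpreter over the tensor arena.
  const tflite::Model *model = tflite::GetModel(this->model_start_);
  this->interpreter_ = std::make_unique<tflite::MicroInterpreter>(
      model, op_resolver, this->tensor_arena_, this->tensor_arena_size_, this->mrv_);
  return this->interpreter_->AllocateTensors() == kTfLiteOk;
}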
size_t tensor_arena_size_
void reset_probabilities()
Sets all recent_streaming_probabilities_ to 0.
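Given that brief, the reset is essentially a fill over the recent_streaming_probabilities_ window listed below; a sketch, again assuming the base class name:

#include <algorithm>

void StreamingModel::reset_probabilities() {
  // Zero every slot in the sliding window of recent probabilities.
  std::fill(this->recent_streaming_probabilities_.begin(),
            this->recent_streaming_probabilities_.end(), 0);
}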
std::unique_ptr<tflite::MicroInterpreter> interpreter_
tflite::MicroAllocator *ma_
const uint8_t *model_start_
bool perform_streaming_inference(const int8_t features[PREPROCESSOR_FEATURE_SIZE])
virtual bool determine_detected() = 0
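A per-stride usage sketch tying perform_streaming_inference and determine_detected together; it assumes both are publicly callable on a loaded model and that new_features comes from the component's audio preprocessor (the handler name and the reset-on-detection behaviour are illustrative).

// One audio stride: feed the newest preprocessor features, then evaluate the sliding window.
void on_new_features(micro_wake_word::WakeWordModel &model,
                     const int8_t new_features[PREPROCESSOR_FEATURE_SIZE]) {
  if (!model.perform_streaming_inference(new_features)) {
    return;  // inference failed, e.g. the interpreter is not loaded
  }
  if (model.determine_detected()) {
    // Wake word (or voice activity, for VADModel) detected; e.g. clear the window
    // before listening for the next detection.
    model.reset_probabilities();
  }
}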
size_t sliding_window_size_
virtual void log_model_config() = 0