#include <frontend_util.h>

#include <tensorflow/lite/core/c/common.h>
#include <tensorflow/lite/micro/micro_interpreter.h>
#include <tensorflow/lite/micro/micro_mutable_op_resolver.h>

namespace micro_wake_word {

static const uint8_t MIN_SLICES_BEFORE_DETECTION = 74;

void setup() override;

void add_wake_word_model(const uint8_t *model_start, float probability_cutoff,
                         size_t sliding_window_average_size, const std::string &wake_word,
                         size_t tensor_arena_size);

#ifdef USE_MICRO_WAKE_WORD_VAD
void add_vad_model(const uint8_t *model_start, float probability_cutoff, size_t sliding_window_size,
                   size_t tensor_arena_size);

#ifdef USE_MICRO_WAKE_WORD_VAD

inline uint16_t new_samples_to_get_() { return (this->features_step_size_ * (AUDIO_SAMPLE_FREQUENCY / 1000)); }

void play(Ts... x) override { this->parent_->start(); }

void play(Ts... x) override { this->parent_->stop(); }

bool check(Ts... x) override { return this->parent_->is_running(); }

#endif  // USE_ESP_IDF
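For scale, assuming AUDIO_SAMPLE_FREQUENCY is 16000 Hz and features_step_size_ is 20 ms (illustrative values, not confirmed defaults), new_samples_to_get_() evaluates to 20 * (16000 / 1000) = 320 audio samples per feature step.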
void add_wake_word_model(const uint8_t *model_start, float probability_cutoff, size_t sliding_window_average_size, const std::string &wake_word, size_t tensor_arena_size)
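A hedged usage sketch for this call; mww is a hypothetical pointer to the component, and the blob symbol, wake word string, and tuning values are placeholders chosen for illustration:

// Hypothetical call site; symbol name, pointer name, and numbers are placeholders.
extern const uint8_t wake_word_model_start[];

mww->add_wake_word_model(wake_word_model_start,
                         /*probability_cutoff=*/0.97f,
                         /*sliding_window_average_size=*/10,
                         /*wake_word=*/"okay nabu",
                         /*tensor_arena_size=*/45000);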
void set_features_step_size(uint8_t step_size)
int16_t * preprocessor_audio_buffer_
void set_state_(State state)
Trigger< std::string > * wake_word_detected_trigger_
bool detect_wake_words_()
Checks every model's recent probabilities to determine if the wake word has been predicted.
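A minimal sketch of how such a check could look, assuming each WakeWordModel keeps a window of recent quantized probabilities; recent_probabilities(), probability_cutoff(), and wake_word() are illustrative accessor names, not confirmed API:

bool detect_wake_words_() {
  // Still within the warm-up window after a reset: do not report detections yet.
  if (this->ignore_windows_ < 0)
    return false;

  for (auto &model : this->wake_word_models_) {
    // Average the sliding window of quantized probabilities for this model.
    float sum = 0.0f;
    for (uint8_t probability : model.recent_probabilities())  // assumed accessor
      sum += probability / 255.0f;
    float average = sum / model.recent_probabilities().size();

    if (average > model.probability_cutoff()) {  // assumed accessor
      this->detected_wake_word_ = model.wake_word();  // assumed accessor
      return true;
    }
  }
  return false;
}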
std::unique_ptr< RingBuffer > ring_buffer_
HighFrequencyLoopRequester: Helper class to request loop() to be called as fast as possible.
float get_setup_priority() const override
HighFrequencyLoopRequester high_freq_
struct FrontendConfig frontend_config_
std::vector< WakeWordModel > wake_word_models_
void update_model_probabilities_()
Performs inference with each configured model.
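A sketch of one inference pass, assuming every configured model wraps its own tflite::MicroInterpreter with an int8 input tensor and a uint8-quantized output; interpreter() and push_probability() are illustrative helper names:

void update_model_probabilities_() {
  int8_t features[PREPROCESSOR_FEATURE_SIZE];
  if (!this->generate_features_for_window_(features))
    return;

  for (auto &model : this->wake_word_models_) {
    tflite::MicroInterpreter *interpreter = model.interpreter();  // assumed accessor
    // Feed the newest feature slice to the streaming model.
    memcpy(interpreter->input(0)->data.int8, features, PREPROCESSOR_FEATURE_SIZE);
    if (interpreter->Invoke() != kTfLiteOk)
      continue;  // skip this model's window update on a failed invoke
    // Record the quantized "wake word present" score for the sliding window.
    model.push_probability(interpreter->output(0)->data.uint8[0]);  // assumed helper
  }
}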
void dump_config() override
bool check(Ts... x) override
bool register_streaming_ops_(tflite::MicroMutableOpResolver< 20 > &op_resolver)
Returns true if the streaming model's TensorFlow operations were successfully registered.
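Registration is a sequence of Add*() calls on the resolver, each returning kTfLiteOk on success; the operator set below is a trimmed, illustrative subset rather than the component's exact list:

bool register_streaming_ops_(tflite::MicroMutableOpResolver<20> &op_resolver) {
  if (op_resolver.AddCallOnce() != kTfLiteOk)
    return false;
  if (op_resolver.AddVarHandle() != kTfLiteOk)
    return false;
  if (op_resolver.AddReadVariable() != kTfLiteOk)
    return false;
  if (op_resolver.AddAssignVariable() != kTfLiteOk)
    return false;
  if (op_resolver.AddConv2D() != kTfLiteOk)
    return false;
  if (op_resolver.AddDepthwiseConv2D() != kTfLiteOk)
    return false;
  if (op_resolver.AddFullyConnected() != kTfLiteOk)
    return false;
  if (op_resolver.AddSoftmax() != kTfLiteOk)
    return false;
  // ... additional operators as required by the models ...
  return true;
}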
uint8_t features_step_size_
microphone::Microphone * microphone_
Condition: Base class for all automation conditions.
bool allocate_buffers_()
Allocates memory for input_buffer_, preprocessor_audio_buffer_, and ring_buffer_. ...
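A hedged sketch of the allocation, assuming ESPHome's ExternalRAMAllocator and RingBuffer::create() helpers are used; INPUT_BUFFER_SIZE and RING_BUFFER_SIZE are placeholder constants, not names taken from the source:

bool allocate_buffers_() {
  ExternalRAMAllocator<int16_t> allocator(ExternalRAMAllocator<int16_t>::ALLOW_FAILURE);

  if (this->input_buffer_ == nullptr) {
    this->input_buffer_ = allocator.allocate(INPUT_BUFFER_SIZE);  // placeholder size
    if (this->input_buffer_ == nullptr)
      return false;
  }

  if (this->preprocessor_audio_buffer_ == nullptr) {
    this->preprocessor_audio_buffer_ = allocator.allocate(this->new_samples_to_get_());
    if (this->preprocessor_audio_buffer_ == nullptr)
      return false;
  }

  if (this->ring_buffer_ == nullptr) {
    this->ring_buffer_ = RingBuffer::create(RING_BUFFER_SIZE);  // placeholder size
    if (this->ring_buffer_ == nullptr)
      return false;
  }
  return true;
}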
bool has_enough_samples_()
Tests if there are enough samples in the ring buffer to generate new features.
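Assuming RingBuffer reports its fill level in bytes via available(), the test reduces to a comparison against one feature step of audio:

bool has_enough_samples_() {
  // new_samples_to_get_() is a sample count; the ring buffer counts bytes.
  return this->ring_buffer_->available() >= this->new_samples_to_get_() * sizeof(int16_t);
}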
tflite::MicroMutableOpResolver< 20 > streaming_op_resolver_
bool generate_features_for_window_(int8_t features[PREPROCESSOR_FEATURE_SIZE])
Generates features for a window of audio samples.
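A sketch using the microfrontend's FrontendProcessSamples(); the ring-buffer read signature and the uint16-to-int8 requantization constants are assumptions for illustration:

bool generate_features_for_window_(int8_t features[PREPROCESSOR_FEATURE_SIZE]) {
  if (!this->has_enough_samples_())
    return false;

  // Pull one feature step's worth of audio out of the ring buffer.
  size_t bytes_needed = this->new_samples_to_get_() * sizeof(int16_t);
  this->ring_buffer_->read((void *) this->preprocessor_audio_buffer_, bytes_needed);

  size_t samples_read = 0;
  struct FrontendOutput output = FrontendProcessSamples(&this->frontend_state_, this->preprocessor_audio_buffer_,
                                                        this->new_samples_to_get_(), &samples_read);

  // Requantize the frontend's uint16 filterbank energies into int8 for the models.
  for (size_t i = 0; i < output.size; ++i) {
    int32_t value = (static_cast<int32_t>(output.values[i]) * 256) / 666 - 128;  // illustrative scaling
    if (value < -128)
      value = -128;
    if (value > 127)
      value = 127;
    features[i] = static_cast<int8_t>(value);
  }
  return true;
}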
void deallocate_buffers_()
Frees memory allocated for input_buffer_ and preprocessor_audio_buffer_.
void play(Ts... x) override
uint16_t new_samples_to_get_()
struct FrontendState frontend_state_
void reset_states_()
Resets the ring buffer, ignore_windows_, and sliding window probabilities.
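A sketch consistent with the description above; the sign convention on ignore_windows_ and the reset_probabilities() helper are assumptions:

void reset_states_() {
  this->ring_buffer_->reset();  // discard buffered audio
  // Negative value: ignore this many feature windows before detection is allowed again.
  this->ignore_windows_ = -MIN_SLICES_BEFORE_DETECTION;
  for (auto &model : this->wake_word_models_) {
    model.reset_probabilities();  // assumed helper that clears the sliding window
  }
#ifdef USE_MICRO_WAKE_WORD_VAD
  this->vad_model_->reset_probabilities();
#endif
}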
Trigger< std::string > * get_wake_word_detected_trigger() const
void play(Ts... x) override
std::string detected_wake_word_
void add_vad_model(const uint8_t *model_start, float probability_cutoff, size_t sliding_window_size, size_t tensor_arena_size)
void set_microphone(microphone::Microphone *microphone)
bool load_models_()
Loads streaming models and prepares the feature generation frontend.
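A sketch of the two halves of this step: populating the microfrontend state and building a TFLite Micro interpreter per model. model_start() and build_interpreter() are illustrative names; error logging and tensor arena handling are simplified:

bool load_models_() {
  // Prepare the audio feature-generation frontend.
  if (!FrontendPopulateState(&this->frontend_config_, &this->frontend_state_, AUDIO_SAMPLE_FREQUENCY)) {
    ESP_LOGE(TAG, "Failed to populate frontend state");
    return false;
  }

  for (auto &model : this->wake_word_models_) {
    const tflite::Model *tflite_model = tflite::GetModel(model.model_start());  // assumed accessor
    if (tflite_model->version() != TFLITE_SCHEMA_VERSION) {
      ESP_LOGE(TAG, "Model schema version mismatch");
      return false;
    }
    // The tensor arena and MicroInterpreter are assumed to be owned by the
    // WakeWordModel wrapper in this sketch.
    if (!model.build_interpreter(this->streaming_op_resolver_))  // assumed helper
      return false;
  }
  return true;
}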
std::unique_ptr< VADModel > vad_model_
Parented: Helper class to easily give an object a parent of type T.
void unload_models_()
Deletes each model's TFLite interpreters and frees tensor arena memory.
size_t read_microphone_()
Reads audio from microphone into the ring buffer.
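A sketch assuming the microphone's read() returns the number of bytes copied into input_buffer_ and that RingBuffer::write() accepts a raw pointer and byte count; INPUT_BUFFER_SIZE is a placeholder constant:

size_t read_microphone_() {
  size_t bytes_read = this->microphone_->read(this->input_buffer_, INPUT_BUFFER_SIZE * sizeof(int16_t));
  if (bytes_read == 0)
    return 0;
  // Hand the freshly captured audio to the ring buffer for feature generation.
  return this->ring_buffer_->write((void *) this->input_buffer_, bytes_read);
}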