forked from xiaozhi/xiaozhi-esp32
Upgrade esp-sr to 2.0.2, improve performance
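This commit moves AudioProcessor and WakeWordDetect from the esp-sr 1.x AFE interface (the esp_afe_vc_v1 / esp_afe_sr_v1 singletons and a hand-written afe_config_t initializer) to the esp-sr 2.x interface, where the config comes from afe_config_init() and the interface handle from esp_afe_handle_from_config(). It also moves the VAD state callback from WakeWordDetect into AudioProcessor. For orientation, below is a minimal sketch of that 2.x lifecycle, assuming the call pattern used in the hunks that follow; header names, the cleanup call and the chunk handling are assumptions, not code from this commit.

#include <vector>
#include "freertos/FreeRTOS.h"
#include "esp_afe_sr_iface.h"   // header names are assumptions, not taken from this diff
#include "esp_afe_config.h"

// Minimal sketch of the esp-sr 2.x AFE lifecycle: one mic plus one reference
// channel ("MR"), voice-communication type, no wakenet models.
void AfeLifecycleSketch() {
    afe_config_t* cfg = afe_config_init("MR", NULL, AFE_TYPE_VC, AFE_MODE_HIGH_PERF);
    esp_afe_sr_iface_t* iface = esp_afe_handle_from_config(cfg);
    esp_afe_sr_data_t* data = iface->create_from_config(cfg);

    // feed() expects get_feed_chunksize() samples per channel, interleaved.
    int chunk = iface->get_feed_chunksize(data);
    std::vector<int16_t> frame(chunk * 2, 0);   // two channels of silence
    for (int i = 0; i < 4; i++) {
        iface->feed(data, frame.data());        // in this diff, feeding happens in Input()/Feed()
    }

    // fetch_with_delay() blocks until a processed frame is available;
    // in this diff it runs in the dedicated processor/detection tasks.
    afe_fetch_result_t* res = iface->fetch_with_delay(data, portMAX_DELAY);
    (void)res;   // res->data / res->vad_state / res->wakeup_state are consumed by those tasks

    iface->destroy(data);
    afe_config_free(cfg);   // assumed cleanup counterpart of afe_config_init()
}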
@@ -6,7 +6,7 @@
 static const char* TAG = "AudioProcessor";
 
 AudioProcessor::AudioProcessor()
-    : afe_communication_data_(nullptr) {
+    : afe_data_(nullptr) {
     event_group_ = xEventGroupCreate();
 }
 
@@ -15,39 +15,30 @@ void AudioProcessor::Initialize(int channels, bool reference) {
     reference_ = reference;
     int ref_num = reference_ ? 1 : 0;
 
-    afe_config_t afe_config = {
-        .aec_init = false,
-        .se_init = true,
-        .vad_init = false,
-        .wakenet_init = false,
-        .voice_communication_init = true,
-        .voice_communication_agc_init = true,
-        .voice_communication_agc_gain = 10,
-        .vad_mode = VAD_MODE_3,
-        .wakenet_model_name = NULL,
-        .wakenet_model_name_2 = NULL,
-        .wakenet_mode = DET_MODE_90,
-        .afe_mode = SR_MODE_HIGH_PERF,
-        .afe_perferred_core = 1,
-        .afe_perferred_priority = 1,
-        .afe_ringbuf_size = 50,
-        .memory_alloc_mode = AFE_MEMORY_ALLOC_MORE_PSRAM,
-        .afe_linear_gain = 1.0,
-        .agc_mode = AFE_MN_PEAK_AGC_MODE_2,
-        .pcm_config = {
-            .total_ch_num = channels_,
-            .mic_num = channels_ - ref_num,
-            .ref_num = ref_num,
-            .sample_rate = 16000,
-        },
-        .debug_init = false,
-        .debug_hook = {{ AFE_DEBUG_HOOK_MASE_TASK_IN, NULL }, { AFE_DEBUG_HOOK_FETCH_TASK_IN, NULL }},
-        .afe_ns_mode = NS_MODE_SSP,
-        .afe_ns_model_name = NULL,
-        .fixed_first_channel = true,
-    };
+    std::string input_format;
+    for (int i = 0; i < channels_ - ref_num; i++) {
+        input_format.push_back('M');
+    }
+    for (int i = 0; i < ref_num; i++) {
+        input_format.push_back('R');
+    }
 
-    afe_communication_data_ = esp_afe_vc_v1.create_from_config(&afe_config);
+    afe_config_t* afe_config = afe_config_init(input_format.c_str(), NULL, AFE_TYPE_VC, AFE_MODE_HIGH_PERF);
+    afe_config->aec_init = false;
+    afe_config->aec_mode = AEC_MODE_VOIP_HIGH_PERF;
+    afe_config->ns_init = true;
+    afe_config->vad_init = true;
+    afe_config->vad_mode = VAD_MODE_0;
+    afe_config->vad_min_noise_ms = 100;
+    afe_config->afe_perferred_core = 1;
+    afe_config->afe_perferred_priority = 1;
+    afe_config->agc_init = true;
+    afe_config->agc_mode = AFE_AGC_MODE_WEBRTC;
+    afe_config->agc_compression_gain_db = 10;
+    afe_config->memory_alloc_mode = AFE_MEMORY_ALLOC_MORE_PSRAM;
+
+    afe_iface_ = esp_afe_handle_from_config(afe_config);
+    afe_data_ = afe_iface_->create_from_config(afe_config);
 
     xTaskCreate([](void* arg) {
         auto this_ = (AudioProcessor*)arg;
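Note on the input_format string built in the hunk above: afe_config_init() takes a per-channel layout string, one character per interleaved input channel, 'M' for a microphone channel and 'R' for a reference channel. A hedged illustration of the same loop logic as a standalone helper (the helper name is hypothetical, not part of this commit):

#include <string>

// Hypothetical helper mirroring the loops in Initialize(): one 'M' per mic
// channel followed by one 'R' per reference channel of the interleaved input.
std::string MakeAfeInputFormat(int channels, int ref_num) {
    std::string format;
    for (int i = 0; i < channels - ref_num; i++) format.push_back('M');
    for (int i = 0; i < ref_num; i++) format.push_back('R');
    return format;   // e.g. MakeAfeInputFormat(2, 1) == "MR"
}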
@@ -57,8 +48,8 @@ void AudioProcessor::Initialize(int channels, bool reference) {
 }
 
 AudioProcessor::~AudioProcessor() {
-    if (afe_communication_data_ != nullptr) {
-        esp_afe_vc_v1.destroy(afe_communication_data_);
+    if (afe_data_ != nullptr) {
+        afe_iface_->destroy(afe_data_);
     }
     vEventGroupDelete(event_group_);
 }
@@ -66,10 +57,10 @@ AudioProcessor::~AudioProcessor() {
 void AudioProcessor::Input(const std::vector<int16_t>& data) {
     input_buffer_.insert(input_buffer_.end(), data.begin(), data.end());
 
-    auto feed_size = esp_afe_vc_v1.get_feed_chunksize(afe_communication_data_) * channels_;
+    auto feed_size = afe_iface_->get_feed_chunksize(afe_data_) * channels_;
     while (input_buffer_.size() >= feed_size) {
         auto chunk = input_buffer_.data();
-        esp_afe_vc_v1.feed(afe_communication_data_, chunk);
+        afe_iface_->feed(afe_data_, chunk);
         input_buffer_.erase(input_buffer_.begin(), input_buffer_.begin() + feed_size);
     }
 }
@@ -80,6 +71,7 @@ void AudioProcessor::Start() {
 
 void AudioProcessor::Stop() {
     xEventGroupClearBits(event_group_, PROCESSOR_RUNNING);
+    afe_iface_->reset_buffer(afe_data_);
 }
 
 bool AudioProcessor::IsRunning() {
@@ -90,16 +82,20 @@ void AudioProcessor::OnOutput(std::function<void(std::vector<int16_t>&& data)> c
     output_callback_ = callback;
 }
 
+void AudioProcessor::OnVadStateChange(std::function<void(bool speaking)> callback) {
+    vad_state_change_callback_ = callback;
+}
+
 void AudioProcessor::AudioProcessorTask() {
-    auto fetch_size = esp_afe_sr_v1.get_fetch_chunksize(afe_communication_data_);
-    auto feed_size = esp_afe_sr_v1.get_feed_chunksize(afe_communication_data_);
+    auto fetch_size = afe_iface_->get_fetch_chunksize(afe_data_);
+    auto feed_size = afe_iface_->get_feed_chunksize(afe_data_);
     ESP_LOGI(TAG, "Audio communication task started, feed size: %d fetch size: %d",
         feed_size, fetch_size);
 
     while (true) {
         xEventGroupWaitBits(event_group_, PROCESSOR_RUNNING, pdFALSE, pdTRUE, portMAX_DELAY);
 
-        auto res = esp_afe_vc_v1.fetch(afe_communication_data_);
+        auto res = afe_iface_->fetch_with_delay(afe_data_, portMAX_DELAY);
         if ((xEventGroupGetBits(event_group_) & PROCESSOR_RUNNING) == 0) {
             continue;
         }
@@ -110,6 +106,17 @@ void AudioProcessor::AudioProcessorTask() {
             continue;
         }
 
+        // VAD state change
+        if (vad_state_change_callback_) {
+            if (res->vad_state == VAD_SPEECH && !is_speaking_) {
+                is_speaking_ = true;
+                vad_state_change_callback_(true);
+            } else if (res->vad_state == VAD_SILENCE && is_speaking_) {
+                is_speaking_ = false;
+                vad_state_change_callback_(false);
+            }
+        }
+
         if (output_callback_) {
             output_callback_(std::vector<int16_t>(res->data, res->data + res->data_size / sizeof(int16_t)));
         }
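With the VAD state callback now living in AudioProcessor, a caller wires the class up roughly as sketched below. Only the methods visible in this diff (Initialize, OnOutput, OnVadStateChange, Start) are used; the function name, log tag and header name are placeholders, not code from this commit.

#include <vector>
#include "esp_log.h"
#include "audio_processor.h"   // header name assumed

// Illustrative wiring of the new AudioProcessor callbacks.
void SetupAudioProcessor(AudioProcessor& audio_processor) {
    audio_processor.Initialize(2, true);   // 2 interleaved channels, reference channel present
    audio_processor.OnOutput([](std::vector<int16_t>&& data) {
        (void)data;  // e.g. hand the processed 16 kHz frame to the encoder / network layer
    });
    audio_processor.OnVadStateChange([](bool speaking) {
        ESP_LOGI("app", "VAD state: %s", speaking ? "speech" : "silence");
    });
    audio_processor.Start();
}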
@@ -21,14 +21,18 @@ public:
     void Stop();
     bool IsRunning();
     void OnOutput(std::function<void(std::vector<int16_t>&& data)> callback);
+    void OnVadStateChange(std::function<void(bool speaking)> callback);
 
 private:
     EventGroupHandle_t event_group_ = nullptr;
-    esp_afe_sr_data_t* afe_communication_data_ = nullptr;
+    esp_afe_sr_iface_t* afe_iface_ = nullptr;
+    esp_afe_sr_data_t* afe_data_ = nullptr;
     std::vector<int16_t> input_buffer_;
     std::function<void(std::vector<int16_t>&& data)> output_callback_;
+    std::function<void(bool speaking)> vad_state_change_callback_;
     int channels_;
     bool reference_;
+    bool is_speaking_ = false;
 
     void AudioProcessorTask();
 };
@@ -11,7 +11,7 @@
 static const char* TAG = "WakeWordDetect";
 
 WakeWordDetect::WakeWordDetect()
-    : afe_detection_data_(nullptr),
+    : afe_data_(nullptr),
       wake_word_pcm_(),
       wake_word_opus_() {
 
@@ -19,8 +19,8 @@ WakeWordDetect::WakeWordDetect()
 }
 
 WakeWordDetect::~WakeWordDetect() {
-    if (afe_detection_data_ != nullptr) {
-        esp_afe_sr_v1.destroy(afe_detection_data_);
+    if (afe_data_ != nullptr) {
+        afe_iface_->destroy(afe_data_);
     }
 
     if (wake_word_encode_task_stack_ != nullptr) {
@@ -50,39 +50,22 @@ void WakeWordDetect::Initialize(int channels, bool reference) {
         }
     }
 
-    afe_config_t afe_config = {
-        .aec_init = reference_,
-        .se_init = true,
-        .vad_init = true,
-        .wakenet_init = true,
-        .voice_communication_init = false,
-        .voice_communication_agc_init = false,
-        .voice_communication_agc_gain = 10,
-        .vad_mode = VAD_MODE_3,
-        .wakenet_model_name = wakenet_model_,
-        .wakenet_model_name_2 = NULL,
-        .wakenet_mode = DET_MODE_90,
-        .afe_mode = SR_MODE_HIGH_PERF,
-        .afe_perferred_core = 1,
-        .afe_perferred_priority = 1,
-        .afe_ringbuf_size = 50,
-        .memory_alloc_mode = AFE_MEMORY_ALLOC_MORE_PSRAM,
-        .afe_linear_gain = 1.0,
-        .agc_mode = AFE_MN_PEAK_AGC_MODE_2,
-        .pcm_config = {
-            .total_ch_num = channels_,
-            .mic_num = channels_ - ref_num,
-            .ref_num = ref_num,
-            .sample_rate = 16000
-        },
-        .debug_init = false,
-        .debug_hook = {{ AFE_DEBUG_HOOK_MASE_TASK_IN, NULL }, { AFE_DEBUG_HOOK_FETCH_TASK_IN, NULL }},
-        .afe_ns_mode = NS_MODE_SSP,
-        .afe_ns_model_name = NULL,
-        .fixed_first_channel = true,
-    };
-
-    afe_detection_data_ = esp_afe_sr_v1.create_from_config(&afe_config);
+    std::string input_format;
+    for (int i = 0; i < channels_ - ref_num; i++) {
+        input_format.push_back('M');
+    }
+    for (int i = 0; i < ref_num; i++) {
+        input_format.push_back('R');
+    }
+    afe_config_t* afe_config = afe_config_init(input_format.c_str(), models, AFE_TYPE_SR, AFE_MODE_HIGH_PERF);
+    afe_config->aec_init = reference_;
+    afe_config->aec_mode = AEC_MODE_SR_HIGH_PERF;
+    afe_config->afe_perferred_core = 1;
+    afe_config->afe_perferred_priority = 1;
+    afe_config->memory_alloc_mode = AFE_MEMORY_ALLOC_MORE_PSRAM;
+
+    afe_iface_ = esp_afe_handle_from_config(afe_config);
+    afe_data_ = afe_iface_->create_from_config(afe_config);
 
     xTaskCreate([](void* arg) {
         auto this_ = (WakeWordDetect*)arg;
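The models argument passed to afe_config_init() in the hunk above is the srmodel list that tells the AFE which WakeNet model to load; where it comes from is not shown in this diff. In esp-sr it is typically read from the flashed model partition, roughly as sketched below; the header name, partition label and filter call are assumptions to illustrate the idea and should be checked against the 2.0.2 headers, they are not code from this commit.

#include "model_path.h"   // assumed esp-sr header for esp_srmodel_init()/esp_srmodel_filter()

// Sketch: load the model list from the model partition and pick a WakeNet model name.
srmodel_list_t* models = esp_srmodel_init("model");                    // "model" = partition label (assumed)
char* wakenet_name = esp_srmodel_filter(models, ESP_WN_PREFIX, NULL);  // first WakeNet model, if any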
@@ -95,16 +78,13 @@ void WakeWordDetect::OnWakeWordDetected(std::function<void(const std::string& wa
     wake_word_detected_callback_ = callback;
 }
 
-void WakeWordDetect::OnVadStateChange(std::function<void(bool speaking)> callback) {
-    vad_state_change_callback_ = callback;
-}
-
 void WakeWordDetect::StartDetection() {
     xEventGroupSetBits(event_group_, DETECTION_RUNNING_EVENT);
 }
 
 void WakeWordDetect::StopDetection() {
     xEventGroupClearBits(event_group_, DETECTION_RUNNING_EVENT);
+    afe_iface_->reset_buffer(afe_data_);
 }
 
 bool WakeWordDetect::IsDetectionRunning() {
@@ -114,23 +94,23 @@ bool WakeWordDetect::IsDetectionRunning() {
 void WakeWordDetect::Feed(const std::vector<int16_t>& data) {
     input_buffer_.insert(input_buffer_.end(), data.begin(), data.end());
 
-    auto feed_size = esp_afe_sr_v1.get_feed_chunksize(afe_detection_data_) * channels_;
+    auto feed_size = afe_iface_->get_feed_chunksize(afe_data_) * channels_;
     while (input_buffer_.size() >= feed_size) {
-        esp_afe_sr_v1.feed(afe_detection_data_, input_buffer_.data());
+        afe_iface_->feed(afe_data_, input_buffer_.data());
         input_buffer_.erase(input_buffer_.begin(), input_buffer_.begin() + feed_size);
     }
 }
 
 void WakeWordDetect::AudioDetectionTask() {
-    auto fetch_size = esp_afe_sr_v1.get_fetch_chunksize(afe_detection_data_);
-    auto feed_size = esp_afe_sr_v1.get_feed_chunksize(afe_detection_data_);
+    auto fetch_size = afe_iface_->get_fetch_chunksize(afe_data_);
+    auto feed_size = afe_iface_->get_feed_chunksize(afe_data_);
     ESP_LOGI(TAG, "Audio detection task started, feed size: %d fetch size: %d",
         feed_size, fetch_size);
 
     while (true) {
         xEventGroupWaitBits(event_group_, DETECTION_RUNNING_EVENT, pdFALSE, pdTRUE, portMAX_DELAY);
 
-        auto res = esp_afe_sr_v1.fetch(afe_detection_data_);
+        auto res = afe_iface_->fetch_with_delay(afe_data_, portMAX_DELAY);
         if (res == nullptr || res->ret_value == ESP_FAIL) {
             continue;
         }
@@ -138,17 +118,6 @@ void WakeWordDetect::AudioDetectionTask() {
         // Store the wake word data for voice recognition, like who is speaking
         StoreWakeWordData((uint16_t*)res->data, res->data_size / sizeof(uint16_t));
 
-        // VAD state change
-        if (vad_state_change_callback_) {
-            if (res->vad_state == AFE_VAD_SPEECH && !is_speaking_) {
-                is_speaking_ = true;
-                vad_state_change_callback_(true);
-            } else if (res->vad_state == AFE_VAD_SILENCE && is_speaking_) {
-                is_speaking_ = false;
-                vad_state_change_callback_(false);
-            }
-        }
-
         if (res->wakeup_state == WAKENET_DETECTED) {
             StopDetection();
             last_detected_wake_word_ = wake_words_[res->wake_word_index - 1];
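The subtraction in wake_words_[res->wake_word_index - 1] above reflects that the wake_word_index reported by the AFE is 1-based. For completeness, a hedged sketch of how a caller might wire the detector's public API from this diff; the function name, log tag, header name and restart policy are placeholders, not code from this commit.

#include <string>
#include "esp_log.h"
#include "wake_word_detect.h"   // header name assumed

// Illustrative wiring of WakeWordDetect using only methods shown in this diff.
void SetupWakeWordDetect(WakeWordDetect& wake_word_detect) {
    wake_word_detect.Initialize(2, true);   // 2 interleaved channels, reference channel present
    wake_word_detect.OnWakeWordDetected([&wake_word_detect](const std::string& wake_word) {
        ESP_LOGI("app", "Wake word detected: %s", wake_word.c_str());
        // Detection stops itself on WAKENET_DETECTED; restart it once the
        // application is ready to listen again.
        wake_word_detect.StartDetection();
    });
    wake_word_detect.StartDetection();
}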
@@ -24,7 +24,6 @@ public:
     void Initialize(int channels, bool reference);
     void Feed(const std::vector<int16_t>& data);
     void OnWakeWordDetected(std::function<void(const std::string& wake_word)> callback);
-    void OnVadStateChange(std::function<void(bool speaking)> callback);
     void StartDetection();
     void StopDetection();
     bool IsDetectionRunning();
@@ -33,14 +32,13 @@ public:
     const std::string& GetLastDetectedWakeWord() const { return last_detected_wake_word_; }
 
 private:
-    esp_afe_sr_data_t* afe_detection_data_ = nullptr;
+    esp_afe_sr_iface_t* afe_iface_ = nullptr;
+    esp_afe_sr_data_t* afe_data_ = nullptr;
     char* wakenet_model_ = NULL;
     std::vector<std::string> wake_words_;
     std::vector<int16_t> input_buffer_;
     EventGroupHandle_t event_group_;
     std::function<void(const std::string& wake_word)> wake_word_detected_callback_;
-    std::function<void(bool speaking)> vad_state_change_callback_;
-    bool is_speaking_ = false;
     int channels_;
     bool reference_;
     std::string last_detected_wake_word_;