NeuralAmpModelerPlugin

Plugin for Neural Amp Modeler

commit 70f9253e27d4ec8cfb02f269cc58e815e310dd22
parent 34729a1c0d5af5e72992ea3199fa626f5634d81c
Author: Steven Atkinson <steven@atkinson.mn>
Date:   Fri, 16 Dec 2022 20:25:28 -0800

Implement anti-pop for WaveNet (#13)


Diffstat:
M NeuralAmpModeler/dsp/wavenet.cpp | 35 ++++++++++++++++++++++++++++++++++-
M NeuralAmpModeler/dsp/wavenet.h   | 11 +++++++++++
2 files changed, 45 insertions(+), 1 deletion(-)

diff --git a/NeuralAmpModeler/dsp/wavenet.cpp b/NeuralAmpModeler/dsp/wavenet.cpp
@@ -95,6 +95,14 @@ void wavenet::_LayerArray::advance_buffers_(const int num_frames)
   this->_buffer_start += num_frames;
 }
 
+long wavenet::_LayerArray::get_receptive_field() const
+{
+  long result = 0;
+  for (int i = 0; i < this->_layers.size(); i++)
+    result += this->_layers[i].get_dilation() * (this->_layers[i].get_kernel_size() - 1);
+  return result;
+}
+
 void wavenet::_LayerArray::prepare_for_frames_(const int num_frames)
 {
   if (this->_buffer_start + num_frames > this->_get_buffer_size())
@@ -290,6 +298,7 @@ wavenet::WaveNet::WaveNet(
   }
   this->_head_output.resize(1, 0); // Mono output!
   this->set_params_(params);
+  this->_reset_anti_pop_();
 }
 
 void wavenet::WaveNet::finalize_(const int num_frames)
@@ -377,7 +386,7 @@ void wavenet::WaveNet::_process_core_()
   for (int s = 0; s < num_frames; s++)
     this->_core_dsp_output[s] = this->_head_scale * this->_head_arrays[final_head_array](0, s);
   // Apply anti-pop
-  // this->_anti_pop_();
+  this->_anti_pop_();
 }
 
 void wavenet::WaveNet::_set_num_frames_(const int num_frames)
@@ -397,3 +406,27 @@ void wavenet::WaveNet::_set_num_frames_(const int num_frames)
   //this->_head.set_num_frames_(num_frames);
   this->_num_frames = num_frames;
 }
+
+void wavenet::WaveNet::_anti_pop_()
+{
+  if (this->_anti_pop_countdown >= this->_anti_pop_ramp)
+    return;
+  const float slope = 1.0f / float(this->_anti_pop_ramp);
+  for (int i = 0; i < this->_core_dsp_output.size(); i++)
+  {
+    if (this->_anti_pop_countdown >= this->_anti_pop_ramp)
+      break;
+    const float gain = std::max(slope * float(this->_anti_pop_countdown), 0.0f);
+    this->_core_dsp_output[i] *= gain;
+    this->_anti_pop_countdown++;
+  }
+}
+
+void wavenet::WaveNet::_reset_anti_pop_()
+{
+  // You need the "real" receptive field, not the buffers.
+  long receptive_field = 1;
+  for (int i = 0; i < this->_layer_arrays.size(); i++)
+    receptive_field += this->_layer_arrays[i].get_receptive_field();
+  this->_anti_pop_countdown = -receptive_field;
+}
diff --git a/NeuralAmpModeler/dsp/wavenet.h b/NeuralAmpModeler/dsp/wavenet.h
@@ -140,6 +140,8 @@ namespace wavenet {
     void set_num_frames_(const int num_frames);
     void set_params_(std::vector<float>::iterator& it);
 
+    long get_receptive_field() const;
+
   private:
     long _buffer_start;
     // The rechannel before the layers
@@ -240,5 +242,14 @@ namespace wavenet {
 
     // Ensure that all buffer arrays are the right size for this num_frames
     void _set_num_frames_(const int num_frames);
+
+    // The net starts with random parameters inside; we need to wait for a full
+    // receptive field to pass through before we can count on the output being
+    // ok. This implements a gentle "ramp-up" so that there's no "pop" at the
+    // start.
+    long _anti_pop_countdown;
+    const long _anti_pop_ramp = 4000;
+    void _anti_pop_();
+    void _reset_anti_pop_();
   };
 }; // namespace wavenet
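
Note on the change above: _reset_anti_pop_() starts the countdown at minus the
model's receptive field, so the first output samples (which are computed from
the layer buffers' arbitrary initial contents) are muted, and _anti_pop_() then
fades the output in linearly over _anti_pop_ramp (4000) samples; at 48 kHz that
fade is about 83 ms. Below is a minimal standalone sketch of the same gain
logic, not part of the plugin code, with made-up values (a receptive field of 8
samples and a 16-sample ramp) and a hypothetical anti_pop() driver.

#include <algorithm>
#include <cstdio>
#include <vector>

// Hypothetical stand-in values; the plugin derives the receptive field from
// its layer arrays and uses a 4000-sample ramp.
static long anti_pop_countdown = -8; // starts at -receptive_field
static const long anti_pop_ramp = 16;

// Mirrors the shape of WaveNet::_anti_pop_(): the gain is clamped to 0 while
// the countdown is still negative, then rises linearly from 0 to 1 over
// anti_pop_ramp samples, after which the output passes through untouched.
void anti_pop(std::vector<float>& block)
{
  if (anti_pop_countdown >= anti_pop_ramp)
    return;
  const float slope = 1.0f / float(anti_pop_ramp);
  for (size_t i = 0; i < block.size(); i++)
  {
    if (anti_pop_countdown >= anti_pop_ramp)
      break;
    block[i] *= std::max(slope * float(anti_pop_countdown), 0.0f);
    anti_pop_countdown++;
  }
}

int main()
{
  std::vector<float> block(32, 1.0f); // one block of unity samples
  anti_pop(block);
  for (float s : block)
    std::printf("%.3f ", s); // zeros until the countdown reaches zero, then a linear fade-in
  std::printf("\n");
  return 0;
}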