diff --git a/core/math/audio_frame.h b/core/math/audio_frame.h
index 5773da9211c..a5616b8d798 100644
--- a/core/math/audio_frame.h
+++ b/core/math/audio_frame.h
@@ -47,6 +47,9 @@ static inline float undenormalise(volatile float f) {
 	return (v.i & 0x7f800000) < 0x08000000 ? 0.0f : f;
 }
 
+static const float AUDIO_PEAK_OFFSET = 0.0000000001f;
+static const float AUDIO_MIN_PEAK_DB = -200.0f; // linear2db(AUDIO_PEAK_OFFSET)
+
 struct AudioFrame {
 	//left and right samples
 	float l, r;
diff --git a/servers/audio_server.cpp b/servers/audio_server.cpp
index d4f7876b4b2..16c6a26595e 100644
--- a/servers/audio_server.cpp
+++ b/servers/audio_server.cpp
@@ -401,6 +401,7 @@ void AudioServer::_mix_step() {
 
 		for (int k = 0; k < bus->channels.size(); k++) {
 			if (!bus->channels[k].active) {
+				bus->channels.write[k].peak_volume = AudioFrame(AUDIO_MIN_PEAK_DB, AUDIO_MIN_PEAK_DB);
 				continue;
 			}
 
@@ -434,7 +435,7 @@ void AudioServer::_mix_step() {
 				}
 			}
 
-			bus->channels.write[k].peak_volume = AudioFrame(Math::linear2db(peak.l + 0.0000000001), Math::linear2db(peak.r + 0.0000000001));
+			bus->channels.write[k].peak_volume = AudioFrame(Math::linear2db(peak.l + AUDIO_PEAK_OFFSET), Math::linear2db(peak.r + AUDIO_PEAK_OFFSET));
 
 			if (!bus->channels[k].used) {
 				//see if any audio is contained, because channel was not used
diff --git a/servers/audio_server.h b/servers/audio_server.h
index 51fbc598515..a1a373e1ca0 100644
--- a/servers/audio_server.h
+++ b/servers/audio_server.h
@@ -199,7 +199,7 @@ private:
 			last_mix_with_audio = 0;
 			used = false;
 			active = false;
-			peak_volume = AudioFrame(0, 0);
+			peak_volume = AudioFrame(AUDIO_MIN_PEAK_DB, AUDIO_MIN_PEAK_DB);
 		}
 	};
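
Note on the constants: -200.0f is simply the decibel value of the 1e-10 offset that is added before calling Math::linear2db, so an inactive channel now reports the same peak floor as an active channel that mixed pure silence. A minimal standalone sketch of that relationship, assuming the standard 20 * log10 linear-to-decibel conversion (linear_to_db below is an illustrative stand-in, not Godot's Math::linear2db):

```cpp
// Standalone sketch (not Godot source). It only checks that the constant
// chosen for AUDIO_MIN_PEAK_DB matches linear2db(AUDIO_PEAK_OFFSET),
// assuming the usual 20 * log10(x) definition of the conversion.
#include <cmath>
#include <cstdio>

static const float AUDIO_PEAK_OFFSET = 0.0000000001f; // 1e-10
static const float AUDIO_MIN_PEAK_DB = -200.0f;

// Illustrative stand-in for Math::linear2db: 20 * log10(x).
static float linear_to_db(float p_linear) {
	return 20.0f * std::log10(p_linear);
}

int main() {
	// 20 * log10(1e-10) = 20 * -10 = -200 dB, matching AUDIO_MIN_PEAK_DB.
	std::printf("linear_to_db(AUDIO_PEAK_OFFSET) = %.1f dB\n", linear_to_db(AUDIO_PEAK_OFFSET));
	std::printf("AUDIO_MIN_PEAK_DB               = %.1f dB\n", AUDIO_MIN_PEAK_DB);
	return 0;
}
```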