Fix member names of `AudioFrame` to match extension
parent dfe226b933
commit d8b29efe66
@@ -51,105 +51,123 @@ static const float AUDIO_PEAK_OFFSET = 0.0000000001f;
 static const float AUDIO_MIN_PEAK_DB = -200.0f; // linear_to_db(AUDIO_PEAK_OFFSET)

 struct AudioFrame {
-    //left and right samples
-    float l = 0.f, r = 0.f;
+    // Left and right samples.
+    union {
+        struct {
+            float left;
+            float right;
+        };
+#ifndef DISABLE_DEPRECATED
+        struct {
+            float l;
+            float r;
+        };
+#endif
+        float levels[2] = { 0.0 };
+    };

-    _ALWAYS_INLINE_ const float &operator[](int idx) const { return idx == 0 ? l : r; }
-    _ALWAYS_INLINE_ float &operator[](int idx) { return idx == 0 ? l : r; }
+    _ALWAYS_INLINE_ const float &operator[](int p_idx) const {
+        DEV_ASSERT((unsigned int)p_idx < 2);
+        return levels[p_idx];
+    }
+    _ALWAYS_INLINE_ float &operator[](int p_idx) {
+        DEV_ASSERT((unsigned int)p_idx < 2);
+        return levels[p_idx];
+    }

-    _ALWAYS_INLINE_ AudioFrame operator+(const AudioFrame &p_frame) const { return AudioFrame(l + p_frame.l, r + p_frame.r); }
-    _ALWAYS_INLINE_ AudioFrame operator-(const AudioFrame &p_frame) const { return AudioFrame(l - p_frame.l, r - p_frame.r); }
-    _ALWAYS_INLINE_ AudioFrame operator*(const AudioFrame &p_frame) const { return AudioFrame(l * p_frame.l, r * p_frame.r); }
-    _ALWAYS_INLINE_ AudioFrame operator/(const AudioFrame &p_frame) const { return AudioFrame(l / p_frame.l, r / p_frame.r); }
+    _ALWAYS_INLINE_ AudioFrame operator+(const AudioFrame &p_frame) const { return AudioFrame(left + p_frame.left, right + p_frame.right); }
+    _ALWAYS_INLINE_ AudioFrame operator-(const AudioFrame &p_frame) const { return AudioFrame(left - p_frame.left, right - p_frame.right); }
+    _ALWAYS_INLINE_ AudioFrame operator*(const AudioFrame &p_frame) const { return AudioFrame(left * p_frame.left, right * p_frame.right); }
+    _ALWAYS_INLINE_ AudioFrame operator/(const AudioFrame &p_frame) const { return AudioFrame(left / p_frame.left, right / p_frame.right); }

-    _ALWAYS_INLINE_ AudioFrame operator+(float p_sample) const { return AudioFrame(l + p_sample, r + p_sample); }
-    _ALWAYS_INLINE_ AudioFrame operator-(float p_sample) const { return AudioFrame(l - p_sample, r - p_sample); }
-    _ALWAYS_INLINE_ AudioFrame operator*(float p_sample) const { return AudioFrame(l * p_sample, r * p_sample); }
-    _ALWAYS_INLINE_ AudioFrame operator/(float p_sample) const { return AudioFrame(l / p_sample, r / p_sample); }
+    _ALWAYS_INLINE_ AudioFrame operator+(float p_sample) const { return AudioFrame(left + p_sample, right + p_sample); }
+    _ALWAYS_INLINE_ AudioFrame operator-(float p_sample) const { return AudioFrame(left - p_sample, right - p_sample); }
+    _ALWAYS_INLINE_ AudioFrame operator*(float p_sample) const { return AudioFrame(left * p_sample, right * p_sample); }
+    _ALWAYS_INLINE_ AudioFrame operator/(float p_sample) const { return AudioFrame(left / p_sample, right / p_sample); }

     _ALWAYS_INLINE_ void operator+=(const AudioFrame &p_frame) {
-        l += p_frame.l;
-        r += p_frame.r;
+        left += p_frame.left;
+        right += p_frame.right;
     }
     _ALWAYS_INLINE_ void operator-=(const AudioFrame &p_frame) {
-        l -= p_frame.l;
-        r -= p_frame.r;
+        left -= p_frame.left;
+        right -= p_frame.right;
     }
     _ALWAYS_INLINE_ void operator*=(const AudioFrame &p_frame) {
-        l *= p_frame.l;
-        r *= p_frame.r;
+        left *= p_frame.left;
+        right *= p_frame.right;
     }
     _ALWAYS_INLINE_ void operator/=(const AudioFrame &p_frame) {
-        l /= p_frame.l;
-        r /= p_frame.r;
+        left /= p_frame.left;
+        right /= p_frame.right;
     }

     _ALWAYS_INLINE_ void operator+=(float p_sample) {
-        l += p_sample;
-        r += p_sample;
+        left += p_sample;
+        right += p_sample;
     }
     _ALWAYS_INLINE_ void operator-=(float p_sample) {
-        l -= p_sample;
-        r -= p_sample;
+        left -= p_sample;
+        right -= p_sample;
     }
     _ALWAYS_INLINE_ void operator*=(float p_sample) {
-        l *= p_sample;
-        r *= p_sample;
+        left *= p_sample;
+        right *= p_sample;
     }
     _ALWAYS_INLINE_ void operator/=(float p_sample) {
-        l /= p_sample;
-        r /= p_sample;
+        left /= p_sample;
+        right /= p_sample;
     }

     _ALWAYS_INLINE_ void undenormalize() {
-        l = ::undenormalize(l);
-        r = ::undenormalize(r);
+        left = ::undenormalize(left);
+        right = ::undenormalize(right);
     }

     _FORCE_INLINE_ AudioFrame lerp(const AudioFrame &p_b, float p_t) const {
         AudioFrame res = *this;

-        res.l += (p_t * (p_b.l - l));
-        res.r += (p_t * (p_b.r - r));
+        res.left += (p_t * (p_b.left - left));
+        res.right += (p_t * (p_b.right - right));

         return res;
     }

-    _ALWAYS_INLINE_ AudioFrame(float p_l, float p_r) {
-        l = p_l;
-        r = p_r;
+    _ALWAYS_INLINE_ AudioFrame(float p_left, float p_right) {
+        left = p_left;
+        right = p_right;
     }
     _ALWAYS_INLINE_ AudioFrame(const AudioFrame &p_frame) {
-        l = p_frame.l;
-        r = p_frame.r;
+        left = p_frame.left;
+        right = p_frame.right;
     }

     _ALWAYS_INLINE_ void operator=(const AudioFrame &p_frame) {
-        l = p_frame.l;
-        r = p_frame.r;
+        left = p_frame.left;
+        right = p_frame.right;
     }

     _ALWAYS_INLINE_ operator Vector2() const {
-        return Vector2(l, r);
+        return Vector2(left, right);
     }

     _ALWAYS_INLINE_ AudioFrame(const Vector2 &p_v2) {
-        l = p_v2.x;
-        r = p_v2.y;
+        left = p_v2.x;
+        right = p_v2.y;
     }
     _ALWAYS_INLINE_ AudioFrame() {}
 };

 _ALWAYS_INLINE_ AudioFrame operator*(float p_scalar, const AudioFrame &p_frame) {
-    return AudioFrame(p_frame.l * p_scalar, p_frame.r * p_scalar);
+    return AudioFrame(p_frame.left * p_scalar, p_frame.right * p_scalar);
 }

 _ALWAYS_INLINE_ AudioFrame operator*(int32_t p_scalar, const AudioFrame &p_frame) {
-    return AudioFrame(p_frame.l * p_scalar, p_frame.r * p_scalar);
+    return AudioFrame(p_frame.left * p_scalar, p_frame.right * p_scalar);
 }

 _ALWAYS_INLINE_ AudioFrame operator*(int64_t p_scalar, const AudioFrame &p_frame) {
-    return AudioFrame(p_frame.l * p_scalar, p_frame.r * p_scalar);
+    return AudioFrame(p_frame.left * p_scalar, p_frame.right * p_scalar);
 }

 #endif // AUDIO_FRAME_H
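
Note (not part of the diff): a minimal usage sketch of the renamed members. The anonymous union keeps the old `l`/`r` names as deprecated aliases of `left`/`right` unless DISABLE_DEPRECATED is defined, and `levels[]` is the storage that `operator[]` indexes into.

    AudioFrame frame(0.25f, -0.5f);
    float mono = (frame.left + frame.right) * 0.5f; // new member names
    frame[0] = mono; // operator[] asserts the index and accesses levels[]
    #ifndef DISABLE_DEPRECATED
    float legacy = frame.l; // deprecated alias, same storage as frame.left
    #endif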
@@ -143,11 +143,11 @@ void AudioStreamPreviewGenerator::_preview_thread(void *p_preview) {
         }

         for (int j = from; j < to; j++) {
-            max = MAX(max, mix_chunk[j].l);
-            max = MAX(max, mix_chunk[j].r);
+            max = MAX(max, mix_chunk[j].left);
+            max = MAX(max, mix_chunk[j].right);

-            min = MIN(min, mix_chunk[j].l);
-            min = MIN(min, mix_chunk[j].r);
+            min = MIN(min, mix_chunk[j].left);
+            min = MIN(min, mix_chunk[j].right);
         }

         uint8_t pfrom = CLAMP((min * 0.5 + 0.5) * 255, 0, 255);
@@ -666,11 +666,11 @@ Ref<Texture2D> EditorAudioStreamPreviewPlugin::generate(const Ref<Resource> &p_f
         }

         for (int j = from; j < to; j++) {
-            max = MAX(max, frames[j].l);
-            max = MAX(max, frames[j].r);
+            max = MAX(max, frames[j].left);
+            max = MAX(max, frames[j].right);

-            min = MIN(min, frames[j].l);
-            min = MIN(min, frames[j].r);
+            min = MIN(min, frames[j].left);
+            min = MIN(min, frames[j].right);
         }

         int pfrom = CLAMP((min * 0.5 + 0.5) * h / 2, 0, h / 2) + h / 4;
@@ -177,13 +177,13 @@ int AudioStreamPlaybackOggVorbis::_mix_frames_vorbis(AudioFrame *p_buffer, int p

     if (info.channels > 1) {
         for (int frame = 0; frame < frames; frame++) {
-            p_buffer[frame].l = pcm[0][frame];
-            p_buffer[frame].r = pcm[1][frame];
+            p_buffer[frame].left = pcm[0][frame];
+            p_buffer[frame].right = pcm[1][frame];
         }
     } else {
         for (int frame = 0; frame < frames; frame++) {
-            p_buffer[frame].l = pcm[0][frame];
-            p_buffer[frame].r = pcm[0][frame];
+            p_buffer[frame].left = pcm[0][frame];
+            p_buffer[frame].right = pcm[0][frame];
         }
     }
     vorbis_synthesis_read(&dsp_state, frames);
@@ -123,20 +123,20 @@ void AudioStreamPlayer3D::_calc_output_vol(const Vector3 &source_dir, real_t tig

     switch (AudioServer::get_singleton()->get_speaker_mode()) {
         case AudioServer::SPEAKER_SURROUND_71:
-            output.write[3].l = volumes[5]; // side-left
-            output.write[3].r = volumes[6]; // side-right
+            output.write[3].left = volumes[5]; // side-left
+            output.write[3].right = volumes[6]; // side-right
             [[fallthrough]];
         case AudioServer::SPEAKER_SURROUND_51:
-            output.write[2].l = volumes[3]; // rear-left
-            output.write[2].r = volumes[4]; // rear-right
+            output.write[2].left = volumes[3]; // rear-left
+            output.write[2].right = volumes[4]; // rear-right
             [[fallthrough]];
         case AudioServer::SPEAKER_SURROUND_31:
-            output.write[1].r = 1.0; // LFE - always full power
-            output.write[1].l = volumes[2]; // center
+            output.write[1].right = 1.0; // LFE - always full power
+            output.write[1].left = volumes[2]; // center
             [[fallthrough]];
         case AudioServer::SPEAKER_MODE_STEREO:
-            output.write[0].r = volumes[1]; // front-right
-            output.write[0].l = volumes[0]; // front-left
+            output.write[0].right = volumes[1]; // front-right
+            output.write[0].left = volumes[0]; // front-left
             break;
     }
 }
@@ -168,25 +168,25 @@ void AudioStreamPlayer3D::_calc_reverb_vol(Area3D *area, Vector3 listener_area_p

     // Stereo pair.
     float c = rev_pos.x * 0.5 + 0.5;
-    reverb_vol.write[0].l = 1.0 - c;
-    reverb_vol.write[0].r = c;
+    reverb_vol.write[0].left = 1.0 - c;
+    reverb_vol.write[0].right = c;

     if (channel_count >= 3) {
         // Center pair + Side pair
         float xl = Vector3(-1, 0, -1).normalized().dot(rev_pos) * 0.5 + 0.5;
         float xr = Vector3(1, 0, -1).normalized().dot(rev_pos) * 0.5 + 0.5;

-        reverb_vol.write[1].l = xl;
-        reverb_vol.write[1].r = xr;
-        reverb_vol.write[2].l = 1.0 - xr;
-        reverb_vol.write[2].r = 1.0 - xl;
+        reverb_vol.write[1].left = xl;
+        reverb_vol.write[1].right = xr;
+        reverb_vol.write[2].left = 1.0 - xr;
+        reverb_vol.write[2].right = 1.0 - xl;
     }

     if (channel_count >= 4) {
         // Rear pair
         // FIXME: Not sure what math should be done here
-        reverb_vol.write[3].l = 1.0 - c;
-        reverb_vol.write[3].r = c;
+        reverb_vol.write[3].left = 1.0 - c;
+        reverb_vol.write[3].right = c;
     }

     for (int i = 0; i < channel_count; i++) {
@@ -213,8 +213,8 @@ void AudioStreamPlaybackWAV::do_resample(const Depth *p_src, AudioFrame *p_dst,
             final_r = final; //copy to right channel if stereo
         }

-        p_dst->l = final / 32767.0;
-        p_dst->r = final_r / 32767.0;
+        p_dst->left = final / 32767.0;
+        p_dst->right = final_r / 32767.0;
         p_dst++;

         p_offset += p_increment;
@@ -49,7 +49,7 @@ PackedVector2Array AudioEffectCapture::get_buffer(int p_frames) {
     streaming_data.resize(p_frames);
     buffer.read(streaming_data.ptrw(), p_frames);
     for (int32_t i = 0; i < p_frames; i++) {
-        ret.write[i] = Vector2(streaming_data[i].l, streaming_data[i].r);
+        ret.write[i] = Vector2(streaming_data[i].left, streaming_data[i].right);
     }
     return ret;
 }
@@ -96,8 +96,8 @@ void AudioEffectChorusInstance::_process_chunk(const AudioFrame *p_src_frames, A
         //vol modifier

         AudioFrame vol_modifier = AudioFrame(base->wet, base->wet) * Math::db_to_linear(v.level);
-        vol_modifier.l *= CLAMP(1.0 - v.pan, 0, 1);
-        vol_modifier.r *= CLAMP(1.0 + v.pan, 0, 1);
+        vol_modifier.left *= CLAMP(1.0 - v.pan, 0, 1);
+        vol_modifier.right *= CLAMP(1.0 + v.pan, 0, 1);

         for (int i = 0; i < p_frame_count; i++) {
             /** COMPUTE WAVEFORM **/
@@ -59,10 +59,10 @@ void AudioEffectCompressorInstance::process(const AudioFrame *p_src_frames, Audi
     for (int i = 0; i < p_frame_count; i++) {
         AudioFrame s = src[i];
         //convert to positive
-        s.l = Math::abs(s.l);
-        s.r = Math::abs(s.r);
+        s.left = Math::abs(s.left);
+        s.right = Math::abs(s.right);

-        float peak = MAX(s.l, s.r);
+        float peak = MAX(s.left, s.right);

         float overdb = 2.08136898f * Math::linear_to_db(peak / threshold);

@@ -64,13 +64,13 @@ void AudioEffectDelayInstance::_process_chunk(const AudioFrame *p_src_frames, Au

     AudioFrame tap1_vol = AudioFrame(tap_1_level_f, tap_1_level_f);

-    tap1_vol.l *= CLAMP(1.0 - base->tap_1_pan, 0, 1);
-    tap1_vol.r *= CLAMP(1.0 + base->tap_1_pan, 0, 1);
+    tap1_vol.left *= CLAMP(1.0 - base->tap_1_pan, 0, 1);
+    tap1_vol.right *= CLAMP(1.0 + base->tap_1_pan, 0, 1);

     AudioFrame tap2_vol = AudioFrame(tap_2_level_f, tap_2_level_f);

-    tap2_vol.l *= CLAMP(1.0 - base->tap_2_pan, 0, 1);
-    tap2_vol.r *= CLAMP(1.0 + base->tap_2_pan, 0, 1);
+    tap2_vol.left *= CLAMP(1.0 - base->tap_2_pan, 0, 1);
+    tap2_vol.right *= CLAMP(1.0 + base->tap_2_pan, 0, 1);

     // feedback lowpass here
     float lpf_c = expf(-Math_TAU * base->feedback_lowpass / mix_rate); // 0 .. 10khz
@@ -46,14 +46,14 @@ void AudioEffectEQInstance::process(const AudioFrame *p_src_frames, AudioFrame *
         AudioFrame dst = AudioFrame(0, 0);

         for (int j = 0; j < band_count; j++) {
-            float l = src.l;
-            float r = src.r;
+            float l = src.left;
+            float r = src.right;

             proc_l[j].process_one(l);
             proc_r[j].process_one(r);

-            dst.l += l * bgain[j];
-            dst.r += r * bgain[j];
+            dst.left += l * bgain[j];
+            dst.right += r * bgain[j];
         }

         p_dst_frames[i] = dst;
@@ -34,7 +34,7 @@
 template <int S>
 void AudioEffectFilterInstance::_process_filter(const AudioFrame *p_src_frames, AudioFrame *p_dst_frames, int p_frame_count) {
     for (int i = 0; i < p_frame_count; i++) {
-        float f = p_src_frames[i].l;
+        float f = p_src_frames[i].left;
         filter_process[0][0].process_one(f);
         if constexpr (S > 1) {
             filter_process[0][1].process_one(f);
@@ -46,11 +46,11 @@ void AudioEffectFilterInstance::_process_filter(const AudioFrame *p_src_frames,
             filter_process[0][3].process_one(f);
         }

-        p_dst_frames[i].l = f;
+        p_dst_frames[i].left = f;
     }

     for (int i = 0; i < p_frame_count; i++) {
-        float f = p_src_frames[i].r;
+        float f = p_src_frames[i].right;
         filter_process[1][0].process_one(f);
         if constexpr (S > 1) {
             filter_process[1][1].process_one(f);
@@ -62,7 +62,7 @@ void AudioEffectFilterInstance::_process_filter(const AudioFrame *p_src_frames,
             filter_process[1][3].process_one(f);
         }

-        p_dst_frames[i].r = f;
+        p_dst_frames[i].right = f;
     }
 }

@@ -41,8 +41,8 @@ void AudioEffectLimiterInstance::process(const AudioFrame *p_src_frames, AudioFr
     float scmult = Math::abs((ceildb - sc) / (peakdb - sc));

     for (int i = 0; i < p_frame_count; i++) {
-        float spl0 = p_src_frames[i].l;
-        float spl1 = p_src_frames[i].r;
+        float spl0 = p_src_frames[i].left;
+        float spl1 = p_src_frames[i].right;
         spl0 = spl0 * makeup;
         spl1 = spl1 * makeup;
         float sign0 = (spl0 < 0.0 ? -1.0 : 1.0);
@@ -62,8 +62,8 @@ void AudioEffectLimiterInstance::process(const AudioFrame *p_src_frames, AudioFr
         spl0 = MIN(ceiling, Math::abs(spl0)) * (spl0 < 0.0 ? -1.0 : 1.0);
         spl1 = MIN(ceiling, Math::abs(spl1)) * (spl1 < 0.0 ? -1.0 : 1.0);

-        p_dst_frames[i].l = spl0;
-        p_dst_frames[i].r = spl1;
+        p_dst_frames[i].left = spl0;
+        p_dst_frames[i].right = spl1;
     }
 }

@@ -35,8 +35,8 @@ void AudioEffectPannerInstance::process(const AudioFrame *p_src_frames, AudioFra
     float rvol = CLAMP(1.0 + base->pan, 0, 1);

     for (int i = 0; i < p_frame_count; i++) {
-        p_dst_frames[i].l = p_src_frames[i].l * lvol + p_src_frames[i].r * (1.0 - rvol);
-        p_dst_frames[i].r = p_src_frames[i].r * rvol + p_src_frames[i].l * (1.0 - lvol);
+        p_dst_frames[i].left = p_src_frames[i].left * lvol + p_src_frames[i].right * (1.0 - rvol);
+        p_dst_frames[i].right = p_src_frames[i].right * rvol + p_src_frames[i].left * (1.0 - lvol);
     }
 }

@@ -61,20 +61,20 @@ void AudioEffectPhaserInstance::process(const AudioFrame *p_src_frames, AudioFra
                 allpass[0][2].update(
                         allpass[0][3].update(
                                 allpass[0][4].update(
-                                        allpass[0][5].update(p_src_frames[i].l + h.l * base->feedback))))));
-        h.l = y;
+                                        allpass[0][5].update(p_src_frames[i].left + h.left * base->feedback))))));
+        h.left = y;

-        p_dst_frames[i].l = p_src_frames[i].l + y * base->depth;
+        p_dst_frames[i].left = p_src_frames[i].left + y * base->depth;

         y = allpass[1][0].update(
                 allpass[1][1].update(
                         allpass[1][2].update(
                                 allpass[1][3].update(
                                         allpass[1][4].update(
-                                                allpass[1][5].update(p_src_frames[i].r + h.r * base->feedback))))));
-        h.r = y;
+                                                allpass[1][5].update(p_src_frames[i].right + h.right * base->feedback))))));
+        h.right = y;

-        p_dst_frames[i].r = p_src_frames[i].r + y * base->depth;
+        p_dst_frames[i].right = p_src_frames[i].right + y * base->depth;
     }
 }

@@ -87,8 +87,8 @@ void AudioEffectRecordInstance::_io_store_buffer() {

     while (to_read) {
         AudioFrame buffered_frame = rb_buf[ring_buffer_read_pos & ring_buffer_mask];
-        recording_data.push_back(buffered_frame.l);
-        recording_data.push_back(buffered_frame.r);
+        recording_data.push_back(buffered_frame.left);
+        recording_data.push_back(buffered_frame.right);

         ring_buffer_read_pos++;
         to_read--;
@@ -51,20 +51,20 @@ void AudioEffectReverbInstance::process(const AudioFrame *p_src_frames, AudioFra
         int to_mix = MIN(todo, Reverb::INPUT_BUFFER_MAX_SIZE);

         for (int j = 0; j < to_mix; j++) {
-            tmp_src[j] = p_src_frames[offset + j].l;
+            tmp_src[j] = p_src_frames[offset + j].left;
         }

         reverb[0].process(tmp_src, tmp_dst, to_mix);

         for (int j = 0; j < to_mix; j++) {
-            p_dst_frames[offset + j].l = tmp_dst[j];
-            tmp_src[j] = p_src_frames[offset + j].r;
+            p_dst_frames[offset + j].left = tmp_dst[j];
+            tmp_src[j] = p_src_frames[offset + j].right;
         }

         reverb[1].process(tmp_src, tmp_dst, to_mix);

         for (int j = 0; j < to_mix; j++) {
-            p_dst_frames[offset + j].r = tmp_dst[j];
+            p_dst_frames[offset + j].right = tmp_dst[j];
         }

         offset += to_mix;

@@ -115,9 +115,9 @@ void AudioEffectSpectrumAnalyzerInstance::process(const AudioFrame *p_src_frames
     float *fftw = temporal_fft.ptrw();
     for (int i = 0; i < to_fill; i++) { //left and right buffers
         float window = -0.5 * Math::cos(to_fill_step * (double)temporal_fft_pos) + 0.5;
-        fftw[temporal_fft_pos * 2] = window * p_src_frames->l;
+        fftw[temporal_fft_pos * 2] = window * p_src_frames->left;
         fftw[temporal_fft_pos * 2 + 1] = 0;
-        fftw[(temporal_fft_pos + fft_size * 2) * 2] = window * p_src_frames->r;
+        fftw[(temporal_fft_pos + fft_size * 2) * 2] = window * p_src_frames->right;
         fftw[(temporal_fft_pos + fft_size * 2) * 2 + 1] = 0;
         ++p_src_frames;
         ++temporal_fft_pos;
@@ -135,8 +135,8 @@ void AudioEffectSpectrumAnalyzerInstance::process(const AudioFrame *p_src_frames

         for (int i = 0; i < fft_size; i++) {
             //abs(vec)/fft_size normalizes each frequency
-            hw[i].l = Vector2(fftw[i * 2], fftw[i * 2 + 1]).length() / float(fft_size);
-            hw[i].r = Vector2(fftw[fft_size * 4 + i * 2], fftw[fft_size * 4 + i * 2 + 1]).length() / float(fft_size);
+            hw[i].left = Vector2(fftw[i * 2], fftw[i * 2 + 1]).length() / float(fft_size);
+            hw[i].right = Vector2(fftw[fft_size * 4 + i * 2], fftw[fft_size * 4 + i * 2 + 1]).length() / float(fft_size);
         }

         fft_pos = next; //swap
@@ -199,8 +199,8 @@ Vector2 AudioEffectSpectrumAnalyzerInstance::get_magnitude_for_frequency_range(f
     Vector2 max;

     for (int i = begin_pos; i <= end_pos; i++) {
-        max.x = MAX(max.x, r[i].l);
-        max.y = MAX(max.y, r[i].r);
+        max.x = MAX(max.x, r[i].left);
+        max.y = MAX(max.y, r[i].right);
     }

     return max;
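
Note (not part of the diff): the spectrum analyzer's temporal FFT buffer stores interleaved complex values (real at even float indices, imaginary at odd), with the left channel in the first fft_size * 4 floats and the right channel in the following block, which is why the right-channel accesses above start at an offset of fft_size * 4. Illustration of the layout assumed by the code shown here:

    // left  bin i: re = fftw[i * 2],                im = fftw[i * 2 + 1]
    // right bin i: re = fftw[fft_size * 4 + i * 2], im = fftw[fft_size * 4 + i * 2 + 1]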
@@ -39,8 +39,8 @@ void AudioEffectStereoEnhanceInstance::process(const AudioFrame *p_src_frames, A
     unsigned int delay_frames = (base->time_pullout / 1000.0) * AudioServer::get_singleton()->get_mix_rate();

     for (int i = 0; i < p_frame_count; i++) {
-        float l = p_src_frames[i].l;
-        float r = p_src_frames[i].r;
+        float l = p_src_frames[i].left;
+        float r = p_src_frames[i].right;

         float center = (l + r) / 2.0f;

@@ -65,8 +65,8 @@ void AudioEffectStereoEnhanceInstance::process(const AudioFrame *p_src_frames, A
             r = delay_ringbuff[(ringbuff_pos - delay_frames) & ringbuff_mask];
         }

-        p_dst_frames[i].l = l;
-        p_dst_frames[i].r = r;
+        p_dst_frames[i].left = l;
+        p_dst_frames[i].right = r;
         ringbuff_pos++;
     }
 }
@@ -285,13 +285,13 @@ void AudioServer::_driver_process(int p_frames, int32_t *p_buffer) {
             const AudioFrame *buf = master->channels[k].buffer.ptr();

             for (int j = 0; j < to_copy; j++) {
-                float l = CLAMP(buf[from + j].l, -1.0, 1.0);
+                float l = CLAMP(buf[from + j].left, -1.0, 1.0);
                 int32_t vl = l * ((1 << 20) - 1);
                 int32_t vl2 = (vl < 0 ? -1 : 1) * (ABS(vl) << 11);
                 *dest = vl2;
                 dest++;

-                float r = CLAMP(buf[from + j].r, -1.0, 1.0);
+                float r = CLAMP(buf[from + j].right, -1.0, 1.0);
                 int32_t vr = r * ((1 << 20) - 1);
                 int32_t vr2 = (vr < 0 ? -1 : 1) * (ABS(vr) << 11);
                 *dest = vr2;
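
Note (not part of the diff): the driver conversion above scales each clamped float by (1 << 20) - 1 and then shifts the magnitude up by 11 bits into the 32-bit output range. Worked example for l = 1.0:

    // vl  = 1.0 * ((1 << 20) - 1) = 1048575
    // vl2 = 1048575 << 11         = 2147481600  (just under INT32_MAX)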
@@ -588,22 +588,22 @@ void AudioServer::_mix_step() {
             for (uint32_t j = 0; j < buffer_size; j++) {
                 buf[j] *= volume;

-                float l = ABS(buf[j].l);
-                if (l > peak.l) {
-                    peak.l = l;
+                float l = ABS(buf[j].left);
+                if (l > peak.left) {
+                    peak.left = l;
                 }
-                float r = ABS(buf[j].r);
-                if (r > peak.r) {
-                    peak.r = r;
+                float r = ABS(buf[j].right);
+                if (r > peak.right) {
+                    peak.right = r;
                 }
             }

-            bus->channels.write[k].peak_volume = AudioFrame(Math::linear_to_db(peak.l + AUDIO_PEAK_OFFSET), Math::linear_to_db(peak.r + AUDIO_PEAK_OFFSET));
+            bus->channels.write[k].peak_volume = AudioFrame(Math::linear_to_db(peak.left + AUDIO_PEAK_OFFSET), Math::linear_to_db(peak.right + AUDIO_PEAK_OFFSET));

             if (!bus->channels[k].used) {
                 //see if any audio is contained, because channel was not used

-                if (MAX(peak.r, peak.l) > Math::db_to_linear(channel_disable_threshold_db)) {
+                if (MAX(peak.right, peak.left) > Math::db_to_linear(channel_disable_threshold_db)) {
                     bus->channels.write[k].last_mix_with_audio = mix_frames;
                 } else if (mix_frames - bus->channels[k].last_mix_with_audio > channel_disable_frames) {
                     bus->channels.write[k].active = false;
@@ -639,7 +639,7 @@ void AudioServer::_mix_step_for_channel(AudioFrame *p_out_buf, AudioFrame *p_sou
     ERR_FAIL_NULL(p_processor_l);
     ERR_FAIL_NULL(p_processor_r);

-    bool is_just_started = p_vol_start.l == 0 && p_vol_start.r == 0;
+    bool is_just_started = p_vol_start.left == 0 && p_vol_start.right == 0;
     p_processor_l->set_filter(&filter, /* clear_history= */ is_just_started);
     p_processor_l->update_coeffs(buffer_size);
     p_processor_r->set_filter(&filter, /* clear_history= */ is_just_started);
@@ -650,8 +650,8 @@ void AudioServer::_mix_step_for_channel(AudioFrame *p_out_buf, AudioFrame *p_sou
         float lerp_param = (float)frame_idx / buffer_size;
         AudioFrame vol = p_vol_final * lerp_param + (1 - lerp_param) * p_vol_start;
         AudioFrame mixed = vol * p_source_buf[frame_idx];
-        p_processor_l->process_one_interp(mixed.l);
-        p_processor_r->process_one_interp(mixed.r);
+        p_processor_l->process_one_interp(mixed.left);
+        p_processor_r->process_one_interp(mixed.right);
         p_out_buf[frame_idx] += mixed;
     }

@@ -1107,14 +1107,14 @@ float AudioServer::get_bus_peak_volume_left_db(int p_bus, int p_channel) const {
     ERR_FAIL_INDEX_V(p_bus, buses.size(), 0);
     ERR_FAIL_INDEX_V(p_channel, buses[p_bus]->channels.size(), 0);

-    return buses[p_bus]->channels[p_channel].peak_volume.l;
+    return buses[p_bus]->channels[p_channel].peak_volume.left;
 }

 float AudioServer::get_bus_peak_volume_right_db(int p_bus, int p_channel) const {
     ERR_FAIL_INDEX_V(p_bus, buses.size(), 0);
     ERR_FAIL_INDEX_V(p_channel, buses[p_bus]->channels.size(), 0);

-    return buses[p_bus]->channels[p_channel].peak_volume.r;
+    return buses[p_bus]->channels[p_channel].peak_volume.right;
 }

 bool AudioServer::is_bus_channel_active(int p_bus, int p_channel) const {