Merge pull request #19106 from SaracenOne/audio_mic
[WIP] Experimental microphone support
commit 73cf0fd305
@@ -35,8 +35,23 @@
#include "os/os.h"

#define kOutputBus 0
#define kInputBus 1

#ifdef OSX_ENABLED
OSStatus AudioDriverCoreAudio::input_device_address_cb(AudioObjectID inObjectID,
        UInt32 inNumberAddresses, const AudioObjectPropertyAddress *inAddresses,
        void *inClientData) {
    AudioDriverCoreAudio *driver = (AudioDriverCoreAudio *)inClientData;

    // If our selected device is the Default, call set_device to update the
    // kAudioOutputUnitProperty_CurrentDevice property.
    if (driver->capture_device_name == "Default") {
        driver->capture_set_device("Default");
    }

    return noErr;
}

OSStatus AudioDriverCoreAudio::output_device_address_cb(AudioObjectID inObjectID,
        UInt32 inNumberAddresses, const AudioObjectPropertyAddress *inAddresses,
        void *inClientData) {
@@ -79,6 +94,11 @@ Error AudioDriverCoreAudio::init() {

    result = AudioObjectAddPropertyListener(kAudioObjectSystemObject, &prop, &output_device_address_cb, this);
    ERR_FAIL_COND_V(result != noErr, FAILED);

    prop.mSelector = kAudioHardwarePropertyDefaultInputDevice;

    result = AudioObjectAddPropertyListener(kAudioObjectSystemObject, &prop, &input_device_address_cb, this);
    ERR_FAIL_COND_V(result != noErr, FAILED);
#endif

    AudioStreamBasicDescription strdesc;
@@ -102,6 +122,26 @@ Error AudioDriverCoreAudio::init() {
            break;
    }

    zeromem(&strdesc, sizeof(strdesc));
    size = sizeof(strdesc);
    result = AudioUnitGetProperty(audio_unit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, kInputBus, &strdesc, &size);
    ERR_FAIL_COND_V(result != noErr, FAILED);

    switch (strdesc.mChannelsPerFrame) {
        case 1: // Mono
            capture_channels = 1;
            break;

        case 2: // Stereo
            capture_channels = 2;
            break;

        default:
            // Unknown number of channels, default to stereo
            capture_channels = 2;
            break;
    }

    mix_rate = GLOBAL_DEF_RST("audio/mix_rate", DEFAULT_MIX_RATE);

    zeromem(&strdesc, sizeof(strdesc));
@@ -117,6 +157,11 @@ Error AudioDriverCoreAudio::init() {
    result = AudioUnitSetProperty(audio_unit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, kOutputBus, &strdesc, sizeof(strdesc));
    ERR_FAIL_COND_V(result != noErr, FAILED);

    strdesc.mChannelsPerFrame = capture_channels;

    result = AudioUnitSetProperty(audio_unit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, kInputBus, &strdesc, sizeof(strdesc));
    ERR_FAIL_COND_V(result != noErr, FAILED);

    int latency = GLOBAL_DEF_RST("audio/output_latency", DEFAULT_OUTPUT_LATENCY);
    // Sample rate is independent of channels (ref: https://stackoverflow.com/questions/11048825/audio-sample-frequency-rely-on-channels)
    buffer_frames = closest_power_of_2(latency * mix_rate / 1000);
@@ -126,8 +171,12 @@ Error AudioDriverCoreAudio::init() {
    ERR_FAIL_COND_V(result != noErr, FAILED);
#endif

    buffer_size = buffer_frames * channels;
    unsigned int buffer_size = buffer_frames * channels;
    samples_in.resize(buffer_size);
    input_buf.resize(buffer_size);
    input_buffer.resize(buffer_size * 8);
    input_position = 0;
    input_size = 0;

    if (OS::get_singleton()->is_stdout_verbose()) {
        print_line("CoreAudio: detected " + itos(channels) + " channels");
@@ -141,6 +190,12 @@ Error AudioDriverCoreAudio::init() {
    result = AudioUnitSetProperty(audio_unit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, kOutputBus, &callback, sizeof(callback));
    ERR_FAIL_COND_V(result != noErr, FAILED);

    zeromem(&callback, sizeof(AURenderCallbackStruct));
    callback.inputProc = &AudioDriverCoreAudio::input_callback;
    callback.inputProcRefCon = this;
    result = AudioUnitSetProperty(audio_unit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, 0, &callback, sizeof(callback));
    ERR_FAIL_COND_V(result != noErr, FAILED);

    result = AudioUnitInitialize(audio_unit);
    ERR_FAIL_COND_V(result != noErr, FAILED);
@@ -192,6 +247,45 @@ OSStatus AudioDriverCoreAudio::output_callback(void *inRefCon,
    return 0;
};

OSStatus AudioDriverCoreAudio::input_callback(void *inRefCon,
        AudioUnitRenderActionFlags *ioActionFlags,
        const AudioTimeStamp *inTimeStamp,
        UInt32 inBusNumber, UInt32 inNumberFrames,
        AudioBufferList *ioData) {

    AudioDriverCoreAudio *ad = (AudioDriverCoreAudio *)inRefCon;
    if (!ad->active) {
        return 0;
    }

    ad->lock();

    AudioBufferList bufferList;
    bufferList.mNumberBuffers = 1;
    bufferList.mBuffers[0].mData = ad->input_buf.ptrw();
    bufferList.mBuffers[0].mNumberChannels = ad->capture_channels;
    bufferList.mBuffers[0].mDataByteSize = ad->input_buf.size() * sizeof(int16_t);

    OSStatus result = AudioUnitRender(ad->audio_unit, ioActionFlags, inTimeStamp, inBusNumber, inNumberFrames, &bufferList);
    if (result == noErr) {
        for (int i = 0; i < inNumberFrames * ad->capture_channels; i++) {
            int32_t sample = ad->input_buf[i] << 16;
            ad->input_buffer_write(sample);

            if (ad->capture_channels == 1) {
                // In case the input device is single channel, convert it to stereo
                ad->input_buffer_write(sample);
            }
        }
    } else {
        ERR_PRINT(("AudioUnitRender failed, code: " + itos(result)).utf8().get_data());
    }

    ad->unlock();

    return result;
}

void AudioDriverCoreAudio::start() {
    if (!active) {
        OSStatus result = AudioOutputUnitStart(audio_unit);
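Note: input_callback() above pushes samples into a shared capture ring buffer via input_buffer_write(), which is not shown in this diff. The following is a minimal sketch of how such a helper could work; only the member names input_buffer, input_position and input_size are taken from this patch, the body itself is an assumption:

    // Hypothetical sketch, not the actual base-class implementation.
    void AudioDriver::input_buffer_write(int32_t sample) {
        input_buffer.write[input_position++] = sample; // store at the current write cursor
        if (input_position >= input_buffer.size()) {
            input_position = 0; // wrap around: this is a ring buffer
        }
        if (input_size < input_buffer.size()) {
            input_size++; // grow the readable region until the buffer is full
        }
    }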
@@ -222,151 +316,6 @@ AudioDriver::SpeakerMode AudioDriverCoreAudio::get_speaker_mode() const {
    return get_speaker_mode_by_total_channels(channels);
};

#ifdef OSX_ENABLED

Array AudioDriverCoreAudio::get_device_list() {

    Array list;

    list.push_back("Default");

    AudioObjectPropertyAddress prop;

    prop.mSelector = kAudioHardwarePropertyDevices;
    prop.mScope = kAudioObjectPropertyScopeGlobal;
    prop.mElement = kAudioObjectPropertyElementMaster;

    UInt32 size = 0;
    AudioObjectGetPropertyDataSize(kAudioObjectSystemObject, &prop, 0, NULL, &size);
    AudioDeviceID *audioDevices = (AudioDeviceID *)malloc(size);
    AudioObjectGetPropertyData(kAudioObjectSystemObject, &prop, 0, NULL, &size, audioDevices);

    UInt32 deviceCount = size / sizeof(AudioDeviceID);
    for (UInt32 i = 0; i < deviceCount; i++) {
        prop.mScope = kAudioDevicePropertyScopeOutput;
        prop.mSelector = kAudioDevicePropertyStreamConfiguration;

        AudioObjectGetPropertyDataSize(audioDevices[i], &prop, 0, NULL, &size);
        AudioBufferList *bufferList = (AudioBufferList *)malloc(size);
        AudioObjectGetPropertyData(audioDevices[i], &prop, 0, NULL, &size, bufferList);

        UInt32 outputChannelCount = 0;
        for (UInt32 j = 0; j < bufferList->mNumberBuffers; j++)
            outputChannelCount += bufferList->mBuffers[j].mNumberChannels;

        free(bufferList);

        if (outputChannelCount >= 1) {
            CFStringRef cfname;

            size = sizeof(CFStringRef);
            prop.mSelector = kAudioObjectPropertyName;

            AudioObjectGetPropertyData(audioDevices[i], &prop, 0, NULL, &size, &cfname);

            CFIndex length = CFStringGetLength(cfname);
            CFIndex maxSize = CFStringGetMaximumSizeForEncoding(length, kCFStringEncodingUTF8) + 1;
            char *buffer = (char *)malloc(maxSize);
            if (CFStringGetCString(cfname, buffer, maxSize, kCFStringEncodingUTF8)) {
                // Append the ID to the name in case we have devices with duplicate name
                list.push_back(String(buffer) + " (" + itos(audioDevices[i]) + ")");
            }

            free(buffer);
        }
    }

    free(audioDevices);

    return list;
}

String AudioDriverCoreAudio::get_device() {

    return device_name;
}

void AudioDriverCoreAudio::set_device(String device) {

    device_name = device;
    if (!active) {
        return;
    }

    AudioDeviceID deviceId;
    bool found = false;
    if (device_name != "Default") {
        AudioObjectPropertyAddress prop;

        prop.mSelector = kAudioHardwarePropertyDevices;
        prop.mScope = kAudioObjectPropertyScopeGlobal;
        prop.mElement = kAudioObjectPropertyElementMaster;

        UInt32 size = 0;
        AudioObjectGetPropertyDataSize(kAudioObjectSystemObject, &prop, 0, NULL, &size);
        AudioDeviceID *audioDevices = (AudioDeviceID *)malloc(size);
        AudioObjectGetPropertyData(kAudioObjectSystemObject, &prop, 0, NULL, &size, audioDevices);

        UInt32 deviceCount = size / sizeof(AudioDeviceID);
        for (UInt32 i = 0; i < deviceCount && !found; i++) {
            prop.mScope = kAudioDevicePropertyScopeOutput;
            prop.mSelector = kAudioDevicePropertyStreamConfiguration;

            AudioObjectGetPropertyDataSize(audioDevices[i], &prop, 0, NULL, &size);
            AudioBufferList *bufferList = (AudioBufferList *)malloc(size);
            AudioObjectGetPropertyData(audioDevices[i], &prop, 0, NULL, &size, bufferList);

            UInt32 outputChannelCount = 0;
            for (UInt32 j = 0; j < bufferList->mNumberBuffers; j++)
                outputChannelCount += bufferList->mBuffers[j].mNumberChannels;

            free(bufferList);

            if (outputChannelCount >= 1) {
                CFStringRef cfname;

                size = sizeof(CFStringRef);
                prop.mSelector = kAudioObjectPropertyName;

                AudioObjectGetPropertyData(audioDevices[i], &prop, 0, NULL, &size, &cfname);

                CFIndex length = CFStringGetLength(cfname);
                CFIndex maxSize = CFStringGetMaximumSizeForEncoding(length, kCFStringEncodingUTF8) + 1;
                char *buffer = (char *)malloc(maxSize);
                if (CFStringGetCString(cfname, buffer, maxSize, kCFStringEncodingUTF8)) {
                    String name = String(buffer) + " (" + itos(audioDevices[i]) + ")";
                    if (name == device_name) {
                        deviceId = audioDevices[i];
                        found = true;
                    }
                }

                free(buffer);
            }
        }

        free(audioDevices);
    }

    if (!found) {
        // If we haven't found the desired device get the system default one
        UInt32 size = sizeof(AudioDeviceID);
        AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };

        OSStatus result = AudioObjectGetPropertyData(kAudioObjectSystemObject, &property, 0, NULL, &size, &deviceId);
        ERR_FAIL_COND(result != noErr);

        found = true;
    }

    if (found) {
        OSStatus result = AudioUnitSetProperty(audio_unit, kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global, 0, &deviceId, sizeof(AudioDeviceID));
        ERR_FAIL_COND(result != noErr);
    }
}

#endif

void AudioDriverCoreAudio::lock() {
    if (mutex)
        mutex->lock();
@@ -434,20 +383,215 @@ void AudioDriverCoreAudio::finish() {
    }
};

Error AudioDriverCoreAudio::capture_start() {

    UInt32 flag = 1;
    OSStatus result = AudioUnitSetProperty(audio_unit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, kInputBus, &flag, sizeof(flag));
    ERR_FAIL_COND_V(result != noErr, FAILED);

    return OK;
}

Error AudioDriverCoreAudio::capture_stop() {

    UInt32 flag = 0;
    OSStatus result = AudioUnitSetProperty(audio_unit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, kInputBus, &flag, sizeof(flag));
    ERR_FAIL_COND_V(result != noErr, FAILED);

    return OK;
}

#ifdef OSX_ENABLED

Array AudioDriverCoreAudio::_get_device_list(bool capture) {

    Array list;

    list.push_back("Default");

    AudioObjectPropertyAddress prop;

    prop.mSelector = kAudioHardwarePropertyDevices;
    prop.mScope = kAudioObjectPropertyScopeGlobal;
    prop.mElement = kAudioObjectPropertyElementMaster;

    UInt32 size = 0;
    AudioObjectGetPropertyDataSize(kAudioObjectSystemObject, &prop, 0, NULL, &size);
    AudioDeviceID *audioDevices = (AudioDeviceID *)malloc(size);
    AudioObjectGetPropertyData(kAudioObjectSystemObject, &prop, 0, NULL, &size, audioDevices);

    UInt32 deviceCount = size / sizeof(AudioDeviceID);
    for (UInt32 i = 0; i < deviceCount; i++) {
        prop.mScope = capture ? kAudioDevicePropertyScopeInput : kAudioDevicePropertyScopeOutput;
        prop.mSelector = kAudioDevicePropertyStreamConfiguration;

        AudioObjectGetPropertyDataSize(audioDevices[i], &prop, 0, NULL, &size);
        AudioBufferList *bufferList = (AudioBufferList *)malloc(size);
        AudioObjectGetPropertyData(audioDevices[i], &prop, 0, NULL, &size, bufferList);

        UInt32 channelCount = 0;
        for (UInt32 j = 0; j < bufferList->mNumberBuffers; j++)
            channelCount += bufferList->mBuffers[j].mNumberChannels;

        free(bufferList);

        if (channelCount >= 1) {
            CFStringRef cfname;

            size = sizeof(CFStringRef);
            prop.mSelector = kAudioObjectPropertyName;

            AudioObjectGetPropertyData(audioDevices[i], &prop, 0, NULL, &size, &cfname);

            CFIndex length = CFStringGetLength(cfname);
            CFIndex maxSize = CFStringGetMaximumSizeForEncoding(length, kCFStringEncodingUTF8) + 1;
            char *buffer = (char *)malloc(maxSize);
            if (CFStringGetCString(cfname, buffer, maxSize, kCFStringEncodingUTF8)) {
                // Append the ID to the name in case we have devices with duplicate name
                list.push_back(String(buffer) + " (" + itos(audioDevices[i]) + ")");
            }

            free(buffer);
        }
    }

    free(audioDevices);

    return list;
}

void AudioDriverCoreAudio::_set_device(const String &device, bool capture) {

    AudioDeviceID deviceId;
    bool found = false;
    if (device != "Default") {
        AudioObjectPropertyAddress prop;

        prop.mSelector = kAudioHardwarePropertyDevices;
        prop.mScope = kAudioObjectPropertyScopeGlobal;
        prop.mElement = kAudioObjectPropertyElementMaster;

        UInt32 size = 0;
        AudioObjectGetPropertyDataSize(kAudioObjectSystemObject, &prop, 0, NULL, &size);
        AudioDeviceID *audioDevices = (AudioDeviceID *)malloc(size);
        AudioObjectGetPropertyData(kAudioObjectSystemObject, &prop, 0, NULL, &size, audioDevices);

        UInt32 deviceCount = size / sizeof(AudioDeviceID);
        for (UInt32 i = 0; i < deviceCount && !found; i++) {
            prop.mScope = capture ? kAudioDevicePropertyScopeInput : kAudioDevicePropertyScopeOutput;
            prop.mSelector = kAudioDevicePropertyStreamConfiguration;

            AudioObjectGetPropertyDataSize(audioDevices[i], &prop, 0, NULL, &size);
            AudioBufferList *bufferList = (AudioBufferList *)malloc(size);
            AudioObjectGetPropertyData(audioDevices[i], &prop, 0, NULL, &size, bufferList);

            UInt32 channelCount = 0;
            for (UInt32 j = 0; j < bufferList->mNumberBuffers; j++)
                channelCount += bufferList->mBuffers[j].mNumberChannels;

            free(bufferList);

            if (channelCount >= 1) {
                CFStringRef cfname;

                size = sizeof(CFStringRef);
                prop.mSelector = kAudioObjectPropertyName;

                AudioObjectGetPropertyData(audioDevices[i], &prop, 0, NULL, &size, &cfname);

                CFIndex length = CFStringGetLength(cfname);
                CFIndex maxSize = CFStringGetMaximumSizeForEncoding(length, kCFStringEncodingUTF8) + 1;
                char *buffer = (char *)malloc(maxSize);
                if (CFStringGetCString(cfname, buffer, maxSize, kCFStringEncodingUTF8)) {
                    String name = String(buffer) + " (" + itos(audioDevices[i]) + ")";
                    if (name == device) {
                        deviceId = audioDevices[i];
                        found = true;
                    }
                }

                free(buffer);
            }
        }

        free(audioDevices);
    }

    if (!found) {
        // If we haven't found the desired device get the system default one
        UInt32 size = sizeof(AudioDeviceID);
        UInt32 elem = capture ? kAudioHardwarePropertyDefaultInputDevice : kAudioHardwarePropertyDefaultOutputDevice;
        AudioObjectPropertyAddress property = { elem, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };

        OSStatus result = AudioObjectGetPropertyData(kAudioObjectSystemObject, &property, 0, NULL, &size, &deviceId);
        ERR_FAIL_COND(result != noErr);

        found = true;
    }

    if (found) {
        OSStatus result = AudioUnitSetProperty(audio_unit, kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global, capture ? kInputBus : kOutputBus, &deviceId, sizeof(AudioDeviceID));
        ERR_FAIL_COND(result != noErr);

        // Reset audio input to keep synchronisation.
        input_position = 0;
        input_size = 0;
    }
}

Array AudioDriverCoreAudio::get_device_list() {

    return _get_device_list();
}

String AudioDriverCoreAudio::get_device() {

    return device_name;
}

void AudioDriverCoreAudio::set_device(String device) {

    device_name = device;
    if (active) {
        _set_device(device_name);
    }
}

void AudioDriverCoreAudio::capture_set_device(const String &p_name) {

    capture_device_name = p_name;
    if (active) {
        _set_device(capture_device_name, true);
    }
}

Array AudioDriverCoreAudio::capture_get_device_list() {

    return _get_device_list(true);
}

String AudioDriverCoreAudio::capture_get_device() {

    return capture_device_name;
}

#endif

AudioDriverCoreAudio::AudioDriverCoreAudio() {
    active = false;
    mutex = NULL;

    mix_rate = 0;
    channels = 2;
    capture_channels = 2;

    buffer_size = 0;
    buffer_frames = 0;

    samples_in.clear();

    device_name = "Default";
};
    capture_device_name = "Default";
}

AudioDriverCoreAudio::~AudioDriverCoreAudio(){};
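For orientation, a hedged sketch of how a caller might drive the new macOS capture entry points; this is illustrative only, not code from this PR, and error handling is omitted:

    // Illustrative usage, assuming a valid AudioDriverCoreAudio *driver.
    Array inputs = driver->capture_get_device_list(); // e.g. ["Default", "Built-in Microphone (52)"]
    driver->capture_set_device("Default");            // the property listeners keep "Default" in sync with the OS
    if (driver->capture_start() == OK) {
        // input_callback() now feeds the shared capture ring buffer until capture_stop() is called.
    }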
@@ -48,15 +48,24 @@ class AudioDriverCoreAudio : public AudioDriver {
    Mutex *mutex;

    String device_name;
    String capture_device_name;

    int mix_rate;
    unsigned int channels;
    unsigned int capture_channels;
    unsigned int buffer_frames;
    unsigned int buffer_size;

    Vector<int32_t> samples_in;
    Vector<int16_t> input_buf;

#ifdef OSX_ENABLED
    Array _get_device_list(bool capture = false);
    void _set_device(const String &device, bool capture = false);

    static OSStatus input_device_address_cb(AudioObjectID inObjectID,
            UInt32 inNumberAddresses, const AudioObjectPropertyAddress *inAddresses,
            void *inClientData);

    static OSStatus output_device_address_cb(AudioObjectID inObjectID,
            UInt32 inNumberAddresses, const AudioObjectPropertyAddress *inAddresses,
            void *inClientData);
@@ -68,6 +77,12 @@ class AudioDriverCoreAudio : public AudioDriver {
            UInt32 inBusNumber, UInt32 inNumberFrames,
            AudioBufferList *ioData);

    static OSStatus input_callback(void *inRefCon,
            AudioUnitRenderActionFlags *ioActionFlags,
            const AudioTimeStamp *inTimeStamp,
            UInt32 inBusNumber, UInt32 inNumberFrames,
            AudioBufferList *ioData);

public:
    const char *get_name() const {
        return "CoreAudio";
@@ -77,18 +92,27 @@ public:
    virtual void start();
    virtual int get_mix_rate() const;
    virtual SpeakerMode get_speaker_mode() const;
#ifdef OSX_ENABLED
    virtual Array get_device_list();
    virtual String get_device();
    virtual void set_device(String device);
#endif

    virtual void lock();
    virtual void unlock();
    virtual void finish();

    virtual Error capture_start();
    virtual Error capture_stop();

    bool try_lock();
    void stop();

#ifdef OSX_ENABLED
    virtual Array get_device_list();
    virtual String get_device();
    virtual void set_device(String device);

    virtual Array capture_get_device_list();
    virtual void capture_set_device(const String &p_name);
    virtual String capture_get_device();
#endif

    AudioDriverCoreAudio();
    ~AudioDriverCoreAudio();
};
@@ -64,18 +64,32 @@ void AudioDriverPulseAudio::pa_sink_info_cb(pa_context *c, const pa_sink_info *l
    ad->pa_status++;
}

void AudioDriverPulseAudio::pa_source_info_cb(pa_context *c, const pa_source_info *l, int eol, void *userdata) {
    AudioDriverPulseAudio *ad = (AudioDriverPulseAudio *)userdata;

    // If eol is set to a positive number, you're at the end of the list
    if (eol > 0) {
        return;
    }

    ad->pa_rec_map = l->channel_map;
    ad->pa_status++;
}

void AudioDriverPulseAudio::pa_server_info_cb(pa_context *c, const pa_server_info *i, void *userdata) {
    AudioDriverPulseAudio *ad = (AudioDriverPulseAudio *)userdata;

    ad->capture_default_device = i->default_source_name;
    ad->default_device = i->default_sink_name;
    ad->pa_status++;
}

void AudioDriverPulseAudio::detect_channels() {
void AudioDriverPulseAudio::detect_channels(bool capture) {

    pa_channel_map_init_stereo(&pa_map);
    pa_channel_map_init_stereo(capture ? &pa_rec_map : &pa_map);

    if (device_name == "Default") {
    String device = capture ? capture_device_name : device_name;
    if (device == "Default") {
        // Get the default output device name
        pa_status = 0;
        pa_operation *pa_op = pa_context_get_server_info(pa_ctx, &AudioDriverPulseAudio::pa_server_info_cb, (void *)this);
@@ -93,16 +107,22 @@ void AudioDriverPulseAudio::detect_channels() {
        }
    }

    char device[1024];
    if (device_name == "Default") {
        strcpy(device, default_device.utf8().get_data());
    char dev[1024];
    if (device == "Default") {
        strcpy(dev, capture ? capture_default_device.utf8().get_data() : default_device.utf8().get_data());
    } else {
        strcpy(device, device_name.utf8().get_data());
        strcpy(dev, device.utf8().get_data());
    }

    // Now using the device name get the amount of channels
    pa_status = 0;
    pa_operation *pa_op = pa_context_get_sink_info_by_name(pa_ctx, device, &AudioDriverPulseAudio::pa_sink_info_cb, (void *)this);
    pa_operation *pa_op;
    if (capture) {
        pa_op = pa_context_get_source_info_by_name(pa_ctx, dev, &AudioDriverPulseAudio::pa_source_info_cb, (void *)this);
    } else {
        pa_op = pa_context_get_sink_info_by_name(pa_ctx, dev, &AudioDriverPulseAudio::pa_sink_info_cb, (void *)this);
    }

    if (pa_op) {
        while (pa_status == 0) {
            int ret = pa_mainloop_iterate(pa_ml, 1, NULL);
@@ -113,7 +133,11 @@ void AudioDriverPulseAudio::detect_channels() {

        pa_operation_unref(pa_op);
    } else {
        ERR_PRINT("pa_context_get_sink_info_by_name error");
        if (capture) {
            ERR_PRINT("pa_context_get_source_info_by_name error");
        } else {
            ERR_PRINT("pa_context_get_sink_info_by_name error");
        }
    }
}
@@ -195,6 +219,10 @@ Error AudioDriverPulseAudio::init_device() {
    samples_in.resize(buffer_frames * channels);
    samples_out.resize(pa_buffer_size);

    // Reset audio input to keep synchronisation.
    input_position = 0;
    input_size = 0;

    return OK;
}
@@ -287,74 +315,71 @@ float AudioDriverPulseAudio::get_latency() {
void AudioDriverPulseAudio::thread_func(void *p_udata) {

    AudioDriverPulseAudio *ad = (AudioDriverPulseAudio *)p_udata;
    unsigned int write_ofs = 0;
    size_t avail_bytes = 0;

    while (!ad->exit_thread) {

        size_t read_bytes = 0;
        size_t written_bytes = 0;

        if (avail_bytes == 0) {
            ad->lock();
            ad->start_counting_ticks();

            if (!ad->active) {
                for (unsigned int i = 0; i < ad->pa_buffer_size; i++) {
                    ad->samples_out.write[i] = 0;
                }
            } else {
                ad->audio_server_process(ad->buffer_frames, ad->samples_in.ptrw());

                if (ad->channels == ad->pa_map.channels) {
                    for (unsigned int i = 0; i < ad->pa_buffer_size; i++) {
                        ad->samples_out.write[i] = ad->samples_in[i] >> 16;
                    }
                } else {
                    // Uneven amount of channels
                    unsigned int in_idx = 0;
                    unsigned int out_idx = 0;

                    for (unsigned int i = 0; i < ad->buffer_frames; i++) {
                        for (unsigned int j = 0; j < ad->pa_map.channels - 1; j++) {
                            ad->samples_out.write[out_idx++] = ad->samples_in[in_idx++] >> 16;
                        }
                        uint32_t l = ad->samples_in[in_idx++];
                        uint32_t r = ad->samples_in[in_idx++];
                        ad->samples_out.write[out_idx++] = (l >> 1 + r >> 1) >> 16;
                    }
                }
            }

            avail_bytes = ad->pa_buffer_size * sizeof(int16_t);
            write_ofs = 0;
            ad->stop_counting_ticks();
            ad->unlock();
        }

        ad->lock();
        ad->start_counting_ticks();

        if (!ad->active) {
            for (unsigned int i = 0; i < ad->pa_buffer_size; i++) {
                ad->samples_out.write[i] = 0;
            }

        } else {
            ad->audio_server_process(ad->buffer_frames, ad->samples_in.ptrw());

            if (ad->channels == ad->pa_map.channels) {
                for (unsigned int i = 0; i < ad->pa_buffer_size; i++) {
                    ad->samples_out.write[i] = ad->samples_in[i] >> 16;
                }
            } else {
                // Uneven amount of channels
                unsigned int in_idx = 0;
                unsigned int out_idx = 0;

                for (unsigned int i = 0; i < ad->buffer_frames; i++) {
                    for (unsigned int j = 0; j < ad->pa_map.channels - 1; j++) {
                        ad->samples_out.write[out_idx++] = ad->samples_in[in_idx++] >> 16;
                    }
                    uint32_t l = ad->samples_in[in_idx++];
                    uint32_t r = ad->samples_in[in_idx++];
                    ad->samples_out.write[out_idx++] = (l >> 1 + r >> 1) >> 16;
                }
            }
        }

        int error_code;
        int byte_size = ad->pa_buffer_size * sizeof(int16_t);
        int ret;
        do {
            ret = pa_mainloop_iterate(ad->pa_ml, 0, NULL);
        } while (ret > 0);

        if (pa_stream_get_state(ad->pa_str) == PA_STREAM_READY) {
            const void *ptr = ad->samples_out.ptr();
            while (byte_size > 0) {
                size_t bytes = pa_stream_writable_size(ad->pa_str);
                if (bytes > 0) {
                    if (bytes > byte_size) {
                        bytes = byte_size;
                    }

                    ret = pa_stream_write(ad->pa_str, ptr, bytes, NULL, 0LL, PA_SEEK_RELATIVE);
                    if (ret >= 0) {
                        byte_size -= bytes;
                        ptr = (const char *)ptr + bytes;
                    }
        if (avail_bytes > 0 && pa_stream_get_state(ad->pa_str) == PA_STREAM_READY) {
            size_t bytes = pa_stream_writable_size(ad->pa_str);
            if (bytes > 0) {
                size_t bytes_to_write = MIN(bytes, avail_bytes);
                const void *ptr = ad->samples_out.ptr();
                ret = pa_stream_write(ad->pa_str, ptr + write_ofs, bytes_to_write, NULL, 0LL, PA_SEEK_RELATIVE);
                if (ret != 0) {
                    ERR_PRINT("pa_stream_write error");
                } else {
                    ret = pa_mainloop_iterate(ad->pa_ml, 0, NULL);
                    if (ret == 0) {
                        // If pa_mainloop_iterate returns 0 sleep for 1 msec to wait
                        // for the stream to be able to process more bytes
                        ad->stop_counting_ticks();
                        ad->unlock();

                        OS::get_singleton()->delay_usec(1000);

                        ad->lock();
                        ad->start_counting_ticks();
                    }
                    avail_bytes -= bytes_to_write;
                    write_ofs += bytes_to_write;
                    written_bytes += bytes_to_write;
                }
            }
        }
@@ -379,8 +404,64 @@ void AudioDriverPulseAudio::thread_func(void *p_udata) {
            }
        }

        if (ad->pa_rec_str && pa_stream_get_state(ad->pa_rec_str) == PA_STREAM_READY) {
            size_t bytes = pa_stream_readable_size(ad->pa_rec_str);
            if (bytes > 0) {
                const void *ptr = NULL;
                size_t maxbytes = ad->input_buffer.size() * sizeof(int16_t);

                bytes = MIN(bytes, maxbytes);
                ret = pa_stream_peek(ad->pa_rec_str, &ptr, &bytes);
                if (ret != 0) {
                    ERR_PRINT("pa_stream_peek error");
                } else {
                    int16_t *srcptr = (int16_t *)ptr;
                    for (size_t i = bytes >> 1; i > 0; i--) {
                        int32_t sample = int32_t(*srcptr++) << 16;
                        ad->input_buffer_write(sample);

                        if (ad->pa_rec_map.channels == 1) {
                            // In case the input device is single channel, convert it to stereo
                            ad->input_buffer_write(sample);
                        }
                    }

                    read_bytes += bytes;
                    ret = pa_stream_drop(ad->pa_rec_str);
                    if (ret != 0) {
                        ERR_PRINT("pa_stream_drop error");
                    }
                }
            }

            // User selected a new device, finish the current one so we'll init the new device
            if (ad->capture_device_name != ad->capture_new_device) {
                ad->capture_device_name = ad->capture_new_device;
                ad->capture_finish_device();

                Error err = ad->capture_init_device();
                if (err != OK) {
                    ERR_PRINT("PulseAudio: capture_init_device error");
                    ad->capture_device_name = "Default";
                    ad->capture_new_device = "Default";

                    err = ad->capture_init_device();
                    if (err != OK) {
                        ad->active = false;
                        ad->exit_thread = true;
                        break;
                    }
                }
            }
        }

        ad->stop_counting_ticks();
        ad->unlock();

        // Let the thread rest a while if we haven't read or written anything
        if (written_bytes == 0 && read_bytes == 0) {
            OS::get_singleton()->delay_usec(1000);
        }
    }

    ad->thread_exited = true;
@@ -510,11 +591,165 @@ void AudioDriverPulseAudio::finish() {
        thread = NULL;
    }

Error AudioDriverPulseAudio::capture_init_device() {

    // If there is a specified device check that it is really present
    if (capture_device_name != "Default") {
        Array list = capture_get_device_list();
        if (list.find(capture_device_name) == -1) {
            capture_device_name = "Default";
            capture_new_device = "Default";
        }
    }

    detect_channels(true);
    switch (pa_rec_map.channels) {
        case 1: // Mono
        case 2: // Stereo
            break;

        default:
            WARN_PRINTS("PulseAudio: Unsupported number of input channels: " + itos(pa_rec_map.channels));
            pa_channel_map_init_stereo(&pa_rec_map);
            break;
    }

    if (OS::get_singleton()->is_stdout_verbose()) {
        print_line("PulseAudio: detected " + itos(pa_rec_map.channels) + " input channels");
    }

    pa_sample_spec spec;

    spec.format = PA_SAMPLE_S16LE;
    spec.channels = pa_rec_map.channels;
    spec.rate = mix_rate;

    int latency = 30;
    input_buffer_frames = closest_power_of_2(latency * mix_rate / 1000);
    int buffer_size = input_buffer_frames * spec.channels;

    pa_buffer_attr attr;
    attr.fragsize = buffer_size * sizeof(int16_t);

    pa_rec_str = pa_stream_new(pa_ctx, "Record", &spec, &pa_rec_map);
    if (pa_rec_str == NULL) {
        ERR_PRINTS("PulseAudio: pa_stream_new error: " + String(pa_strerror(pa_context_errno(pa_ctx))));
        ERR_FAIL_V(ERR_CANT_OPEN);
    }

    const char *dev = capture_device_name == "Default" ? NULL : capture_device_name.utf8().get_data();
    pa_stream_flags flags = pa_stream_flags(PA_STREAM_INTERPOLATE_TIMING | PA_STREAM_ADJUST_LATENCY | PA_STREAM_AUTO_TIMING_UPDATE);
    int error_code = pa_stream_connect_record(pa_rec_str, dev, &attr, flags);
    if (error_code < 0) {
        ERR_PRINTS("PulseAudio: pa_stream_connect_record error: " + String(pa_strerror(error_code)));
        ERR_FAIL_V(ERR_CANT_OPEN);
    }

    input_buffer.resize(input_buffer_frames * 8);
    input_position = 0;
    input_size = 0;

    return OK;
}
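The fragment size requested from PulseAudio above follows from a fixed 30 ms latency target. A worked example of that arithmetic, assuming a 44100 Hz mix rate and that closest_power_of_2() rounds to the nearest power of two (illustrative numbers only):

    int latency = 30;                                     // ms
    int frames = latency * 44100 / 1000;                  // 1323 frames
    int input_buffer_frames = closest_power_of_2(frames); // 1024 frames (nearest power of two)
    int buffer_size = input_buffer_frames * 2;            // 2048 samples for stereo capture
    size_t fragsize = buffer_size * sizeof(int16_t);      // 4096 bytes requested via pa_buffer_attr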
void AudioDriverPulseAudio::capture_finish_device() {

    if (pa_rec_str) {
        int ret = pa_stream_disconnect(pa_rec_str);
        if (ret != 0) {
            ERR_PRINTS("PulseAudio: pa_stream_disconnect error: " + String(pa_strerror(ret)));
        }
        pa_stream_unref(pa_rec_str);
        pa_rec_str = NULL;
    }
}

Error AudioDriverPulseAudio::capture_start() {

    lock();
    Error err = capture_init_device();
    unlock();

    return err;
}

Error AudioDriverPulseAudio::capture_stop() {
    lock();
    capture_finish_device();
    unlock();

    return OK;
}

void AudioDriverPulseAudio::capture_set_device(const String &p_name) {

    lock();
    capture_new_device = p_name;
    unlock();
}

void AudioDriverPulseAudio::pa_sourcelist_cb(pa_context *c, const pa_source_info *l, int eol, void *userdata) {
    AudioDriverPulseAudio *ad = (AudioDriverPulseAudio *)userdata;

    // If eol is set to a positive number, you're at the end of the list
    if (eol > 0) {
        return;
    }

    if (l->monitor_of_sink == PA_INVALID_INDEX) {
        ad->pa_rec_devices.push_back(l->name);
    }

    ad->pa_status++;
}

Array AudioDriverPulseAudio::capture_get_device_list() {

    pa_rec_devices.clear();
    pa_rec_devices.push_back("Default");

    if (pa_ctx == NULL) {
        return pa_rec_devices;
    }

    lock();

    // Get the device list
    pa_status = 0;
    pa_operation *pa_op = pa_context_get_source_info_list(pa_ctx, pa_sourcelist_cb, (void *)this);
    if (pa_op) {
        while (pa_status == 0) {
            int ret = pa_mainloop_iterate(pa_ml, 1, NULL);
            if (ret < 0) {
                ERR_PRINT("pa_mainloop_iterate error");
            }
        }

        pa_operation_unref(pa_op);
    } else {
        ERR_PRINT("pa_context_get_server_info error");
    }

    unlock();

    return pa_rec_devices;
}

String AudioDriverPulseAudio::capture_get_device() {

    lock();
    String name = capture_device_name;
    unlock();

    return name;
}

AudioDriverPulseAudio::AudioDriverPulseAudio() {

    pa_ml = NULL;
    pa_ctx = NULL;
    pa_str = NULL;
    pa_rec_str = NULL;

    mutex = NULL;
    thread = NULL;

@@ -528,6 +763,7 @@ AudioDriverPulseAudio::AudioDriverPulseAudio() {

    mix_rate = 0;
    buffer_frames = 0;
    input_buffer_frames = 0;
    pa_buffer_size = 0;
    channels = 0;
    pa_ready = 0;
@@ -47,22 +47,30 @@ class AudioDriverPulseAudio : public AudioDriver {
    pa_mainloop *pa_ml;
    pa_context *pa_ctx;
    pa_stream *pa_str;
    pa_stream *pa_rec_str;
    pa_channel_map pa_map;
    pa_channel_map pa_rec_map;

    String device_name;
    String new_device;
    String default_device;

    String capture_device_name;
    String capture_new_device;
    String capture_default_device;

    Vector<int32_t> samples_in;
    Vector<int16_t> samples_out;

    unsigned int mix_rate;
    unsigned int buffer_frames;
    unsigned int input_buffer_frames;
    unsigned int pa_buffer_size;
    int channels;
    int pa_ready;
    int pa_status;
    Array pa_devices;
    Array pa_rec_devices;

    bool active;
    bool thread_exited;
@@ -72,13 +80,18 @@ class AudioDriverPulseAudio : public AudioDriver {

    static void pa_state_cb(pa_context *c, void *userdata);
    static void pa_sink_info_cb(pa_context *c, const pa_sink_info *l, int eol, void *userdata);
    static void pa_source_info_cb(pa_context *c, const pa_source_info *l, int eol, void *userdata);
    static void pa_server_info_cb(pa_context *c, const pa_server_info *i, void *userdata);
    static void pa_sinklist_cb(pa_context *c, const pa_sink_info *l, int eol, void *userdata);
    static void pa_sourcelist_cb(pa_context *c, const pa_source_info *l, int eol, void *userdata);

    Error init_device();
    void finish_device();

    void detect_channels();
    Error capture_init_device();
    void capture_finish_device();

    void detect_channels(bool capture = false);

    static void thread_func(void *p_udata);
@@ -91,15 +104,24 @@ public:
    virtual void start();
    virtual int get_mix_rate() const;
    virtual SpeakerMode get_speaker_mode() const;

    virtual Array get_device_list();
    virtual String get_device();
    virtual void set_device(String device);

    virtual Array capture_get_device_list();
    virtual void capture_set_device(const String &p_name);
    virtual String capture_get_device();

    virtual void lock();
    virtual void unlock();
    virtual void finish();

    virtual float get_latency();

    virtual Error capture_start();
    virtual Error capture_stop();

    AudioDriverPulseAudio();
    ~AudioDriverPulseAudio();
};
@@ -32,6 +32,8 @@

#include "audio_driver_wasapi.h"

#include <Functiondiscoverykeys_devpkey.h>

#include "os/os.h"
#include "project_settings.h"
@@ -52,8 +54,22 @@ const CLSID CLSID_MMDeviceEnumerator = __uuidof(MMDeviceEnumerator);
const IID IID_IMMDeviceEnumerator = __uuidof(IMMDeviceEnumerator);
const IID IID_IAudioClient = __uuidof(IAudioClient);
const IID IID_IAudioRenderClient = __uuidof(IAudioRenderClient);
const IID IID_IAudioCaptureClient = __uuidof(IAudioCaptureClient);

static bool default_device_changed = false;
#define SAFE_RELEASE(memory) \
    if ((memory) != NULL) {  \
        (memory)->Release(); \
        (memory) = NULL;     \
    }

#define REFTIMES_PER_SEC 10000000
#define REFTIMES_PER_MILLISEC 10000

#define CAPTURE_BUFFER_CHANNELS 2

static StringName capture_device_id;
static bool default_render_device_changed = false;
static bool default_capture_device_changed = false;

class CMMNotificationClient : public IMMNotificationClient {
    LONG _cRef;
@@ -109,8 +125,13 @@ public:
    }

    HRESULT STDMETHODCALLTYPE OnDefaultDeviceChanged(EDataFlow flow, ERole role, LPCWSTR pwstrDeviceId) {
        if (flow == eRender && role == eConsole) {
            default_device_changed = true;
        if (role == eConsole) {
            if (flow == eRender) {
                default_render_device_changed = true;
            } else if (flow == eCapture) {
                default_capture_device_changed = true;
                capture_device_id = String(pwstrDeviceId);
            }
        }

        return S_OK;
@@ -123,7 +144,7 @@ public:

static CMMNotificationClient notif_client;

Error AudioDriverWASAPI::init_device(bool reinit) {
Error AudioDriverWASAPI::audio_device_init(AudioDeviceWASAPI *p_device, bool p_capture, bool reinit) {

    WAVEFORMATEX *pwfex;
    IMMDeviceEnumerator *enumerator = NULL;
@@ -134,12 +155,12 @@ Error AudioDriverWASAPI::init_device(bool reinit) {
    HRESULT hr = CoCreateInstance(CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, IID_IMMDeviceEnumerator, (void **)&enumerator);
    ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);

    if (device_name == "Default") {
        hr = enumerator->GetDefaultAudioEndpoint(eRender, eConsole, &device);
    if (p_device->device_name == "Default") {
        hr = enumerator->GetDefaultAudioEndpoint(p_capture ? eCapture : eRender, eConsole, &device);
    } else {
        IMMDeviceCollection *devices = NULL;

        hr = enumerator->EnumAudioEndpoints(eRender, DEVICE_STATE_ACTIVE, &devices);
        hr = enumerator->EnumAudioEndpoints(p_capture ? eCapture : eRender, DEVICE_STATE_ACTIVE, &devices);
        ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);

        LPWSTR strId = NULL;
@@ -165,7 +186,7 @@ Error AudioDriverWASAPI::init_device(bool reinit) {
            hr = props->GetValue(PKEY_Device_FriendlyName, &propvar);
            ERR_BREAK(hr != S_OK);

            if (device_name == String(propvar.pwszVal)) {
            if (p_device->device_name == String(propvar.pwszVal)) {
                hr = device->GetId(&strId);
                ERR_BREAK(hr != S_OK);
@@ -186,9 +207,10 @@ Error AudioDriverWASAPI::init_device(bool reinit) {
        }

        if (device == NULL) {
            hr = enumerator->GetDefaultAudioEndpoint(eRender, eConsole, &device);
            hr = enumerator->GetDefaultAudioEndpoint(p_capture ? eCapture : eRender, eConsole, &device);
        }
    }

    if (reinit) {
        // In case we're trying to re-initialize the device prevent throwing this error on the console,
        // otherwise if there is currently no device available this will spam the console.
@@ -200,11 +222,15 @@ Error AudioDriverWASAPI::init_device(bool reinit) {
    }

    hr = enumerator->RegisterEndpointNotificationCallback(&notif_client);
    SAFE_RELEASE(enumerator)

    if (hr != S_OK) {
        ERR_PRINT("WASAPI: RegisterEndpointNotificationCallback error");
    }

    hr = device->Activate(IID_IAudioClient, CLSCTX_ALL, NULL, (void **)&audio_client);
    hr = device->Activate(IID_IAudioClient, CLSCTX_ALL, NULL, (void **)&p_device->audio_client);
    SAFE_RELEASE(device)

    if (reinit) {
        if (hr != S_OK) {
            return ERR_CANT_OPEN;
@@ -213,75 +239,89 @@ Error AudioDriverWASAPI::init_device(bool reinit) {
        ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);
    }

    hr = audio_client->GetMixFormat(&pwfex);
    hr = p_device->audio_client->GetMixFormat(&pwfex);
    ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);

    // Since we're using WASAPI Shared Mode we can't control any of these, we just tag along
    wasapi_channels = pwfex->nChannels;
    format_tag = pwfex->wFormatTag;
    bits_per_sample = pwfex->wBitsPerSample;
    p_device->channels = pwfex->nChannels;
    p_device->format_tag = pwfex->wFormatTag;
    p_device->bits_per_sample = pwfex->wBitsPerSample;
    p_device->frame_size = (p_device->bits_per_sample / 8) * p_device->channels;

    switch (wasapi_channels) {
        case 2: // Stereo
        case 4: // Surround 3.1
        case 6: // Surround 5.1
        case 8: // Surround 7.1
            channels = wasapi_channels;
            break;

        default:
            WARN_PRINTS("WASAPI: Unsupported number of channels: " + itos(wasapi_channels));
            channels = 2;
            break;
    }

    if (format_tag == WAVE_FORMAT_EXTENSIBLE) {
    if (p_device->format_tag == WAVE_FORMAT_EXTENSIBLE) {
        WAVEFORMATEXTENSIBLE *wfex = (WAVEFORMATEXTENSIBLE *)pwfex;

        if (wfex->SubFormat == KSDATAFORMAT_SUBTYPE_PCM) {
            format_tag = WAVE_FORMAT_PCM;
            p_device->format_tag = WAVE_FORMAT_PCM;
        } else if (wfex->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT) {
            format_tag = WAVE_FORMAT_IEEE_FLOAT;
            p_device->format_tag = WAVE_FORMAT_IEEE_FLOAT;
        } else {
            ERR_PRINT("WASAPI: Format not supported");
            ERR_FAIL_V(ERR_CANT_OPEN);
        }
    } else {
        if (format_tag != WAVE_FORMAT_PCM && format_tag != WAVE_FORMAT_IEEE_FLOAT) {
        if (p_device->format_tag != WAVE_FORMAT_PCM && p_device->format_tag != WAVE_FORMAT_IEEE_FLOAT) {
            ERR_PRINT("WASAPI: Format not supported");
            ERR_FAIL_V(ERR_CANT_OPEN);
        }
    }

    DWORD streamflags = AUDCLNT_STREAMFLAGS_EVENTCALLBACK;
    DWORD streamflags = 0;
    if (mix_rate != pwfex->nSamplesPerSec) {
        streamflags |= AUDCLNT_STREAMFLAGS_RATEADJUST;
        pwfex->nSamplesPerSec = mix_rate;
        pwfex->nAvgBytesPerSec = pwfex->nSamplesPerSec * pwfex->nChannels * (pwfex->wBitsPerSample / 8);
    }

    hr = audio_client->Initialize(AUDCLNT_SHAREMODE_SHARED, streamflags, 0, 0, pwfex, NULL);
    hr = p_device->audio_client->Initialize(AUDCLNT_SHAREMODE_SHARED, streamflags, p_capture ? REFTIMES_PER_SEC : 0, 0, pwfex, NULL);
    ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);

    event = CreateEvent(NULL, FALSE, FALSE, NULL);
    ERR_FAIL_COND_V(event == NULL, ERR_CANT_OPEN);

    hr = audio_client->SetEventHandle(event);
    if (p_capture) {
        hr = p_device->audio_client->GetService(IID_IAudioCaptureClient, (void **)&p_device->capture_client);
    } else {
        hr = p_device->audio_client->GetService(IID_IAudioRenderClient, (void **)&p_device->render_client);
    }
    ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);

    hr = audio_client->GetService(IID_IAudioRenderClient, (void **)&render_client);
    ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);
    // Free memory
    CoTaskMemFree(pwfex);
    SAFE_RELEASE(device)

    return OK;
}

Error AudioDriverWASAPI::init_render_device(bool reinit) {

    Error err = audio_device_init(&audio_output, false, reinit);
    if (err != OK)
        return err;

    switch (audio_output.channels) {
        case 2: // Stereo
        case 4: // Surround 3.1
        case 6: // Surround 5.1
        case 8: // Surround 7.1
            channels = audio_output.channels;
            break;

        default:
            WARN_PRINTS("WASAPI: Unsupported number of channels: " + itos(audio_output.channels));
            channels = 2;
            break;
    }

    UINT32 max_frames;
    hr = audio_client->GetBufferSize(&max_frames);
    HRESULT hr = audio_output.audio_client->GetBufferSize(&max_frames);
    ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);

    // Due to WASAPI Shared Mode we have no control of the buffer size
    buffer_frames = max_frames;

    // Sample rate is independent of channels (ref: https://stackoverflow.com/questions/11048825/audio-sample-frequency-rely-on-channels)
    buffer_size = buffer_frames * channels;
    samples_in.resize(buffer_size);
    samples_in.resize(buffer_frames * channels);

    input_position = 0;
    input_size = 0;

    if (OS::get_singleton()->is_stdout_verbose()) {
        print_line("WASAPI: detected " + itos(channels) + " channels");
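The new frame_size field introduced above is simply bytes-per-sample times channel count. As an illustration only (the actual mix format is whatever shared-mode WASAPI reports, commonly 32-bit float stereo):

    int bits_per_sample = 32;
    int channels = 2;
    int frame_size = (bits_per_sample / 8) * channels; // 4 bytes per sample * 2 channels = 8 bytes per frame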
@@ -291,41 +331,61 @@ Error AudioDriverWASAPI::init_device(bool reinit) {
    return OK;
}

Error AudioDriverWASAPI::finish_device() {
Error AudioDriverWASAPI::init_capture_device(bool reinit) {

    if (audio_client) {
        if (active) {
            audio_client->Stop();
            active = false;
        }
    Error err = audio_device_init(&audio_input, true, reinit);
    if (err != OK)
        return err;

        audio_client->Release();
        audio_client = NULL;
    }
    // Get the max frames
    UINT32 max_frames;
    HRESULT hr = audio_input.audio_client->GetBufferSize(&max_frames);
    ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);

    if (render_client) {
        render_client->Release();
        render_client = NULL;
    }

    if (audio_client) {
        audio_client->Release();
        audio_client = NULL;
    }
    // Set the buffer size
    input_buffer.resize(max_frames * CAPTURE_BUFFER_CHANNELS);
    input_position = 0;
    input_size = 0;

    return OK;
}

Error AudioDriverWASAPI::audio_device_finish(AudioDeviceWASAPI *p_device) {

    if (p_device->active) {
        if (p_device->audio_client) {
            p_device->audio_client->Stop();
        }

        p_device->active = false;
    }

    SAFE_RELEASE(p_device->audio_client)
    SAFE_RELEASE(p_device->render_client)
    SAFE_RELEASE(p_device->capture_client)

    return OK;
}

Error AudioDriverWASAPI::finish_render_device() {

    return audio_device_finish(&audio_output);
}

Error AudioDriverWASAPI::finish_capture_device() {

    return audio_device_finish(&audio_input);
}

Error AudioDriverWASAPI::init() {

    mix_rate = GLOBAL_DEF_RST("audio/mix_rate", DEFAULT_MIX_RATE);

    Error err = init_device();
    Error err = init_render_device();
    if (err != OK) {
        ERR_PRINT("WASAPI: init_device error");
        ERR_PRINT("WASAPI: init_render_device error");
    }

    active = false;
    exit_thread = false;
    thread_exited = false;
@@ -345,7 +405,7 @@ AudioDriver::SpeakerMode AudioDriverWASAPI::get_speaker_mode() const {
    return get_speaker_mode_by_total_channels(channels);
}

Array AudioDriverWASAPI::get_device_list() {
Array AudioDriverWASAPI::audio_device_get_list(bool p_capture) {

    Array list;
    IMMDeviceCollection *devices = NULL;
@@ -358,7 +418,7 @@ Array AudioDriverWASAPI::get_device_list() {
    HRESULT hr = CoCreateInstance(CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, IID_IMMDeviceEnumerator, (void **)&enumerator);
    ERR_FAIL_COND_V(hr != S_OK, Array());

    hr = enumerator->EnumAudioEndpoints(eRender, DEVICE_STATE_ACTIVE, &devices);
    hr = enumerator->EnumAudioEndpoints(p_capture ? eCapture : eRender, DEVICE_STATE_ACTIVE, &devices);
    ERR_FAIL_COND_V(hr != S_OK, Array());

    UINT count = 0;
@@ -393,21 +453,63 @@ Array AudioDriverWASAPI::get_device_list() {
    return list;
}

Array AudioDriverWASAPI::get_device_list() {

    return audio_device_get_list(false);
}

String AudioDriverWASAPI::get_device() {

    return device_name;
    lock();
    String name = audio_output.device_name;
    unlock();

    return name;
}

void AudioDriverWASAPI::set_device(String device) {

    lock();
    new_device = device;
    audio_output.new_device = device;
    unlock();
}

void AudioDriverWASAPI::write_sample(AudioDriverWASAPI *ad, BYTE *buffer, int i, int32_t sample) {
    if (ad->format_tag == WAVE_FORMAT_PCM) {
        switch (ad->bits_per_sample) {
int32_t AudioDriverWASAPI::read_sample(WORD format_tag, int bits_per_sample, BYTE *buffer, int i) {
    if (format_tag == WAVE_FORMAT_PCM) {
        int32_t sample = 0;
        switch (bits_per_sample) {
            case 8:
                sample = int32_t(((int8_t *)buffer)[i]) << 24;
                break;

            case 16:
                sample = int32_t(((int16_t *)buffer)[i]) << 16;
                break;

            case 24:
                sample |= int32_t(((int8_t *)buffer)[i * 3 + 2]) << 24;
                sample |= int32_t(((int8_t *)buffer)[i * 3 + 1]) << 16;
                sample |= int32_t(((int8_t *)buffer)[i * 3 + 0]) << 8;
                break;

            case 32:
                sample = ((int32_t *)buffer)[i];
                break;
        }

        return sample;
    } else if (format_tag == WAVE_FORMAT_IEEE_FLOAT) {
        return int32_t(((float *)buffer)[i] * 32768.0) << 16;
    } else {
        ERR_PRINT("WASAPI: Unknown format tag");
    }

    return 0;
}

void AudioDriverWASAPI::write_sample(WORD format_tag, int bits_per_sample, BYTE *buffer, int i, int32_t sample) {
    if (format_tag == WAVE_FORMAT_PCM) {
        switch (bits_per_sample) {
            case 8:
                ((int8_t *)buffer)[i] = sample >> 24;
                break;
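read_sample() above left-aligns every source format into the driver's signed 32-bit representation. A quick check of its 24-bit branch with made-up bytes (illustrative only, not part of the patch):

    // A little-endian 24-bit sample 0x123456 stored as three bytes, LSB first.
    BYTE buffer[3] = { 0x56, 0x34, 0x12 };
    int32_t sample = 0;
    sample |= int32_t(((int8_t *)buffer)[2]) << 24; // 0x12000000
    sample |= int32_t(((int8_t *)buffer)[1]) << 16; // 0x00340000
    sample |= int32_t(((int8_t *)buffer)[0]) << 8;  // 0x00005600
    // sample == 0x12345600: the 24-bit value left-aligned in int32_t, low byte zero.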
@@ -426,83 +528,99 @@ void AudioDriverWASAPI::write_sample(AudioDriverWASAPI *ad, BYTE *buffer, int i,
                ((int32_t *)buffer)[i] = sample;
                break;
        }
    } else if (ad->format_tag == WAVE_FORMAT_IEEE_FLOAT) {
    } else if (format_tag == WAVE_FORMAT_IEEE_FLOAT) {
        ((float *)buffer)[i] = (sample >> 16) / 32768.f;
    } else {
        ERR_PRINT("WASAPI: Unknown format tag");
        ad->exit_thread = true;
    }
}

void AudioDriverWASAPI::thread_func(void *p_udata) {

    AudioDriverWASAPI *ad = (AudioDriverWASAPI *)p_udata;
    uint32_t avail_frames = 0;
    uint32_t write_ofs = 0;

    while (!ad->exit_thread) {

        uint32_t read_frames = 0;
        uint32_t written_frames = 0;

        if (avail_frames == 0) {
            ad->lock();
            ad->start_counting_ticks();

            if (ad->audio_output.active) {
                ad->audio_server_process(ad->buffer_frames, ad->samples_in.ptrw());
            } else {
                for (unsigned int i = 0; i < ad->samples_in.size(); i++) {
                    ad->samples_in.write[i] = 0;
                }
            }

            avail_frames = ad->buffer_frames;
            write_ofs = 0;

            ad->stop_counting_ticks();
            ad->unlock();
        }

        ad->lock();
        ad->start_counting_ticks();

        if (ad->active) {
            ad->audio_server_process(ad->buffer_frames, ad->samples_in.ptrw());
        } else {
            for (unsigned int i = 0; i < ad->buffer_size; i++) {
                ad->samples_in.write[i] = 0;
            }
        }

        ad->stop_counting_ticks();
        ad->unlock();

        unsigned int left_frames = ad->buffer_frames;
        unsigned int buffer_idx = 0;
        while (left_frames > 0 && ad->audio_client) {
            WaitForSingleObject(ad->event, 1000);

            ad->lock();
            ad->start_counting_ticks();
        if (avail_frames > 0 && ad->audio_output.audio_client) {

            UINT32 cur_frames;
            bool invalidated = false;
            HRESULT hr = ad->audio_client->GetCurrentPadding(&cur_frames);
            HRESULT hr = ad->audio_output.audio_client->GetCurrentPadding(&cur_frames);
            if (hr == S_OK) {

                // Check how many frames are available on the WASAPI buffer
                UINT32 avail_frames = ad->buffer_frames - cur_frames;
                UINT32 write_frames = avail_frames > left_frames ? left_frames : avail_frames;
                UINT32 write_frames = MIN(ad->buffer_frames - cur_frames, avail_frames);
                if (write_frames > 0) {
                    BYTE *buffer = NULL;
                    hr = ad->audio_output.render_client->GetBuffer(write_frames, &buffer);
                    if (hr == S_OK) {

                BYTE *buffer = NULL;
                hr = ad->render_client->GetBuffer(write_frames, &buffer);
                if (hr == S_OK) {
                    // We're using WASAPI Shared Mode so we must convert the buffer

                    if (ad->channels == ad->wasapi_channels) {
                        for (unsigned int i = 0; i < write_frames * ad->channels; i++) {
                            ad->write_sample(ad, buffer, i, ad->samples_in[buffer_idx++]);
                        }
                    } else {
                        for (unsigned int i = 0; i < write_frames; i++) {
                            for (unsigned int j = 0; j < MIN(ad->channels, ad->wasapi_channels); j++) {
                                ad->write_sample(ad, buffer, i * ad->wasapi_channels + j, ad->samples_in[buffer_idx++]);
                        // We're using WASAPI Shared Mode so we must convert the buffer
                        if (ad->channels == ad->audio_output.channels) {
                            for (unsigned int i = 0; i < write_frames * ad->channels; i++) {
                                ad->write_sample(ad->audio_output.format_tag, ad->audio_output.bits_per_sample, buffer, i, ad->samples_in.write[write_ofs++]);
                            }
                            if (ad->wasapi_channels > ad->channels) {
                                for (unsigned int j = ad->channels; j < ad->wasapi_channels; j++) {
                                    ad->write_sample(ad, buffer, i * ad->wasapi_channels + j, 0);
                        } else {
                            for (unsigned int i = 0; i < write_frames; i++) {
                                for (unsigned int j = 0; j < MIN(ad->channels, ad->audio_output.channels); j++) {
                                    ad->write_sample(ad->audio_output.format_tag, ad->audio_output.bits_per_sample, buffer, i * ad->audio_output.channels + j, ad->samples_in.write[write_ofs++]);
                                }
                                if (ad->audio_output.channels > ad->channels) {
                                    for (unsigned int j = ad->channels; j < ad->audio_output.channels; j++) {
                                        ad->write_sample(ad->audio_output.format_tag, ad->audio_output.bits_per_sample, buffer, i * ad->audio_output.channels + j, 0);
                                    }
                                }
                            }
                        }

                    hr = ad->render_client->ReleaseBuffer(write_frames, 0);
                    if (hr != S_OK) {
                        ERR_PRINT("WASAPI: Release buffer error");
                    }
                        hr = ad->audio_output.render_client->ReleaseBuffer(write_frames, 0);
                        if (hr != S_OK) {
                            ERR_PRINT("WASAPI: Release buffer error");
                        }

                    left_frames -= write_frames;
                } else if (hr == AUDCLNT_E_DEVICE_INVALIDATED) {
                    invalidated = true;
                } else {
                    ERR_PRINT("WASAPI: Get buffer error");
                    ad->exit_thread = true;
                        avail_frames -= write_frames;
                        written_frames += write_frames;
                    } else if (hr == AUDCLNT_E_DEVICE_INVALIDATED) {
                        // Device is not valid anymore, reopen it

                        Error err = ad->finish_render_device();
                        if (err != OK) {
                            ERR_PRINT("WASAPI: finish_render_device error");
                        } else {
                            // We reopened the device and samples_in may have resized, so invalidate the current avail_frames
                            avail_frames = 0;
                        }
                    } else {
                        ERR_PRINT("WASAPI: Get buffer error");
                        ad->exit_thread = true;
                    }
                }
            } else if (hr == AUDCLNT_E_DEVICE_INVALIDATED) {
                invalidated = true;
@ -514,47 +632,117 @@ void AudioDriverWASAPI::thread_func(void *p_udata) {
|
|||
// Device is not valid anymore
|
||||
WARN_PRINT("WASAPI: Current device invalidated, closing device");
|
||||
|
||||
Error err = ad->finish_device();
|
||||
Error err = ad->finish_render_device();
|
||||
if (err != OK) {
|
||||
ERR_PRINT("WASAPI: finish_device error");
|
||||
ERR_PRINT("WASAPI: finish_render_device error");
|
||||
}
|
||||
}
|
||||
|
||||
ad->stop_counting_ticks();
|
||||
ad->unlock();
|
||||
}
|
||||
|
||||
ad->lock();
|
||||
ad->start_counting_ticks();
|
||||
|
||||
// If we're using the Default device and it changed finish it so we'll re-init the device
|
||||
if (ad->device_name == "Default" && default_device_changed) {
|
||||
Error err = ad->finish_device();
|
||||
if (ad->audio_output.device_name == "Default" && default_render_device_changed) {
|
||||
Error err = ad->finish_render_device();
|
||||
if (err != OK) {
|
||||
ERR_PRINT("WASAPI: finish_device error");
|
||||
ERR_PRINT("WASAPI: finish_render_device error");
|
||||
}
|
||||
|
||||
default_device_changed = false;
|
||||
default_render_device_changed = false;
|
||||
}
|
||||
|
||||
// User selected a new device, finish the current one so we'll init the new device
|
||||
if (ad->device_name != ad->new_device) {
|
||||
ad->device_name = ad->new_device;
|
||||
Error err = ad->finish_device();
|
||||
if (ad->audio_output.device_name != ad->audio_output.new_device) {
|
||||
ad->audio_output.device_name = ad->audio_output.new_device;
|
||||
Error err = ad->finish_render_device();
|
||||
if (err != OK) {
|
||||
ERR_PRINT("WASAPI: finish_device error");
|
||||
ERR_PRINT("WASAPI: finish_render_device error");
|
||||
}
|
||||
}
|
||||
|
||||
if (!ad->audio_client) {
|
||||
Error err = ad->init_device(true);
|
||||
if (!ad->audio_output.audio_client) {
|
||||
Error err = ad->init_render_device(true);
|
||||
if (err == OK) {
|
||||
ad->start();
|
||||
}
|
||||
}
|
||||
|
||||
if (ad->audio_input.active) {
|
||||
UINT32 packet_length = 0;
|
||||
BYTE *data;
|
||||
UINT32 num_frames_available;
|
||||
DWORD flags;
|
||||
|
||||
HRESULT hr = ad->audio_input.capture_client->GetNextPacketSize(&packet_length);
|
||||
if (hr == S_OK) {
|
||||
while (packet_length != 0) {
|
||||
hr = ad->audio_input.capture_client->GetBuffer(&data, &num_frames_available, &flags, NULL, NULL);
|
||||
ERR_BREAK(hr != S_OK);
|
||||
|
||||
// fixme: Only works for floating point atm
|
||||
for (int j = 0; j < num_frames_available; j++) {
|
||||
int32_t l, r;
|
||||
|
||||
if (flags & AUDCLNT_BUFFERFLAGS_SILENT) {
|
||||
l = r = 0;
|
||||
} else {
|
||||
if (ad->audio_input.channels == 2) {
|
||||
l = read_sample(ad->audio_input.format_tag, ad->audio_input.bits_per_sample, data, j * 2);
|
||||
r = read_sample(ad->audio_input.format_tag, ad->audio_input.bits_per_sample, data, j * 2 + 1);
|
||||
} else if (ad->audio_input.channels == 1) {
|
||||
l = r = read_sample(ad->audio_input.format_tag, ad->audio_input.bits_per_sample, data, j);
|
||||
} else {
|
||||
l = r = 0;
|
||||
ERR_PRINT("WASAPI: unsupported channel count in microphone!");
|
||||
}
|
||||
}
|
||||
|
||||
ad->input_buffer_write(l);
|
||||
ad->input_buffer_write(r);
|
||||
}
|
||||
|
||||
read_frames += num_frames_available;
|
||||
|
||||
hr = ad->audio_input.capture_client->ReleaseBuffer(num_frames_available);
|
||||
ERR_BREAK(hr != S_OK);
|
||||
|
||||
hr = ad->audio_input.capture_client->GetNextPacketSize(&packet_length);
|
||||
ERR_BREAK(hr != S_OK);
|
||||
}
|
||||
}
|
||||
|
||||
// If we're using the Default device and it changed finish it so we'll re-init the device
|
||||
if (ad->audio_input.device_name == "Default" && default_capture_device_changed) {
|
||||
Error err = ad->finish_capture_device();
|
||||
if (err != OK) {
|
||||
ERR_PRINT("WASAPI: finish_capture_device error");
|
||||
}
|
||||
|
||||
default_capture_device_changed = false;
|
||||
}
|
||||
|
||||
// User selected a new device, finish the current one so we'll init the new device
|
||||
if (ad->audio_input.device_name != ad->audio_input.new_device) {
|
||||
ad->audio_input.device_name = ad->audio_input.new_device;
|
||||
Error err = ad->finish_capture_device();
|
||||
if (err != OK) {
|
||||
ERR_PRINT("WASAPI: finish_capture_device error");
|
||||
}
|
||||
}
|
||||
|
||||
if (!ad->audio_input.audio_client) {
|
||||
Error err = ad->init_capture_device(true);
|
||||
if (err == OK) {
|
||||
ad->capture_start();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ad->stop_counting_ticks();
|
||||
ad->unlock();
|
||||
|
||||
// Let the thread rest a while if we haven't read or written anything
if (written_frames == 0 && read_frames == 0) {
OS::get_singleton()->delay_usec(1000);
}
}

ad->thread_exited = true;

@@ -562,12 +750,12 @@ void AudioDriverWASAPI::thread_func(void *p_udata) {

void AudioDriverWASAPI::start() {

if (audio_client) {
HRESULT hr = audio_client->Start();
if (audio_output.audio_client) {
HRESULT hr = audio_output.audio_client->Start();
if (hr != S_OK) {
ERR_PRINT("WASAPI: Start failed");
} else {
active = true;
audio_output.active = true;
}
}
}

@@ -594,7 +782,8 @@ void AudioDriverWASAPI::finish() {
thread = NULL;
}

finish_device();
finish_capture_device();
finish_render_device();

if (mutex) {
memdelete(mutex);

@@ -602,30 +791,70 @@ void AudioDriverWASAPI::finish() {
}
}

Error AudioDriverWASAPI::capture_start() {

Error err = init_capture_device();
if (err != OK) {
ERR_PRINT("WASAPI: init_capture_device error");
return err;
}

if (audio_input.active == false) {
audio_input.audio_client->Start();
audio_input.active = true;

return OK;
}

return FAILED;
}

Error AudioDriverWASAPI::capture_stop() {

if (audio_input.active == true) {
audio_input.audio_client->Stop();
audio_input.active = false;

return OK;
}

return FAILED;
}

void AudioDriverWASAPI::capture_set_device(const String &p_name) {

lock();
audio_input.new_device = p_name;
unlock();
}

Array AudioDriverWASAPI::capture_get_device_list() {

return audio_device_get_list(true);
}

String AudioDriverWASAPI::capture_get_device() {

lock();
String name = audio_input.device_name;
unlock();

return name;
}

AudioDriverWASAPI::AudioDriverWASAPI() {

audio_client = NULL;
render_client = NULL;
mutex = NULL;
thread = NULL;

format_tag = 0;
bits_per_sample = 0;

samples_in.clear();

buffer_size = 0;
channels = 0;
wasapi_channels = 0;
mix_rate = 0;
buffer_frames = 0;

thread_exited = false;
exit_thread = false;
active = false;

device_name = "Default";
new_device = "Default";
}

#endif
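A note on the sample format used by write_sample() and read_sample() above: the driver appears to pass audio around as full-scale 32-bit integers, which is why both the 16-bit and the float paths shift the value down by 16 bits before converting. A minimal sketch of the two conversions, assuming hypothetical helper names (sample_to_float and float_to_sample are illustrative only, they are not part of this patch):

// Sketch only: models the fixed-point convention behind the
// "(sample >> 16) / 32768.f" expressions in the WASAPI code above.
#include <cstdint>

static inline float sample_to_float(int32_t p_sample) {
	// The useful 16 bits sit in the high half of the int32.
	return (p_sample >> 16) / 32768.f;
}

static inline int32_t float_to_sample(float p_value) {
	// Approximate inverse: clamp to [-1, 1] and move the result back
	// into the high 16 bits (the low 16 bits of precision are dropped).
	if (p_value > 1.0f) p_value = 1.0f;
	if (p_value < -1.0f) p_value = -1.0f;
	return ((int32_t)(p_value * 32767.0f)) << 16;
}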
@@ -43,35 +43,63 @@

class AudioDriverWASAPI : public AudioDriver {

HANDLE event;
IAudioClient *audio_client;
IAudioRenderClient *render_client;
class AudioDeviceWASAPI {
public:
IAudioClient *audio_client;
IAudioRenderClient *render_client;
IAudioCaptureClient *capture_client;
bool active;

WORD format_tag;
WORD bits_per_sample;
unsigned int channels;
unsigned int frame_size;

String device_name;
String new_device;

AudioDeviceWASAPI() {
audio_client = NULL;
render_client = NULL;
capture_client = NULL;
active = false;
format_tag = 0;
bits_per_sample = 0;
channels = 0;
frame_size = 0;
device_name = "Default";
new_device = "Default";
}
};

AudioDeviceWASAPI audio_input;
AudioDeviceWASAPI audio_output;

Mutex *mutex;
Thread *thread;

String device_name;
String new_device;

WORD format_tag;
WORD bits_per_sample;

Vector<int32_t> samples_in;

unsigned int buffer_size;
unsigned int channels;
unsigned int wasapi_channels;
int mix_rate;
int buffer_frames;

bool thread_exited;
mutable bool exit_thread;
bool active;

_FORCE_INLINE_ void write_sample(AudioDriverWASAPI *ad, BYTE *buffer, int i, int32_t sample);
static _FORCE_INLINE_ void write_sample(WORD format_tag, int bits_per_sample, BYTE *buffer, int i, int32_t sample);
static _FORCE_INLINE_ int32_t read_sample(WORD format_tag, int bits_per_sample, BYTE *buffer, int i);
static void thread_func(void *p_udata);

Error init_device(bool reinit = false);
Error finish_device();
Error init_render_device(bool reinit = false);
Error init_capture_device(bool reinit = false);

Error finish_render_device();
Error finish_capture_device();

Error audio_device_init(AudioDeviceWASAPI *p_device, bool p_capture, bool reinit);
Error audio_device_finish(AudioDeviceWASAPI *p_device);
Array audio_device_get_list(bool p_capture);

public:
virtual const char *get_name() const {

@@ -89,6 +117,12 @@ public:
virtual void unlock();
virtual void finish();

virtual Error capture_start();
virtual Error capture_stop();
virtual Array capture_get_device_list();
virtual void capture_set_device(const String &p_name);
virtual String capture_get_device();

AudioDriverWASAPI();
};

@@ -29,6 +29,7 @@
/*************************************************************************/

#include "audio_stream.h"
#include "os/os.h"

//////////////////////////////

@@ -99,6 +100,119 @@ void AudioStream::_bind_methods() {

////////////////////////////////

Ref<AudioStreamPlayback> AudioStreamMicrophone::instance_playback() {
Ref<AudioStreamPlaybackMicrophone> playback;
playback.instance();

playbacks.insert(playback.ptr());

playback->microphone = Ref<AudioStreamMicrophone>((AudioStreamMicrophone *)this);
playback->active = false;

return playback;
}

String AudioStreamMicrophone::get_stream_name() const {

//if (audio_stream.is_valid()) {
//return "Random: " + audio_stream->get_name();
//}
return "Microphone";
}

float AudioStreamMicrophone::get_length() const {
return 0;
}

void AudioStreamMicrophone::_bind_methods() {
}

AudioStreamMicrophone::AudioStreamMicrophone() {
}

void AudioStreamPlaybackMicrophone::_mix_internal(AudioFrame *p_buffer, int p_frames) {

AudioDriver::get_singleton()->lock();

Vector<int32_t> buf = AudioDriver::get_singleton()->get_input_buffer();
unsigned int input_size = AudioDriver::get_singleton()->get_input_size();

// p_frames is multiplied by two since an AudioFrame is stereo
if ((p_frames + MICROPHONE_PLAYBACK_DELAY * 2) > input_size) {
for (int i = 0; i < p_frames; i++) {
p_buffer[i] = AudioFrame(0.0f, 0.0f);
}
input_ofs = 0;
} else {
for (int i = 0; i < p_frames; i++) {
if (input_size >= input_ofs) {
float l = (buf[input_ofs++] >> 16) / 32768.f;
if (input_ofs >= buf.size()) {
input_ofs = 0;
}
float r = (buf[input_ofs++] >> 16) / 32768.f;
if (input_ofs >= buf.size()) {
input_ofs = 0;
}

p_buffer[i] = AudioFrame(l, r);
} else {
p_buffer[i] = AudioFrame(0.0f, 0.0f);
}
}
}

AudioDriver::get_singleton()->unlock();
}

void AudioStreamPlaybackMicrophone::mix(AudioFrame *p_buffer, float p_rate_scale, int p_frames) {
AudioStreamPlaybackResampled::mix(p_buffer, p_rate_scale, p_frames);
}

float AudioStreamPlaybackMicrophone::get_stream_sampling_rate() {
return AudioDriver::get_singleton()->get_mix_rate();
}

void AudioStreamPlaybackMicrophone::start(float p_from_pos) {
input_ofs = 0;

AudioDriver::get_singleton()->capture_start();

active = true;
_begin_resample();
}

void AudioStreamPlaybackMicrophone::stop() {
AudioDriver::get_singleton()->capture_stop();
active = false;
}

bool AudioStreamPlaybackMicrophone::is_playing() const {
return active;
}

int AudioStreamPlaybackMicrophone::get_loop_count() const {
return 0;
}

float AudioStreamPlaybackMicrophone::get_playback_position() const {
return 0;
}

void AudioStreamPlaybackMicrophone::seek(float p_time) {
return; // Can't seek a microphone input
}

AudioStreamPlaybackMicrophone::~AudioStreamPlaybackMicrophone() {
microphone->playbacks.erase(this);
stop();
}

AudioStreamPlaybackMicrophone::AudioStreamPlaybackMicrophone() {
}

////////////////////////////////

void AudioStreamRandomPitch::set_audio_stream(const Ref<AudioStream> &p_audio_stream) {

audio_stream = p_audio_stream;
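The _mix_internal() implementation above reads the capture ring buffer as interleaved stereo: each output AudioFrame consumes two int32 entries (left, then right), and the read offset wraps back to zero at the end of the buffer. The MICROPHONE_PLAYBACK_DELAY * 2 term in the size check appears to keep about 256 frames (512 interleaved samples) of slack between the capture writer and this reader, so silence is produced until the driver has buffered that much data. A minimal standalone sketch of the same wraparound read, with hypothetical names:

// Sketch only: interleaved stereo read with wraparound, mirroring the loop
// in AudioStreamPlaybackMicrophone::_mix_internal() above.
#include <cstdint>
#include <vector>

struct StereoSample { float l, r; };

static StereoSample read_capture_frame(const std::vector<int32_t> &p_ring, unsigned int &r_ofs) {
	StereoSample s;
	s.l = (p_ring[r_ofs++] >> 16) / 32768.f; // left channel
	if (r_ofs >= p_ring.size()) r_ofs = 0;   // wrap the read offset
	s.r = (p_ring[r_ofs++] >> 16) / 32768.f; // right channel
	if (r_ofs >= p_ring.size()) r_ofs = 0;
	return s;
}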
@@ -94,6 +94,63 @@ public:
virtual float get_length() const = 0; //if supported, otherwise return 0
};

// Microphone

class AudioStreamPlaybackMicrophone;

class AudioStreamMicrophone : public AudioStream {

GDCLASS(AudioStreamMicrophone, AudioStream)
friend class AudioStreamPlaybackMicrophone;

Set<AudioStreamPlaybackMicrophone *> playbacks;

protected:
static void _bind_methods();

public:
virtual Ref<AudioStreamPlayback> instance_playback();
virtual String get_stream_name() const;

virtual float get_length() const; //if supported, otherwise return 0

AudioStreamMicrophone();
};

class AudioStreamPlaybackMicrophone : public AudioStreamPlaybackResampled {

GDCLASS(AudioStreamPlaybackMicrophone, AudioStreamPlayback)
friend class AudioStreamMicrophone;

const int MICROPHONE_PLAYBACK_DELAY = 256;

bool active;
unsigned int input_ofs;

Ref<AudioStreamMicrophone> microphone;

protected:
virtual void _mix_internal(AudioFrame *p_buffer, int p_frames);
virtual float get_stream_sampling_rate();

public:
virtual void mix(AudioFrame *p_buffer, float p_rate_scale, int p_frames);

virtual void start(float p_from_pos = 0.0);
virtual void stop();
virtual bool is_playing() const;

virtual int get_loop_count() const; //times it looped

virtual float get_playback_position() const;
virtual void seek(float p_time);

~AudioStreamPlaybackMicrophone();
AudioStreamPlaybackMicrophone();
};

//

class AudioStreamPlaybackRandomPitch;

class AudioStreamRandomPitch : public AudioStream {
@@ -33,6 +33,7 @@
#include "os/file_access.h"
#include "os/os.h"
#include "project_settings.h"
#include "scene/resources/audio_stream_sample.h"
#include "servers/audio/audio_driver_dummy.h"
#include "servers/audio/effects/audio_effect_compressor.h"
#ifdef TOOLS_ENABLED

@@ -79,6 +80,17 @@ double AudioDriver::get_mix_time() const {
return total;
}

void AudioDriver::input_buffer_write(int32_t sample) {

input_buffer.write[input_position++] = sample;
if (input_position >= input_buffer.size()) {
input_position = 0;
}
if (input_size < input_buffer.size()) {
input_size++;
}
}

AudioDriver::SpeakerMode AudioDriver::get_speaker_mode_by_total_channels(int p_channels) const {
switch (p_channels) {
case 4: return SPEAKER_SURROUND_31;

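input_buffer_write() above is the producer side of a fixed-size ring buffer: input_position is the next write index and wraps around, while input_size counts how many valid samples the buffer currently holds and saturates at the buffer length. For example, writing 6 samples into a 4-entry buffer leaves input_position at 2 and input_size at 4, with the two oldest samples overwritten. A standalone sketch of the same behaviour, where std::vector stands in for Godot's Vector and the names are hypothetical:

// Sketch only: self-contained model of the ring-buffer writer above.
#include <cstdint>
#include <vector>

struct InputRing {
	std::vector<int32_t> buffer;
	unsigned int position = 0; // next write index, wraps around
	unsigned int size = 0;     // valid samples, saturates at buffer.size()

	explicit InputRing(size_t p_len) : buffer(p_len, 0) {}

	void write(int32_t p_sample) {
		buffer[position++] = p_sample;
		if (position >= buffer.size()) {
			position = 0;
		}
		if (size < buffer.size()) {
			size++;
		}
	}
};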
@@ -113,6 +125,14 @@ String AudioDriver::get_device() {
return "Default";
}

Array AudioDriver::capture_get_device_list() {
Array list;

list.push_back("Default");

return list;
}

AudioDriver::AudioDriver() {

_last_mix_time = 0;

@@ -1201,6 +1221,21 @@ void AudioServer::set_device(String device) {
AudioDriver::get_singleton()->set_device(device);
}

Array AudioServer::capture_get_device_list() {

return AudioDriver::get_singleton()->capture_get_device_list();
}

String AudioServer::capture_get_device() {

return AudioDriver::get_singleton()->capture_get_device();
}

void AudioServer::capture_set_device(const String &p_name) {

AudioDriver::get_singleton()->capture_set_device(p_name);
}

void AudioServer::_bind_methods() {

ClassDB::bind_method(D_METHOD("set_bus_count", "amount"), &AudioServer::set_bus_count);

@@ -1251,6 +1286,10 @@ void AudioServer::_bind_methods() {
ClassDB::bind_method(D_METHOD("get_device"), &AudioServer::get_device);
ClassDB::bind_method(D_METHOD("set_device"), &AudioServer::set_device);

ClassDB::bind_method(D_METHOD("capture_get_device_list"), &AudioServer::capture_get_device_list);
ClassDB::bind_method(D_METHOD("capture_get_device"), &AudioServer::capture_get_device);
ClassDB::bind_method(D_METHOD("capture_set_device"), &AudioServer::capture_set_device);

ClassDB::bind_method(D_METHOD("set_bus_layout", "bus_layout"), &AudioServer::set_bus_layout);
ClassDB::bind_method(D_METHOD("generate_bus_layout"), &AudioServer::generate_bus_layout);

@@ -38,6 +38,8 @@
#include "variant.h"

class AudioDriverDummy;
class AudioStream;
class AudioStreamSample;

class AudioDriver {

@@ -51,8 +53,13 @@ class AudioDriver {
#endif

protected:
Vector<int32_t> input_buffer;
unsigned int input_position;
unsigned int input_size;

void audio_server_process(int p_frames, int32_t *p_buffer, bool p_update_mix_time = true);
void update_mix_time(int p_frames);
void input_buffer_write(int32_t sample);

#ifdef DEBUG_ENABLED
_FORCE_INLINE_ void start_counting_ticks() { prof_ticks = OS::get_singleton()->get_ticks_usec(); }

@@ -91,11 +98,21 @@ public:
virtual void unlock() = 0;
virtual void finish() = 0;

virtual Error capture_start() { return FAILED; }
virtual Error capture_stop() { return FAILED; }
virtual void capture_set_device(const String &p_name) {}
virtual String capture_get_device() { return "Default"; }
virtual Array capture_get_device_list(); // TODO: convert this and get_device_list to PoolStringArray

virtual float get_latency() { return 0; }

SpeakerMode get_speaker_mode_by_total_channels(int p_channels) const;
int get_total_channels_by_speaker_mode(SpeakerMode) const;

Vector<int32_t> get_input_buffer() { return input_buffer; }
unsigned int get_input_position() { return input_position; }
unsigned int get_input_size() { return input_size; }

#ifdef DEBUG_ENABLED
uint64_t get_profiling_time() const { return prof_time; }
void reset_profiling_time() { prof_time = 0; }

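The capture hooks added to AudioDriver above are deliberately non-abstract: a driver without microphone support inherits capture_start()/capture_stop() returning FAILED and a device list containing only "Default", so existing platforms keep building and running unchanged. A driver that does support capture overrides the hooks and pushes interleaved samples through the protected input_buffer_write(). A minimal sketch of such an override, where the class name and the feed_silence() helper are hypothetical and a real driver would pull data from the platform capture API instead, as the WASAPI changes above do:

// Sketch only: the shape of a driver-side implementation of the new capture
// API. Unrelated AudioDriver pure virtuals (init, start, get_mix_rate, ...)
// are omitted for brevity.
class AudioDriverNullCapture : public AudioDriver {
	bool capture_active = false;

public:
	virtual Error capture_start() {
		capture_active = true;
		return OK;
	}

	virtual Error capture_stop() {
		capture_active = false;
		return OK;
	}

	// Hypothetical helper called from the driver's own thread: feeds one
	// stereo frame of silence per output frame so the microphone stream
	// always has something to read.
	void feed_silence(int p_frames) {
		for (int i = 0; i < p_frames && capture_active; i++) {
			input_buffer_write(0); // left
			input_buffer_write(0); // right
		}
	}
};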
@@ -222,6 +239,18 @@ private:

void _mix_step();

#if 0
struct AudioInBlock {

Ref<AudioStreamSample> audio_stream;
int current_position;
bool loops;
};

Map<StringName, AudioInBlock *> audio_in_block_map;
Vector<AudioInBlock *> audio_in_blocks;
#endif

struct CallbackItem {

AudioCallback callback;

@@ -335,8 +364,11 @@ public:
String get_device();
void set_device(String device);

float get_output_latency() { return output_latency; }
Array capture_get_device_list();
String capture_get_device();
void capture_set_device(const String &p_name);

float get_output_latency() { return output_latency; }

AudioServer();
virtual ~AudioServer();
};

@@ -104,6 +104,7 @@ void register_server_types() {

ClassDB::register_virtual_class<AudioStream>();
ClassDB::register_virtual_class<AudioStreamPlayback>();
ClassDB::register_class<AudioStreamMicrophone>();
ClassDB::register_class<AudioStreamRandomPitch>();
ClassDB::register_virtual_class<AudioEffect>();
ClassDB::register_class<AudioEffectEQ>();