Merge pull request #77352 from bruvzg/tts_disable_3
[3.x] Add audio/general/text_to_speech project setting to enable/disable TTS.
Commit: 8a417f32bc
@@ -1093,6 +1093,8 @@ ProjectSettings::ProjectSettings() {
GLOBAL_DEF_RST("audio/3d_panning_strength", 1.0f);
custom_prop_info["audio/3d_panning_strength"] = PropertyInfo(Variant::REAL, "audio/3d_panning_strength", PROPERTY_HINT_RANGE, "0,4,0.01");

GLOBAL_DEF_RST("audio/general/text_to_speech", false);

PoolStringArray extensions = PoolStringArray();
extensions.push_back("gd");
if (Engine::get_singleton()->has_singleton("GodotSharp")) {

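For context on the hunk above: GLOBAL_DEF_RST registers a project setting with its default value and flags it as requiring a restart to take effect, while custom_prop_info attaches an editor hint, as the audio/3d_panning_strength lines do. A minimal sketch of the same registration pattern, using a made-up setting name purely for illustration:

```cpp
// Fragment from inside ProjectSettings::ProjectSettings(); the setting name
// and range below are hypothetical and only illustrate the pattern above.
GLOBAL_DEF_RST("audio/general/example_gain", 1.0f);
custom_prop_info["audio/general/example_gain"] = PropertyInfo(Variant::REAL,
		"audio/general/example_gain", PROPERTY_HINT_RANGE, "0,2,0.01");

// A plain bool such as "audio/general/text_to_speech" needs no PropertyInfo
// hint; the default registered here is what GLOBAL_GET returns unless the
// project overrides it.
```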
@@ -1073,6 +1073,7 @@
- [code]id[/code] is the voice identifier.
- [code]language[/code] is the language code in [code]lang_Variant[/code] format. The [code]lang[/code] part is a 2 or 3-letter code based on the ISO-639 standard, in lowercase. The [code]Variant[/code] part is an engine-dependent string describing the country, region and/or dialect.
[b]Note:[/b] This method is implemented on Android, iOS, HTML5, Linux, macOS, and Windows.
[b]Note:[/b] [member ProjectSettings.audio/general/text_to_speech] should be [code]true[/code] to use text-to-speech.
</description>
</method>
<method name="tts_get_voices_for_language" qualifiers="const">
@@ -1081,6 +1082,7 @@
<description>
Returns a [PoolStringArray] of voice identifiers for the [code]language[/code].
[b]Note:[/b] This method is implemented on Android, iOS, HTML5, Linux, macOS, and Windows.
[b]Note:[/b] [member ProjectSettings.audio/general/text_to_speech] should be [code]true[/code] to use text-to-speech.
</description>
</method>
<method name="tts_is_paused" qualifiers="const">
@@ -1088,6 +1090,7 @@
<description>
Returns [code]true[/code] if the synthesizer is in a paused state.
[b]Note:[/b] This method is implemented on Android, iOS, HTML5, Linux, macOS, and Windows.
[b]Note:[/b] [member ProjectSettings.audio/general/text_to_speech] should be [code]true[/code] to use text-to-speech.
</description>
</method>
<method name="tts_is_speaking" qualifiers="const">
@@ -1095,6 +1098,7 @@
<description>
Returns [code]true[/code] if the synthesizer is generating speech or has an utterance waiting in the queue.
[b]Note:[/b] This method is implemented on Android, iOS, HTML5, Linux, macOS, and Windows.
[b]Note:[/b] [member ProjectSettings.audio/general/text_to_speech] should be [code]true[/code] to use text-to-speech.
</description>
</method>
<method name="tts_pause">
@@ -1102,6 +1106,7 @@
<description>
Puts the synthesizer into a paused state.
[b]Note:[/b] This method is implemented on Android, iOS, HTML5, Linux, macOS, and Windows.
[b]Note:[/b] [member ProjectSettings.audio/general/text_to_speech] should be [code]true[/code] to use text-to-speech.
</description>
</method>
<method name="tts_resume">
@@ -1109,6 +1114,7 @@
<description>
Resumes the synthesizer if it was paused.
[b]Note:[/b] This method is implemented on Android, iOS, HTML5, Linux, macOS, and Windows.
[b]Note:[/b] [member ProjectSettings.audio/general/text_to_speech] should be [code]true[/code] to use text-to-speech.
</description>
</method>
<method name="tts_set_utterance_callback">
@@ -1122,6 +1128,7 @@
- [code]TTS_UTTERANCE_BOUNDARY[/code] callable's method should take two [int] parameters: the index of the character and the utterance ID.
[b]Note:[/b] The granularity of the boundary callbacks is engine dependent.
[b]Note:[/b] This method is implemented on Android, iOS, HTML5, Linux, macOS, and Windows.
[b]Note:[/b] [member ProjectSettings.audio/general/text_to_speech] should be [code]true[/code] to use text-to-speech.
</description>
</method>
<method name="tts_speak">
@@ -1143,6 +1150,7 @@
[b]Note:[/b] On Windows and Linux, utterance [code]text[/code] can use SSML markup. SSML support is engine and voice dependent. If the engine does not support SSML, you should strip out all XML markup before calling [method tts_speak].
[b]Note:[/b] The granularity of pitch, rate, and volume is engine and voice dependent. Values may be truncated.
[b]Note:[/b] This method is implemented on Android, iOS, HTML5, Linux, macOS, and Windows.
[b]Note:[/b] [member ProjectSettings.audio/general/text_to_speech] should be [code]true[/code] to use text-to-speech.
</description>
</method>
<method name="tts_stop">
@@ -1150,6 +1158,7 @@
<description>
Stops synthesis in progress and removes all utterances from the queue.
[b]Note:[/b] This method is implemented on Android, iOS, HTML5, Linux, macOS, and Windows.
[b]Note:[/b] [member ProjectSettings.audio/general/text_to_speech] should be [code]true[/code] to use text-to-speech.
</description>
</method>
</methods>

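A brief usage sketch for the methods documented above, written against the engine-side OS API that the platform implementations below provide. The helper name is hypothetical, and the voice dictionaries are assumed to carry an "id" key as the desktop backends produce; this only does something useful when audio/general/text_to_speech is enabled.

```cpp
#include "core/os/os.h"
#include "core/project_settings.h"

// Hypothetical helper: speak a line with the first available voice.
void speak_line(const String &p_text) {
	if (!(bool)GLOBAL_GET("audio/general/text_to_speech")) {
		return; // TTS disabled in the project settings; the backends would error out.
	}
	Array voices = OS::get_singleton()->tts_get_voices();
	if (voices.empty()) {
		return; // No synthesizer voices available on this system.
	}
	// Each entry is assumed to be a Dictionary with an "id" key.
	String voice_id = Dictionary(voices[0])["id"];
	// text, voice, volume, pitch, rate, utterance id, interrupt current speech.
	OS::get_singleton()->tts_speak(p_text, voice_id, 50, 1.0f, 1.0f, 0, true);
}
```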
@@ -284,6 +284,10 @@
If [code]true[/code], microphone input will be allowed. This requires appropriate permissions to be set when exporting to Android or iOS.
[b]Note:[/b] If the operating system blocks access to audio input devices (due to the user's privacy settings), audio capture will only return silence. On Windows 10 and later, make sure that apps are allowed to access the microphone in the OS' privacy settings.
</member>
<member name="audio/general/text_to_speech" type="bool" setter="" getter="" default="false">
If [code]true[/code], text-to-speech support is enabled. See [method OS.tts_get_voices] and [method OS.tts_speak].
[b]Note:[/b] Enabling TTS can cause additional idle CPU usage and interfere with sleep mode, so consider disabling it if TTS is not used.
</member>
<member name="audio/mix_rate" type="int" setter="" getter="" default="44100">
The mixing rate used for audio (in Hz). In general, it's better to not touch this and leave it to the host operating system.
</member>

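How the new member is consumed by engine code: in the 3.x codebase, GLOBAL_GET resolves to ProjectSettings::get_singleton()->get(), so the value read below is either the project's override or the default registered in project_settings.cpp. A minimal sketch (the wrapper function is hypothetical):

```cpp
#include "core/project_settings.h"

// Returns the project's value, or the registered default (false).
bool is_tts_enabled() {
	return (bool)GLOBAL_GET("audio/general/text_to_speech");
}
```

Note that the platform backends in this PR sample the setting once during their initialize() functions, so toggling it only takes effect after a restart, which is consistent with it being registered via GLOBAL_DEF_RST.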
@@ -440,7 +440,9 @@ public class Godot extends Fragment implements SensorEventListener, IDownloaderC

final String[] current_command_line = command_line;
mView.queueEvent(() -> {
if (!GodotLib.setup(current_command_line)) {
tts = new GodotTTS(activity);

if (!GodotLib.setup(current_command_line, tts)) {
godot_initialized = false;
Log.e(TAG, "Unable to setup the Godot engine! Aborting...");
alert(R.string.error_engine_setup_message, R.string.text_error_title, this::forceQuit);
@@ -663,7 +665,6 @@ public class Godot extends Fragment implements SensorEventListener, IDownloaderC
final Activity activity = getActivity();
io = new GodotIO(activity);
netUtils = new GodotNetUtils(activity);
tts = new GodotTTS(activity);
Context context = getContext();
directoryAccessHandler = new DirectoryAccessHandler(context);
fileAccessHandler = new FileAccessHandler(context);
@@ -673,7 +674,7 @@ public class Godot extends Fragment implements SensorEventListener, IDownloaderC
mMagnetometer = mSensorManager.getDefaultSensor(Sensor.TYPE_MAGNETIC_FIELD);
mGyroscope = mSensorManager.getDefaultSensor(Sensor.TYPE_GYROSCOPE);

GodotLib.initialize(activity, this, activity.getAssets(), io, netUtils, directoryAccessHandler, fileAccessHandler, use_apk_expansion, tts);
GodotLib.initialize(activity, this, activity.getAssets(), io, netUtils, directoryAccessHandler, fileAccessHandler, use_apk_expansion);

result_callback = null;

@@ -53,7 +53,7 @@ public class GodotLib {
/**
* Invoked on the main thread to initialize Godot native layer.
*/
public static native void initialize(Activity activity, Godot p_instance, AssetManager p_asset_manager, GodotIO godotIO, GodotNetUtils netUtils, DirectoryAccessHandler directoryAccessHandler, FileAccessHandler fileAccessHandler, boolean use_apk_expansion, GodotTTS tts);
public static native void initialize(Activity activity, Godot p_instance, AssetManager p_asset_manager, GodotIO godotIO, GodotNetUtils netUtils, DirectoryAccessHandler directoryAccessHandler, FileAccessHandler fileAccessHandler, boolean use_apk_expansion);

/**
* Invoked on the main thread to clean up Godot native layer.
@@ -65,7 +65,7 @@ public class GodotLib {
* Invoked on the GL thread to complete setup for the Godot native layer logic.
* @param p_cmdline Command line arguments used to configure Godot native layer components.
*/
public static native boolean setup(String[] p_cmdline);
public static native boolean setup(String[] p_cmdline, GodotTTS tts);

/**
* Invoked on the GL thread when the underlying Android surface has changed size.

@@ -62,8 +62,9 @@ public class GodotTTS extends UtteranceProgressListener {
final private static int EVENT_CANCEL = 2;
final private static int EVENT_BOUNDARY = 3;

final private TextToSpeech synth;
final private LinkedList<GodotUtterance> queue;
final private Activity activity;
private TextToSpeech synth;
private LinkedList<GodotUtterance> queue;
final private Object lock = new Object();
private GodotUtterance lastUtterance;

@@ -71,10 +72,7 @@ public class GodotTTS extends UtteranceProgressListener {
private boolean paused;

public GodotTTS(Activity p_activity) {
synth = new TextToSpeech(p_activity, null);
queue = new LinkedList<GodotUtterance>();

synth.setOnUtteranceProgressListener(this);
activity = p_activity;
}

private void updateTTS() {
@@ -186,6 +184,16 @@ public class GodotTTS extends UtteranceProgressListener {
}
}

/**
* Initialize synth and queue.
*/
public void init() {
synth = new TextToSpeech(activity, null);
queue = new LinkedList<GodotUtterance>();

synth.setOnUtteranceProgressListener(this);
}

/**
* Adds an utterance to the queue.
*/

@@ -158,7 +158,7 @@ JNIEXPORT void JNICALL Java_org_godotengine_godot_GodotLib_setVirtualKeyboardHei
}
}

JNIEXPORT void JNICALL Java_org_godotengine_godot_GodotLib_initialize(JNIEnv *env, jclass clazz, jobject p_activity, jobject p_godot_instance, jobject p_asset_manager, jobject p_godot_io, jobject p_net_utils, jobject p_directory_access_handler, jobject p_file_access_handler, jboolean p_use_apk_expansion, jobject p_godot_tts) {
JNIEXPORT void JNICALL Java_org_godotengine_godot_GodotLib_initialize(JNIEnv *env, jclass clazz, jobject p_activity, jobject p_godot_instance, jobject p_asset_manager, jobject p_godot_io, jobject p_net_utils, jobject p_directory_access_handler, jobject p_file_access_handler, jboolean p_use_apk_expansion) {
JavaVM *jvm;
env->GetJavaVM(&jvm);

@@ -175,7 +175,6 @@ JNIEXPORT void JNICALL Java_org_godotengine_godot_GodotLib_initialize(JNIEnv *en
DirAccessJAndroid::setup(p_directory_access_handler);
FileAccessFilesystemJAndroid::setup(p_file_access_handler);
NetSocketAndroid::setup(p_net_utils);
TTS_Android::setup(p_godot_tts);

os_android = new OS_Android(godot_java, godot_io_java, p_use_apk_expansion);

@@ -186,7 +185,7 @@ JNIEXPORT void JNICALL Java_org_godotengine_godot_GodotLib_ondestroy(JNIEnv *env
_terminate(env, false);
}

JNIEXPORT jboolean JNICALL Java_org_godotengine_godot_GodotLib_setup(JNIEnv *env, jclass clazz, jobjectArray p_cmdline) {
JNIEXPORT jboolean JNICALL Java_org_godotengine_godot_GodotLib_setup(JNIEnv *env, jclass clazz, jobjectArray p_cmdline, jobject p_godot_tts) {
setup_android_thread();

const char **cmdline = nullptr;
@@ -227,6 +226,8 @@ JNIEXPORT jboolean JNICALL Java_org_godotengine_godot_GodotLib_setup(JNIEnv *env
return false;
}

TTS_Android::setup(p_godot_tts);

java_class_wrapper = memnew(JavaClassWrapper(godot_java->get_activity()));
ClassDB::register_class<JNISingleton>();
_initialize_java_modules();

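With this change the TTS Java object reaches the native layer through GodotLib.setup() instead of GodotLib.initialize(), and TTS_Android::setup() runs only after engine setup has succeeded, presumably so the GLOBAL_GET call it performs sees the loaded project settings. A minimal sketch of the underlying JNI pattern (names are illustrative, not the actual Godot bindings):

```cpp
#include <jni.h>

// Keep the Java object and its method ID alive beyond the JNI call that
// delivered them: a local reference dies when the native call returns.
static jobject g_tts = nullptr;
static jmethodID g_init = nullptr;

void tts_setup(JNIEnv *env, jobject p_tts, bool p_enabled) {
	if (!p_enabled) {
		return; // TTS disabled in the project settings: never touch the Java side.
	}
	g_tts = env->NewGlobalRef(p_tts);
	jclass cls = env->GetObjectClass(g_tts);
	// "init" with signature "()V" matches the GodotTTS.init() method added in this PR.
	g_init = env->GetMethodID(cls, "init", "()V");
	if (g_init != nullptr) {
		env->CallVoidMethod(g_tts, g_init);
	}
}
```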
@@ -37,9 +37,9 @@
// These functions can be called from within JAVA and are the means by which our JAVA implementation calls back into our C++ code.
// See java/src/org/godotengine/godot/GodotLib.java for the JAVA side of this (yes that's why we have the long names)
extern "C" {
JNIEXPORT void JNICALL Java_org_godotengine_godot_GodotLib_initialize(JNIEnv *env, jclass clazz, jobject p_activity, jobject p_godot_instance, jobject p_asset_manager, jobject p_godot_io, jobject p_net_utils, jobject p_directory_access_handler, jobject p_file_access_handler, jboolean p_use_apk_expansion, jobject p_godot_tts);
JNIEXPORT void JNICALL Java_org_godotengine_godot_GodotLib_initialize(JNIEnv *env, jclass clazz, jobject p_activity, jobject p_godot_instance, jobject p_asset_manager, jobject p_godot_io, jobject p_net_utils, jobject p_directory_access_handler, jobject p_file_access_handler, jboolean p_use_apk_expansion);
JNIEXPORT void JNICALL Java_org_godotengine_godot_GodotLib_ondestroy(JNIEnv *env, jclass clazz);
JNIEXPORT jboolean JNICALL Java_org_godotengine_godot_GodotLib_setup(JNIEnv *env, jclass clazz, jobjectArray p_cmdline);
JNIEXPORT jboolean JNICALL Java_org_godotengine_godot_GodotLib_setup(JNIEnv *env, jclass clazz, jobjectArray p_cmdline, jobject p_godot_tts);
JNIEXPORT void JNICALL Java_org_godotengine_godot_GodotLib_resize(JNIEnv *env, jclass clazz, jint width, jint height);
JNIEXPORT void JNICALL Java_org_godotengine_godot_GodotLib_newcontext(JNIEnv *env, jclass clazz);
JNIEXPORT void JNICALL Java_org_godotengine_godot_GodotLib_ttsCallback(JNIEnv *env, jclass clazz, jint event, jint id, jint pos);

@@ -35,9 +35,11 @@
#include "string_android.h"
#include "thread_jandroid.h"

bool TTS_Android::initialized = false;
jobject TTS_Android::tts = 0;
jclass TTS_Android::cls = 0;

jmethodID TTS_Android::_init = 0;
jmethodID TTS_Android::_is_speaking = 0;
jmethodID TTS_Android::_is_paused = 0;
jmethodID TTS_Android::_get_voices = 0;
@@ -99,23 +101,33 @@ Vector<char16_t> TTS_Android::str_to_utf16(const String &p_string) {
}

void TTS_Android::setup(jobject p_tts) {
JNIEnv *env = get_jni_env();
bool tts_enabled = GLOBAL_GET("audio/general/text_to_speech");
if (tts_enabled) {
JNIEnv *env = get_jni_env();

tts = env->NewGlobalRef(p_tts);
tts = env->NewGlobalRef(p_tts);

jclass c = env->GetObjectClass(tts);
cls = (jclass)env->NewGlobalRef(c);
jclass c = env->GetObjectClass(tts);
cls = (jclass)env->NewGlobalRef(c);

_is_speaking = env->GetMethodID(cls, "isSpeaking", "()Z");
_is_paused = env->GetMethodID(cls, "isPaused", "()Z");
_get_voices = env->GetMethodID(cls, "getVoices", "()[Ljava/lang/String;");
_speak = env->GetMethodID(cls, "speak", "(Ljava/lang/String;Ljava/lang/String;IFFIZ)V");
_pause_speaking = env->GetMethodID(cls, "pauseSpeaking", "()V");
_resume_speaking = env->GetMethodID(cls, "resumeSpeaking", "()V");
_stop_speaking = env->GetMethodID(cls, "stopSpeaking", "()V");
_init = env->GetMethodID(cls, "init", "()V");
_is_speaking = env->GetMethodID(cls, "isSpeaking", "()Z");
_is_paused = env->GetMethodID(cls, "isPaused", "()Z");
_get_voices = env->GetMethodID(cls, "getVoices", "()[Ljava/lang/String;");
_speak = env->GetMethodID(cls, "speak", "(Ljava/lang/String;Ljava/lang/String;IFFIZ)V");
_pause_speaking = env->GetMethodID(cls, "pauseSpeaking", "()V");
_resume_speaking = env->GetMethodID(cls, "resumeSpeaking", "()V");
_stop_speaking = env->GetMethodID(cls, "stopSpeaking", "()V");

if (_init) {
env->CallVoidMethod(tts, _init);
initialized = true;
}
}
}

void TTS_Android::_java_utterance_callback(int p_event, int p_id, int p_pos) {
ERR_FAIL_COND_MSG(!initialized, "Enable the \"audio/general/text_to_speech\" project setting to use text-to-speech.");
if (ids.has(p_id)) {
int pos = 0;
if ((OS::TTSUtteranceEvent)p_event == OS::TTS_UTTERANCE_BOUNDARY) {
@@ -136,6 +148,7 @@ void TTS_Android::_java_utterance_callback(int p_event, int p_id, int p_pos) {
}

bool TTS_Android::is_speaking() {
ERR_FAIL_COND_V_MSG(!initialized, false, "Enable the \"audio/general/text_to_speech\" project setting to use text-to-speech.");
if (_is_speaking) {
JNIEnv *env = get_jni_env();

@@ -147,6 +160,7 @@ bool TTS_Android::is_speaking() {
}

bool TTS_Android::is_paused() {
ERR_FAIL_COND_V_MSG(!initialized, false, "Enable the \"audio/general/text_to_speech\" project setting to use text-to-speech.");
if (_is_paused) {
JNIEnv *env = get_jni_env();

@@ -158,6 +172,7 @@ bool TTS_Android::is_paused() {
}

Array TTS_Android::get_voices() {
ERR_FAIL_COND_V_MSG(!initialized, Array(), "Enable the \"audio/general/text_to_speech\" project setting to use text-to-speech.");
Array list;
if (_get_voices) {
JNIEnv *env = get_jni_env();
@@ -185,6 +200,7 @@ Array TTS_Android::get_voices() {
}

void TTS_Android::speak(const String &p_text, const String &p_voice, int p_volume, float p_pitch, float p_rate, int p_utterance_id, bool p_interrupt) {
ERR_FAIL_COND_MSG(!initialized, "Enable the \"audio/general/text_to_speech\" project setting to use text-to-speech.");
if (p_interrupt) {
stop();
}
@@ -207,6 +223,7 @@ void TTS_Android::speak(const String &p_text, const String &p_voice, int p_volum
}

void TTS_Android::pause() {
ERR_FAIL_COND_MSG(!initialized, "Enable the \"audio/general/text_to_speech\" project setting to use text-to-speech.");
if (_pause_speaking) {
JNIEnv *env = get_jni_env();

@@ -216,6 +233,7 @@ void TTS_Android::pause() {
}

void TTS_Android::resume() {
ERR_FAIL_COND_MSG(!initialized, "Enable the \"audio/general/text_to_speech\" project setting to use text-to-speech.");
if (_resume_speaking) {
JNIEnv *env = get_jni_env();

@@ -225,6 +243,7 @@ void TTS_Android::resume() {
}

void TTS_Android::stop() {
ERR_FAIL_COND_MSG(!initialized, "Enable the \"audio/general/text_to_speech\" project setting to use text-to-speech.");
const int *k = NULL;
while ((k = ids.next(k))) {
OS::get_singleton()->tts_post_utterance_event(OS::TTS_UTTERANCE_CANCELED, *k);

@@ -33,14 +33,17 @@

#include "core/array.h"
#include "core/os/os.h"
#include "core/project_settings.h"
#include "core/ustring.h"

#include <jni.h>

class TTS_Android {
static bool initialized;
static jobject tts;
static jclass cls;

static jmethodID _init;
static jmethodID _is_speaking;
static jmethodID _is_paused;
static jmethodID _get_voices;

@@ -90,36 +90,43 @@ OSIPhone *OSIPhone::get_singleton() {
};

bool OSIPhone::tts_is_speaking() const {
ERR_FAIL_COND_V_MSG(!tts, false, "Enable the \"audio/general/text_to_speech\" project setting to use text-to-speech.");
ERR_FAIL_COND_V(!tts, false);
return [tts isSpeaking];
}

bool OSIPhone::tts_is_paused() const {
ERR_FAIL_COND_V_MSG(!tts, false, "Enable the \"audio/general/text_to_speech\" project setting to use text-to-speech.");
ERR_FAIL_COND_V(!tts, false);
return [tts isPaused];
}

Array OSIPhone::tts_get_voices() const {
ERR_FAIL_COND_V_MSG(!tts, Array(), "Enable the \"audio/general/text_to_speech\" project setting to use text-to-speech.");
ERR_FAIL_COND_V(!tts, Array());
return [tts getVoices];
}

void OSIPhone::tts_speak(const String &p_text, const String &p_voice, int p_volume, float p_pitch, float p_rate, int p_utterance_id, bool p_interrupt) {
ERR_FAIL_COND_MSG(!tts, "Enable the \"audio/general/text_to_speech\" project setting to use text-to-speech.");
ERR_FAIL_COND(!tts);
[tts speak:p_text voice:p_voice volume:p_volume pitch:p_pitch rate:p_rate utterance_id:p_utterance_id interrupt:p_interrupt];
}

void OSIPhone::tts_pause() {
ERR_FAIL_COND_MSG(!tts, "Enable the \"audio/general/text_to_speech\" project setting to use text-to-speech.");
ERR_FAIL_COND(!tts);
[tts pauseSpeaking];
}

void OSIPhone::tts_resume() {
ERR_FAIL_COND_MSG(!tts, "Enable the \"audio/general/text_to_speech\" project setting to use text-to-speech.");
ERR_FAIL_COND(!tts);
[tts resumeSpeaking];
}

void OSIPhone::tts_stop() {
ERR_FAIL_COND_MSG(!tts, "Enable the \"audio/general/text_to_speech\" project setting to use text-to-speech.");
ERR_FAIL_COND(!tts);
[tts stopSpeaking];
}
@@ -201,7 +208,10 @@ Error OSIPhone::initialize(const VideoMode &p_desired, int p_video_driver, int p
}

// Init TTS
tts = [[TTS_IOS alloc] init];
bool tts_enabled = GLOBAL_GET("audio/general/text_to_speech");
if (tts_enabled) {
tts = [[TTS_IOS alloc] init];
}

visual_server->init();
//visual_server->cursor_set_visible(false, 0);

@@ -31,6 +31,7 @@
#include "os_javascript.h"

#include "core/io/json.h"
#include "core/project_settings.h"
#include "drivers/gles2/rasterizer_gles2.h"
#include "drivers/gles3/rasterizer_gles3.h"
#include "drivers/unix/dir_access_unix.h"
@@ -65,10 +66,12 @@ void OS_JavaScript::request_quit_callback() {
}

bool OS_JavaScript::tts_is_speaking() const {
ERR_FAIL_COND_V_MSG(!tts, false, "Enable the \"audio/general/text_to_speech\" project setting to use text-to-speech.");
return godot_js_tts_is_speaking();
}

bool OS_JavaScript::tts_is_paused() const {
ERR_FAIL_COND_V_MSG(!tts, false, "Enable the \"audio/general/text_to_speech\" project setting to use text-to-speech.");
return godot_js_tts_is_paused();
}

@@ -87,11 +90,13 @@ void OS_JavaScript::update_voices_callback(int p_size, const char **p_voice) {
}

Array OS_JavaScript::tts_get_voices() const {
ERR_FAIL_COND_V_MSG(!tts, Array(), "Enable the \"audio/general/text_to_speech\" project setting to use text-to-speech.");
godot_js_tts_get_voices(update_voices_callback);
return voices;
}

void OS_JavaScript::tts_speak(const String &p_text, const String &p_voice, int p_volume, float p_pitch, float p_rate, int p_utterance_id, bool p_interrupt) {
ERR_FAIL_COND_MSG(!tts, "Enable the \"audio/general/text_to_speech\" project setting to use text-to-speech.");
if (p_interrupt) {
tts_stop();
}
@@ -108,14 +113,17 @@ void OS_JavaScript::tts_speak(const String &p_text, const String &p_voice, int p
}

void OS_JavaScript::tts_pause() {
ERR_FAIL_COND_MSG(!tts, "Enable the \"audio/general/text_to_speech\" project setting to use text-to-speech.");
godot_js_tts_pause();
}

void OS_JavaScript::tts_resume() {
ERR_FAIL_COND_MSG(!tts, "Enable the \"audio/general/text_to_speech\" project setting to use text-to-speech.");
godot_js_tts_resume();
}

void OS_JavaScript::tts_stop() {
ERR_FAIL_COND_MSG(!tts, "Enable the \"audio/general/text_to_speech\" project setting to use text-to-speech.");
for (Map<int, CharString>::Element *E = utterance_ids.front(); E; E = E->next()) {
tts_post_utterance_event(OS::TTS_UTTERANCE_CANCELED, E->key());
}
@@ -1167,6 +1175,8 @@ OS_JavaScript *OS_JavaScript::get_singleton() {
}

OS_JavaScript::OS_JavaScript() {
tts = GLOBAL_GET("audio/general/text_to_speech");

// Expose method for requesting quit.
godot_js_os_request_quit_cb(&request_quit_callback);
// Set canvas ID

@@ -74,6 +74,7 @@ private:
List<AudioDriverJavaScript *> audio_drivers;
VisualServer *visual_server;

bool tts;
bool swap_ok_cancel;
bool idb_available;
bool idb_needs_sync;

@@ -1581,36 +1581,43 @@ int OS_OSX::get_current_video_driver() const {
}

bool OS_OSX::tts_is_speaking() const {
ERR_FAIL_COND_V_MSG(!tts, false, "Enable the \"audio/general/text_to_speech\" project setting to use text-to-speech.");
ERR_FAIL_COND_V(!tts, false);
return [tts isSpeaking];
}

bool OS_OSX::tts_is_paused() const {
ERR_FAIL_COND_V_MSG(!tts, false, "Enable the \"audio/general/text_to_speech\" project setting to use text-to-speech.");
ERR_FAIL_COND_V(!tts, false);
return [tts isPaused];
}

Array OS_OSX::tts_get_voices() const {
ERR_FAIL_COND_V_MSG(!tts, Array(), "Enable the \"audio/general/text_to_speech\" project setting to use text-to-speech.");
ERR_FAIL_COND_V(!tts, Array());
return [tts getVoices];
}

void OS_OSX::tts_speak(const String &p_text, const String &p_voice, int p_volume, float p_pitch, float p_rate, int p_utterance_id, bool p_interrupt) {
ERR_FAIL_COND_MSG(!tts, "Enable the \"audio/general/text_to_speech\" project setting to use text-to-speech.");
ERR_FAIL_COND(!tts);
[tts speak:p_text voice:p_voice volume:p_volume pitch:p_pitch rate:p_rate utterance_id:p_utterance_id interrupt:p_interrupt];
}

void OS_OSX::tts_pause() {
ERR_FAIL_COND_MSG(!tts, "Enable the \"audio/general/text_to_speech\" project setting to use text-to-speech.");
ERR_FAIL_COND(!tts);
[tts pauseSpeaking];
}

void OS_OSX::tts_resume() {
ERR_FAIL_COND_MSG(!tts, "Enable the \"audio/general/text_to_speech\" project setting to use text-to-speech.");
ERR_FAIL_COND(!tts);
[tts resumeSpeaking];
}

void OS_OSX::tts_stop() {
ERR_FAIL_COND_MSG(!tts, "Enable the \"audio/general/text_to_speech\" project setting to use text-to-speech.");
ERR_FAIL_COND(!tts);
[tts stopSpeaking];
}
@@ -1634,7 +1641,10 @@ Error OS_OSX::initialize(const VideoMode &p_desired, int p_video_driver, int p_a
CGDisplayRegisterReconfigurationCallback(displays_arrangement_changed, NULL);

// Init TTS
tts = [[TTS_OSX alloc] init];
bool tts_enabled = GLOBAL_GET("audio/general/text_to_speech");
if (tts_enabled) {
tts = [[TTS_OSX alloc] init];
}

window_delegate = [[GodotWindowDelegate alloc] init];

@@ -256,36 +256,43 @@ void OS_Windows::_touch_event(bool p_pressed, float p_x, float p_y, int idx) {
};

bool OS_Windows::tts_is_speaking() const {
ERR_FAIL_COND_V_MSG(!tts, false, "Enable the \"audio/general/text_to_speech\" project setting to use text-to-speech.");
ERR_FAIL_COND_V(!tts, false);
return tts->is_speaking();
}

bool OS_Windows::tts_is_paused() const {
ERR_FAIL_COND_V_MSG(!tts, false, "Enable the \"audio/general/text_to_speech\" project setting to use text-to-speech.");
ERR_FAIL_COND_V(!tts, false);
return tts->is_paused();
}

Array OS_Windows::tts_get_voices() const {
ERR_FAIL_COND_V_MSG(!tts, Array(), "Enable the \"audio/general/text_to_speech\" project setting to use text-to-speech.");
ERR_FAIL_COND_V(!tts, Array());
return tts->get_voices();
}

void OS_Windows::tts_speak(const String &p_text, const String &p_voice, int p_volume, float p_pitch, float p_rate, int p_utterance_id, bool p_interrupt) {
ERR_FAIL_COND_MSG(!tts, "Enable the \"audio/general/text_to_speech\" project setting to use text-to-speech.");
ERR_FAIL_COND(!tts);
tts->speak(p_text, p_voice, p_volume, p_pitch, p_rate, p_utterance_id, p_interrupt);
}

void OS_Windows::tts_pause() {
ERR_FAIL_COND_MSG(!tts, "Enable the \"audio/general/text_to_speech\" project setting to use text-to-speech.");
ERR_FAIL_COND(!tts);
tts->pause();
}

void OS_Windows::tts_resume() {
ERR_FAIL_COND_MSG(!tts, "Enable the \"audio/general/text_to_speech\" project setting to use text-to-speech.");
ERR_FAIL_COND(!tts);
tts->resume();
}

void OS_Windows::tts_stop() {
ERR_FAIL_COND_MSG(!tts, "Enable the \"audio/general/text_to_speech\" project setting to use text-to-speech.");
ERR_FAIL_COND(!tts);
tts->stop();
}
@@ -1392,7 +1399,10 @@ Error OS_Windows::initialize(const VideoMode &p_desired, int p_video_driver, int
}

// Init TTS
tts = memnew(TTS_Windows);
bool tts_enabled = GLOBAL_GET("audio/general/text_to_speech");
if (tts_enabled) {
tts = memnew(TTS_Windows);
}

use_raw_input = true;

@@ -110,36 +110,43 @@ static String get_atom_name(Display *p_disp, Atom p_atom) {
#ifdef SPEECHD_ENABLED

bool OS_X11::tts_is_speaking() const {
ERR_FAIL_COND_V_MSG(!tts, false, "Enable the \"audio/general/text_to_speech\" project setting to use text-to-speech.");
ERR_FAIL_COND_V(!tts, false);
return tts->is_speaking();
}

bool OS_X11::tts_is_paused() const {
ERR_FAIL_COND_V_MSG(!tts, false, "Enable the \"audio/general/text_to_speech\" project setting to use text-to-speech.");
ERR_FAIL_COND_V(!tts, false);
return tts->is_paused();
}

Array OS_X11::tts_get_voices() const {
ERR_FAIL_COND_V_MSG(!tts, Array(), "Enable the \"audio/general/text_to_speech\" project setting to use text-to-speech.");
ERR_FAIL_COND_V(!tts, Array());
return tts->get_voices();
}

void OS_X11::tts_speak(const String &p_text, const String &p_voice, int p_volume, float p_pitch, float p_rate, int p_utterance_id, bool p_interrupt) {
ERR_FAIL_COND_MSG(!tts, "Enable the \"audio/general/text_to_speech\" project setting to use text-to-speech.");
ERR_FAIL_COND(!tts);
tts->speak(p_text, p_voice, p_volume, p_pitch, p_rate, p_utterance_id, p_interrupt);
}

void OS_X11::tts_pause() {
ERR_FAIL_COND_MSG(!tts, "Enable the \"audio/general/text_to_speech\" project setting to use text-to-speech.");
ERR_FAIL_COND(!tts);
tts->pause();
}

void OS_X11::tts_resume() {
ERR_FAIL_COND_MSG(!tts, "Enable the \"audio/general/text_to_speech\" project setting to use text-to-speech.");
ERR_FAIL_COND(!tts);
tts->resume();
}

void OS_X11::tts_stop() {
ERR_FAIL_COND_MSG(!tts, "Enable the \"audio/general/text_to_speech\" project setting to use text-to-speech.");
ERR_FAIL_COND(!tts);
tts->stop();
}
@@ -421,7 +428,10 @@ Error OS_X11::initialize(const VideoMode &p_desired, int p_video_driver, int p_a

#ifdef SPEECHD_ENABLED
// Init TTS
tts = memnew(TTS_Linux);
bool tts_enabled = GLOBAL_GET("audio/general/text_to_speech");
if (tts_enabled) {
tts = memnew(TTS_Linux);
}
#endif

visual_server = memnew(VisualServerRaster);
@@ -894,7 +904,9 @@ void OS_X11::finalize() {
#endif

#ifdef SPEECHD_ENABLED
memdelete(tts);
if (tts) {
memdelete(tts);
}
#endif

#ifdef JOYDEV_ENABLED