Keep only production-ready translations, move the WIP ones to godot-editor-l10n

- Remove WIP translations and templates; they are moved to godot-editor-l10n,
  which will be the source and target for Weblate contributions.
  * https://github.com/godotengine/godot-editor-l10n
- Re-add translations with a high completion ratio, stripped of untranslated
  and fuzzy strings, and of location and translator comments, to keep the
  size down (see the sketch below).
  * Threshold for the editor interface: 20% complete.
  * Threshold for editor properties: 20% complete.
  * Threshold for the class reference: 10% complete.
Rémi Verschelde 2023-02-07 15:17:55 +01:00
parent 5d7e003b29
commit 097cf5431b
No known key found for this signature in database
GPG Key ID: C3336907360768E1
163 changed files with 311299 additions and 4983507 deletions
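
The stripping described in the commit message can be approximated with the standard gettext tools that the Makefiles in this diff already use. The snippet below is only an illustrative sketch: the file name fr.po and the exact flag combination are assumptions, not the actual commands used by godot-editor-l10n.

    # Keep only translated, non-fuzzy entries and drop the "#: file:line" location comments.
    msgattrib --translated --no-fuzzy --no-location -o fr.stripped.po fr.po
    # Check that the stripped catalog is still valid before re-adding it to the engine repository.
    msgfmt -c fr.stripped.po -o /dev/null

Dropping translator comments would need an extra filtering step, which is omitted here.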


@@ -1,45 +0,0 @@
# Makefile providing various facilities to manage translations

TEMPLATE = classes.pot
POFILES = $(wildcard *.po)
LANGS = $(POFILES:%.po=%)

all: update merge

update:
	@cd ../..; \
	python3 doc/translations/extract.py \
	--path doc/classes modules/*/doc_classes \
	--output doc/translations/$(TEMPLATE)

merge:
	@for po in $(POFILES); do \
		echo -e "\nMerging $$po..."; \
		msgmerge -w 79 -C $$po $$po $(TEMPLATE) > "$$po".new; \
		mv -f "$$po".new $$po; \
		msgattrib --output-file=$$po --no-obsolete $$po; \
	done

check:
	@for po in $(POFILES); do msgfmt -c $$po -o /dev/null; done

# Generate completion ratio from statistics string such as:
# 2775 translated messages, 272 fuzzy translations, 151 untranslated messages.
# First number can be 0, second and third numbers are only present if non-zero.
include-list:
	@list=""; \
	threshold=0.10; \
	for po in $(POFILES); do \
		res=`msgfmt --statistics $$po -o /dev/null 2>&1 | sed 's/[^0-9,]*//g'`; \
		complete=`cut -d',' -f1 <<< $$res`; \
		fuzzy_or_untranslated=`cut -d',' -f2 <<< $$res`; \
		untranslated_maybe=`cut -d',' -f3 <<< $$res`; \
		if [ -z "$$fuzzy_or_untranslated" ]; then fuzzy_or_untranslated=0; fi; \
		if [ -z "$$untranslated_maybe" ]; then untranslated_maybe=0; fi; \
		incomplete=`expr $$fuzzy_or_untranslated + $$untranslated_maybe`; \
		if `awk "BEGIN {exit !($$complete / ($$complete + $$incomplete) > $$threshold)}"`; then \
			lang=`basename $$po .po`; \
			list+="$$lang,"; \
		fi; \
	done; \
	echo $$list;
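
As a worked example of the include-list arithmetic above, take the sample statistics line from the comment (the language code fr is only a placeholder):

    $ msgfmt --statistics fr.po -o /dev/null
    2775 translated messages, 272 fuzzy translations, 151 untranslated messages.

The sed call reduces this to "2775,272,151", so complete=2775 and incomplete=272+151=423. The ratio 2775 / (2775 + 423) ≈ 0.87 exceeds the 0.10 threshold, so "fr," is appended to the printed list.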


@@ -1 +0,0 @@
These `.po` and `.pot` files come from Weblate. Do not modify them manually.



@@ -1,310 +0,0 @@
#!/usr/bin/env python3

import argparse
import os
import shutil
from collections import OrderedDict

EXTRACT_TAGS = ["description", "brief_description", "member", "constant", "theme_item", "link"]
HEADER = """\
# LANGUAGE translation of the Godot Engine class reference.
# Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md).
# Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur.
# This file is distributed under the same license as the Godot source code.
#
# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
#
#, fuzzy
msgid ""
msgstr ""
"Project-Id-Version: Godot Engine class reference\\n"
"Report-Msgid-Bugs-To: https://github.com/godotengine/godot\\n"
"MIME-Version: 1.0\\n"
"Content-Type: text/plain; charset=UTF-8\\n"
"Content-Transfer-Encoding: 8-bit\\n"
"""

# Some strings used by make_rst.py are normally part of the editor translations,
# so we need to include them manually here for the online docs.
BASE_STRINGS = [
    "Description",
    "Tutorials",
    "Properties",
    "Constructors",
    "Methods",
    "Operators",
    "Theme Properties",
    "Signals",
    "Enumerations",
    "Constants",
    "Property Descriptions",
    "Constructor Descriptions",
    "Method Descriptions",
    "Operator Descriptions",
    "Theme Property Descriptions",
    "Inherits:",
    "Inherited By:",
    "(overrides %s)",
    "Default",
    "Setter",
    "value",
    "Getter",
    "This method should typically be overridden by the user to have any effect.",
    "This method has no side effects. It doesn't modify any of the instance's member variables.",
    "This method accepts any number of arguments after the ones described here.",
    "This method is used to construct a type.",
    "This method doesn't need an instance to be called, so it can be called directly using the class name.",
    "This method describes a valid operator to use with this type as left-hand operand.",
]

## <xml-line-number-hack from="https://stackoverflow.com/a/36430270/10846399">
import sys

sys.modules["_elementtree"] = None  # type: ignore
import xml.etree.ElementTree as ET


## override the parser to get the line number
class LineNumberingParser(ET.XMLParser):
    def _start(self, *args, **kwargs):
        ## Here we assume the default XML parser which is expat
        ## and copy its element position attributes into output Elements
        element = super(self.__class__, self)._start(*args, **kwargs)
        element._start_line_number = self.parser.CurrentLineNumber
        element._start_column_number = self.parser.CurrentColumnNumber
        element._start_byte_index = self.parser.CurrentByteIndex
        return element

    def _end(self, *args, **kwargs):
        element = super(self.__class__, self)._end(*args, **kwargs)
        element._end_line_number = self.parser.CurrentLineNumber
        element._end_column_number = self.parser.CurrentColumnNumber
        element._end_byte_index = self.parser.CurrentByteIndex
        return element


## </xml-line-number-hack>


class Desc:
    def __init__(self, line_no, msg, desc_list=None):
        ## line_no : the line number where the desc is
        ## msg : the description string
        ## desc_list : the DescList it belongs to
        self.line_no = line_no
        self.msg = msg
        self.desc_list = desc_list


class DescList:
    def __init__(self, doc, path):
        ## doc : root xml element of the document
        ## path : file path of the xml document
        ## list : list of Desc objects for this document
        self.doc = doc
        self.path = path
        self.list = []


def print_error(error):
    print("ERROR: {}".format(error))


## build classes with xml elements recursively
def _collect_classes_dir(path, classes):
    if not os.path.isdir(path):
        print_error("Invalid directory path: {}".format(path))
        exit(1)
    for _dir in map(lambda dir: os.path.join(path, dir), os.listdir(path)):
        if os.path.isdir(_dir):
            _collect_classes_dir(_dir, classes)
        elif os.path.isfile(_dir):
            if not _dir.endswith(".xml"):
                # print("Got non-.xml file '{}', skipping.".format(path))
                continue
            _collect_classes_file(_dir, classes)


## opens a file and parse xml add to classes
def _collect_classes_file(path, classes):
    if not os.path.isfile(path) or not path.endswith(".xml"):
        print_error("Invalid xml file path: {}".format(path))
        exit(1)
    print("Collecting file: {}".format(os.path.basename(path)))
    try:
        tree = ET.parse(path, parser=LineNumberingParser())
    except ET.ParseError as e:
        print_error("Parse error reading file '{}': {}".format(path, e))
        exit(1)

    doc = tree.getroot()

    if "name" in doc.attrib:
        if "version" not in doc.attrib:
            print_error("Version missing from 'doc', file: {}".format(path))

        name = doc.attrib["name"]
        if name in classes:
            print_error("Duplicate class {} at path {}".format(name, path))
            exit(1)
        classes[name] = DescList(doc, path)
    else:
        print_error("Unknown XML file {}, skipping".format(path))


## regions are list of tuples with size 3 (start_index, end_index, indent)
## indication in string where the codeblock starts, ends, and it's indent
## if i inside the region returns the indent, else returns -1
def _get_xml_indent(i, regions):
    for region in regions:
        if region[0] < i < region[1]:
            return region[2]
    return -1


## find and build all regions of codeblock which we need later
def _make_codeblock_regions(desc, path=""):
    code_block_end = False
    code_block_index = 0
    code_block_regions = []
    while not code_block_end:
        code_block_index = desc.find("[codeblock]", code_block_index)
        if code_block_index < 0:
            break
        xml_indent = 0
        while True:
            ## [codeblock] always have a trailing new line and some tabs
            ## those tabs are belongs to xml indentations not code indent
            if desc[code_block_index + len("[codeblock]\n") + xml_indent] == "\t":
                xml_indent += 1
            else:
                break
        end_index = desc.find("[/codeblock]", code_block_index)
        if end_index < 0:
            print_error("Non terminating codeblock: {}".format(path))
            exit(1)
        code_block_regions.append((code_block_index, end_index, xml_indent))
        code_block_index += 1
    return code_block_regions


def _strip_and_split_desc(desc, code_block_regions):
    desc_strip = ""  ## a stripped desc msg
    total_indent = 0  ## code indent = total indent - xml indent
    for i in range(len(desc)):
        c = desc[i]
        if c == "\n":
            c = "\\n"
        if c == '"':
            c = '\\"'
        if c == "\\":
            c = "\\\\"  ## <element \> is invalid for msgmerge
        if c == "\t":
            xml_indent = _get_xml_indent(i, code_block_regions)
            if xml_indent >= 0:
                total_indent += 1
                if xml_indent < total_indent:
                    c = "\\t"
                else:
                    continue
            else:
                continue
        desc_strip += c
        if c == "\\n":
            total_indent = 0
    return desc_strip


## make catalog strings from xml elements
def _make_translation_catalog(classes):
    unique_msgs = OrderedDict()
    for class_name in classes:
        desc_list = classes[class_name]
        for elem in desc_list.doc.iter():
            if elem.tag in EXTRACT_TAGS:
                elem_text = elem.text
                if elem.tag == "link":
                    elem_text = elem.attrib["title"] if "title" in elem.attrib else ""
                if not elem_text or len(elem_text) == 0:
                    continue

                line_no = elem._start_line_number if elem_text[0] != "\n" else elem._start_line_number + 1
                desc_str = elem_text.strip()
                code_block_regions = _make_codeblock_regions(desc_str, desc_list.path)
                desc_msg = _strip_and_split_desc(desc_str, code_block_regions)
                desc_obj = Desc(line_no, desc_msg, desc_list)
                desc_list.list.append(desc_obj)

                if desc_msg not in unique_msgs:
                    unique_msgs[desc_msg] = [desc_obj]
                else:
                    unique_msgs[desc_msg].append(desc_obj)
    return unique_msgs


## generate the catalog file
def _generate_translation_catalog_file(unique_msgs, output, location_line=False):
    with open(output, "w", encoding="utf8") as f:
        f.write(HEADER)
        for msg in BASE_STRINGS:
            f.write("#: doc/tools/make_rst.py\n")
            f.write('msgid "{}"\n'.format(msg))
            f.write('msgstr ""\n\n')
        for msg in unique_msgs:
            if len(msg) == 0 or msg in BASE_STRINGS:
                continue

            f.write("#:")
            desc_list = unique_msgs[msg]
            for desc in desc_list:
                path = desc.desc_list.path.replace("\\", "/")
                if path.startswith("./"):
                    path = path[2:]
                if location_line:  # Can be skipped as diffs on line numbers are spammy.
                    f.write(" {}:{}".format(path, desc.line_no))
                else:
                    f.write(" {}".format(path))
            f.write("\n")

            f.write('msgid "{}"\n'.format(msg))
            f.write('msgstr ""\n\n')

    ## TODO: what if 'nt'?
    if os.name == "posix":
        print("Wrapping template at 79 characters for compatibility with Weblate.")
        os.system("msgmerge -w79 {0} {0} > {0}.wrap".format(output))
        shutil.move("{}.wrap".format(output), output)


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--path", "-p", nargs="+", default=".", help="The directory or directories containing XML files to collect."
    )
    parser.add_argument("--output", "-o", default="translation_catalog.pot", help="The path to the output file.")
    args = parser.parse_args()

    output = os.path.abspath(args.output)
    if not os.path.isdir(os.path.dirname(output)) or not output.endswith(".pot"):
        print_error("Invalid output path: {}".format(output))
        exit(1)

    classes = OrderedDict()
    for path in args.path:
        if not os.path.isdir(path):
            print_error("Invalid working directory path: {}".format(path))
            exit(1)

        print("\nCurrent working dir: {}".format(path))

        path_classes = OrderedDict()  ## dictionary of key=class_name, value=DescList objects
        _collect_classes_dir(path, path_classes)
        classes.update(path_classes)

    classes = OrderedDict(sorted(classes.items(), key=lambda kv: kv[0].lower()))

    unique_msgs = _make_translation_catalog(classes)
    _generate_translation_catalog_file(unique_msgs, output)


if __name__ == "__main__":
    main()



@@ -1,42 +0,0 @@
# Makefile providing various facilities to manage translations

TEMPLATE = editor.pot
POFILES = $(wildcard *.po)
LANGS = $(POFILES:%.po=%)

all: update merge

update:
	@cd ../..; python3 editor/translations/extract.py

merge:
	@for po in $(POFILES); do \
		echo -e "\nMerging $$po..."; \
		msgmerge -w 79 -C $$po $$po $(TEMPLATE) > "$$po".new; \
		mv -f "$$po".new $$po; \
		msgattrib --output-file=$$po --no-obsolete $$po; \
	done

check:
	@for po in $(POFILES); do msgfmt -c $$po -o /dev/null; done

# Generate completion ratio from statistics string such as:
# 2775 translated messages, 272 fuzzy translations, 151 untranslated messages.
# First number can be 0, second and third numbers are only present if non-zero.
include-list:
	@list=""; \
	threshold=0.20; \
	for po in $(POFILES); do \
		res=`msgfmt --statistics $$po -o /dev/null 2>&1 | sed 's/[^0-9,]*//g'`; \
		complete=`cut -d',' -f1 <<< $$res`; \
		fuzzy_or_untranslated=`cut -d',' -f2 <<< $$res`; \
		untranslated_maybe=`cut -d',' -f3 <<< $$res`; \
		if [ -z "$$fuzzy_or_untranslated" ]; then fuzzy_or_untranslated=0; fi; \
		if [ -z "$$untranslated_maybe" ]; then untranslated_maybe=0; fi; \
		incomplete=`expr $$fuzzy_or_untranslated + $$untranslated_maybe`; \
		if `awk "BEGIN {exit !($$complete / ($$complete + $$incomplete) > $$threshold)}"`; then \
			lang=`basename $$po .po`; \
			list+="$$lang,"; \
		fi; \
	done; \
	echo $$list;


@@ -1,23 +0,0 @@
# How to contribute translations

Godot's translation work is coordinated on
[Hosted Weblate](https://hosted.weblate.org/projects/godot-engine/godot),
an open-source, web-based translation platform where contributors can work
together on translations using various internationalization features.

Creating an account there is free, and you can also log in directly with
your GitHub, Bitbucket, Google or Facebook account.

To avoid merge conflicts when syncing translations from Weblate (currently
this is done manually), we ask all contributors to work there instead of
making pull requests on this repository.

Link if you missed it: https://hosted.weblate.org/projects/godot-engine/godot

## Adding new languages

If you want to translate into a language that is not yet featured on Weblate,
you can add it (when logged in) by clicking the "Start new translation"
button at the bottom of the page.

Alternatively, you can use this
[direct link](https://hosted.weblate.org/new-lang/godot-engine/godot/).



@@ -1,341 +0,0 @@
#!/bin/python

import enum
import fnmatch
import os
import os.path
import re
import shutil
import subprocess
import sys
from typing import Dict, Tuple


class Message:
    __slots__ = ("msgid", "msgid_plural", "msgctxt", "comments", "locations")

    def format(self):
        lines = []

        if self.comments:
            for i, content in enumerate(self.comments):
                prefix = "#. TRANSLATORS:" if i == 0 else "#."
                lines.append(prefix + content)

        lines.append("#: " + " ".join(self.locations))

        if self.msgctxt:
            lines.append('msgctxt "{}"'.format(self.msgctxt))

        if self.msgid_plural:
            lines += [
                'msgid "{}"'.format(self.msgid),
                'msgid_plural "{}"'.format(self.msgid_plural),
                'msgstr[0] ""',
                'msgstr[1] ""',
            ]
        else:
            lines += [
                'msgid "{}"'.format(self.msgid),
                'msgstr ""',
            ]

        return "\n".join(lines)


messages_map: Dict[Tuple[str, str], Message] = {}  # (id, context) -> Message.

line_nb = False

for arg in sys.argv[1:]:
    if arg == "--with-line-nb":
        print("Enabling line numbers in the context locations.")
        line_nb = True
    else:
        sys.exit("Non supported argument '" + arg + "'. Aborting.")


if not os.path.exists("editor"):
    sys.exit("ERROR: This script should be started from the root of the git repo.")


matches = []
for root, dirnames, filenames in os.walk("."):
    dirnames[:] = [d for d in dirnames if d not in ["thirdparty"]]
    for filename in fnmatch.filter(filenames, "*.cpp"):
        matches.append(os.path.join(root, filename))
    for filename in fnmatch.filter(filenames, "*.h"):
        matches.append(os.path.join(root, filename))
matches.sort()


remaps = {}
remap_re = re.compile(r'^\t*capitalize_string_remaps\["(?P<from>.+)"\] = (String::utf8\()?"(?P<to>.+)"')
stop_words = set()
stop_words_re = re.compile(r'^\t*"(?P<word>.+)",')
is_inside_stop_words = False
with open("editor/editor_property_name_processor.cpp") as f:
    for line in f:
        if is_inside_stop_words:
            m = stop_words_re.search(line)
            if m:
                stop_words.add(m.group("word"))
            else:
                is_inside_stop_words = False
        else:
            m = remap_re.search(line)
            if m:
                remaps[m.group("from")] = m.group("to")
        if not is_inside_stop_words and not stop_words:
            is_inside_stop_words = "stop_words = " in line


main_po = """
# LANGUAGE translation of the Godot Engine editor.
# Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md).
# Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur.
# This file is distributed under the same license as the Godot source code.
#
# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
#
#, fuzzy
msgid ""
msgstr ""
"Project-Id-Version: Godot Engine editor\\n"
"Report-Msgid-Bugs-To: https://github.com/godotengine/godot\\n"
"MIME-Version: 1.0\\n"
"Content-Type: text/plain; charset=UTF-8\\n"
"Content-Transfer-Encoding: 8-bit\\n"\n
"""


class ExtractType(enum.IntEnum):
    TEXT = 1
    PROPERTY_PATH = 2
    GROUP = 3
    SUBGROUP = 4


# Regex "(?P<name>([^"\\]|\\.)*)" creates a group named `name` that matches a string.
message_patterns = {
    re.compile(r'RTR\("(?P<message>([^"\\]|\\.)*)"(, "(?P<context>([^"\\]|\\.)*)")?\)'): ExtractType.TEXT,
    re.compile(r'TTR\("(?P<message>([^"\\]|\\.)*)"(, "(?P<context>([^"\\]|\\.)*)")?\)'): ExtractType.TEXT,
    re.compile(r'TTRC\("(?P<message>([^"\\]|\\.)*)"\)'): ExtractType.TEXT,
    re.compile(
        r'TTRN\("(?P<message>([^"\\]|\\.)*)", "(?P<plural_message>([^"\\]|\\.)*)",[^,)]+?(, "(?P<context>([^"\\]|\\.)*)")?\)'
    ): ExtractType.TEXT,
    re.compile(
        r'RTRN\("(?P<message>([^"\\]|\\.)*)", "(?P<plural_message>([^"\\]|\\.)*)",[^,)]+?(, "(?P<context>([^"\\]|\\.)*)")?\)'
    ): ExtractType.TEXT,
    re.compile(r'_initial_set\("(?P<message>[^"]+?)",'): ExtractType.PROPERTY_PATH,
    re.compile(r'GLOBAL_DEF(_RST)?(_NOVAL)?(_BASIC)?\("(?P<message>[^"]+?)",'): ExtractType.PROPERTY_PATH,
    re.compile(r'EDITOR_DEF(_RST)?\("(?P<message>[^"]+?)",'): ExtractType.PROPERTY_PATH,
    re.compile(
        r'EDITOR_SETTING(_USAGE)?\(Variant::[_A-Z0-9]+, [_A-Z0-9]+, "(?P<message>[^"]+?)",'
    ): ExtractType.PROPERTY_PATH,
    re.compile(
        r"(ADD_PROPERTYI?|ImportOption|ExportOption)\(PropertyInfo\("
        + r"Variant::[_A-Z0-9]+"  # Name
        + r', "(?P<message>[^"]+)"'  # Type
        + r'(, [_A-Z0-9]+(, "([^"\\]|\\.)*"(, (?P<usage>[_A-Z0-9]+))?)?|\))'  # [, hint[, hint string[, usage]]].
    ): ExtractType.PROPERTY_PATH,
    re.compile(r'ADD_ARRAY\("(?P<message>[^"]+)", '): ExtractType.PROPERTY_PATH,
    re.compile(r'ADD_ARRAY_COUNT(_WITH_USAGE_FLAGS)?\("(?P<message>[^"]+)", '): ExtractType.TEXT,
    re.compile(r'(ADD_GROUP|GNAME)\("(?P<message>[^"]+)", "(?P<prefix>[^"]*)"\)'): ExtractType.GROUP,
    re.compile(r'ADD_GROUP_INDENT\("(?P<message>[^"]+)", "(?P<prefix>[^"]*)", '): ExtractType.GROUP,
    re.compile(r'ADD_SUBGROUP\("(?P<message>[^"]+)", "(?P<prefix>[^"]*)"\)'): ExtractType.SUBGROUP,
    re.compile(r'ADD_SUBGROUP_INDENT\("(?P<message>[^"]+)", "(?P<prefix>[^"]*)", '): ExtractType.GROUP,
    re.compile(r'PNAME\("(?P<message>[^"]+)"\)'): ExtractType.PROPERTY_PATH,
}
theme_property_patterns = {
    re.compile(r'set_(constant|font|font_size|stylebox|color|icon)\("(?P<message>[^"]+)", '): ExtractType.PROPERTY_PATH,
}


# See String::_camelcase_to_underscore().
capitalize_re = re.compile(r"(?<=\D)(?=\d)|(?<=\d)(?=\D([a-z]|\d))")


def _process_editor_string(name):
    # See EditorPropertyNameProcessor::process_string().
    capitalized_parts = []
    parts = list(filter(bool, name.split("_")))  # Non-empty only.
    for i, segment in enumerate(parts):
        if i > 0 and i + 1 < len(parts) and segment in stop_words:
            capitalized_parts.append(segment)
            continue
        remapped = remaps.get(segment)
        if remapped:
            capitalized_parts.append(remapped)
        else:
            # See String::capitalize().
            # fmt: off
            capitalized_parts.append(" ".join(
                part.title()
                for part in capitalize_re.sub("_", segment).replace("_", " ").split()
            ))
            # fmt: on

    return " ".join(capitalized_parts)


def _is_block_translator_comment(translator_line):
    line = translator_line.strip()
    if line.find("//") == 0:
        return False
    else:
        return True


def _extract_translator_comment(line, is_block_translator_comment):
    line = line.strip()
    reached_end = False
    extracted_comment = ""

    start = line.find("TRANSLATORS:")
    if start == -1:
        start = 0
    else:
        start += len("TRANSLATORS:")

    if is_block_translator_comment:
        # If '*/' is found, then it's the end.
        if line.rfind("*/") != -1:
            extracted_comment = line[start : line.rfind("*/")]
            reached_end = True
        else:
            extracted_comment = line[start:]
    else:
        # If beginning is not '//', then it's the end.
        if line.find("//") != 0:
            reached_end = True
        else:
            start = 2 if start == 0 else start
            extracted_comment = line[start:]

    return (not reached_end, extracted_comment)


def process_file(f, fname):
    l = f.readline()
    lc = 1
    reading_translator_comment = False
    is_block_translator_comment = False
    translator_comment = ""
    current_group = ""
    current_subgroup = ""

    patterns = message_patterns
    if os.path.basename(fname) == "default_theme.cpp":
        patterns = {**message_patterns, **theme_property_patterns}

    while l:
        # Detect translator comments.
        if not reading_translator_comment and l.find("TRANSLATORS:") != -1:
            reading_translator_comment = True
            is_block_translator_comment = _is_block_translator_comment(l)
            translator_comment = ""

        # Gather translator comments. It will be gathered for the next translation function.
        if reading_translator_comment:
            reading_translator_comment, extracted_comment = _extract_translator_comment(l, is_block_translator_comment)
            if extracted_comment != "":
                translator_comment += extracted_comment + "\n"
            if not reading_translator_comment:
                translator_comment = translator_comment[:-1]  # Remove extra \n at the end.

        if not reading_translator_comment:
            for pattern, extract_type in patterns.items():
                for m in pattern.finditer(l):
                    location = os.path.relpath(fname).replace("\\", "/")
                    if line_nb:
                        location += ":" + str(lc)

                    captures = m.groupdict("")
                    msg = captures.get("message", "")
                    msg_plural = captures.get("plural_message", "")
                    msgctx = captures.get("context", "")

                    if extract_type == ExtractType.TEXT:
                        _add_message(msg, msg_plural, msgctx, location, translator_comment)
                    elif extract_type == ExtractType.PROPERTY_PATH:
                        if captures.get("usage") == "PROPERTY_USAGE_NO_EDITOR":
                            continue

                        if current_subgroup:
                            if msg.startswith(current_subgroup):
                                msg = msg[len(current_subgroup) :]
                            elif current_subgroup.startswith(msg):
                                pass  # Keep this as-is. See EditorInspector::update_tree().
                            else:
                                current_subgroup = ""
                        elif current_group:
                            if msg.startswith(current_group):
                                msg = msg[len(current_group) :]
                            elif current_group.startswith(msg):
                                pass  # Keep this as-is. See EditorInspector::update_tree().
                            else:
                                current_group = ""
                                current_subgroup = ""

                        if "." in msg:  # Strip feature tag.
                            msg = msg.split(".", 1)[0]
                        for part in msg.split("/"):
                            _add_message(_process_editor_string(part), msg_plural, msgctx, location, translator_comment)
                    elif extract_type == ExtractType.GROUP:
                        _add_message(msg, msg_plural, msgctx, location, translator_comment)
                        current_group = captures["prefix"]
                        current_subgroup = ""
                    elif extract_type == ExtractType.SUBGROUP:
                        _add_message(msg, msg_plural, msgctx, location, translator_comment)
                        current_subgroup = captures["prefix"]
            translator_comment = ""

        l = f.readline()
        lc += 1


def _add_message(msg, msg_plural, msgctx, location, translator_comment):
    key = (msg, msgctx)
    message = messages_map.get(key)
    if not message:
        message = Message()
        message.msgid = msg
        message.msgid_plural = msg_plural
        message.msgctxt = msgctx
        message.locations = []
        message.comments = []
        messages_map[key] = message
    if location not in message.locations:
        message.locations.append(location)
    if translator_comment and translator_comment not in message.comments:
        message.comments.append(translator_comment)


print("Updating the editor.pot template...")

for fname in matches:
    with open(fname, "r", encoding="utf8") as f:
        process_file(f, fname)

main_po += "\n\n".join(message.format() for message in messages_map.values())

with open("editor.pot", "w") as f:
    f.write(main_po)

if os.name == "posix":
    print("Wrapping template at 79 characters for compatibility with Weblate.")
    os.system("msgmerge -w79 editor.pot editor.pot > editor.pot.wrap")
    shutil.move("editor.pot.wrap", "editor.pot")

shutil.move("editor.pot", "editor/translations/editor.pot")

# TODO: Make that in a portable way, if we care; if not, kudos to Unix users
if os.name == "posix":
    added = subprocess.check_output(r"git diff editor/translations/editor.pot | grep \+msgid | wc -l", shell=True)
    removed = subprocess.check_output(r"git diff editor/translations/editor.pot | grep \\\-msgid | wc -l", shell=True)
    print("\n# Template changes compared to the staged status:")
    print("# Additions: %s msgids.\n# Deletions: %s msgids." % (int(added), int(removed)))

Some files were not shown because too many files have changed in this diff.