# __init__.py
  1. import logging
  2. from functools import cache
  3. from os import path, access, R_OK
  4. from yaml import load
  5. try:
  6. from yaml import CLoader as Loader
  7. except ImportError:
  8. from yaml import Loader
  9. __doc__ = """
  10. Transliteration tables.
  11. These tables contain all transliteration information, grouped by script and
  12. language (or language and script? TBD)
  13. """
  14. TABLE_DIR = path.join(path.dirname(path.realpath(__file__)), "data")
  15. logger = logging.getLogger(__name__)
  16. class Token:
  17. """
  18. Token class: minimal unit of text parsing.
  19. This class overrides the `<` operator for strings, so that sorting is done
  20. in a way that prioritizes a longer string over a shorter one with identical
  21. root.
  22. """
  23. def __init__(self, content):
  24. self.content = content
  25. def __lt__(self, other):
  26. """
  27. Operator to sort tokens.
  28. E.g:
  29. - ABCD
  30. - AB
  31. - A
  32. - BCDE
  33. - BCD
  34. - BEFGH
  35. - B
  36. """
  37. logger.debug(f"a: {self.content}, b: {other.content}")
  38. self_len = len(self.content)
  39. other_len = len(other.content)
  40. min_len = min(self_len, other_len)
  41. # If one of the strings is entirely contained in the other string...
  42. if self.content[:min_len] == other.content[:min_len]:
  43. logger.debug("Roots match.")
  44. # ...then the longer one takes precedence (is "less")
  45. return self_len > other_len
  46. # If the root strings are different, perform a normal comparison.
  47. return self.content < other.content
  48. @cache
  49. def list_tables():
  50. """
  51. List all the available tables.
  52. """
  53. with open(path.join(TABLE_DIR, "index.yml")) as fh:
  54. tdata = load(fh, Loader=Loader)
  55. return tdata
  56. @cache
  57. def load_table(tname):
  58. """
  59. Load one transliteration table and possible parent.
  60. The table file is parsed into an in-memory configuration that contains
  61. the language & script metadata and parsing rules.
  62. """
  63. fname = path.join(TABLE_DIR, tname + ".yml")
  64. if not access(fname, R_OK):
  65. raise ValueError(f"No transliteration table for {tname}!")
  66. with open(fname) as fh:
  67. tdata = load(fh, Loader=Loader)
  68. # NOTE Only one level of inheritance. No need for recursion for now.
  69. parent = tdata.get("general", {}).get("inherits", None)
  70. if parent:
  71. parent_tdata = load_table(parent)
  72. if "script_to_roman" in tdata:
  73. tokens = {
  74. Token(k): v
  75. for k, v in tdata["script_to_roman"].get("map", {}).items()}
  76. if parent:
  77. # Merge (and override) parent values.
  78. tokens = {
  79. Token(k): v for k, v in parent_tdata.get(
  80. "script_to_roman", {}).get("map", {})
  81. } | tokens
  82. tdata["script_to_roman"]["map"] = tuple(
  83. (k.content, tokens[k]) for k in sorted(tokens))
  84. if "roman_to_script" in tdata:
  85. tokens = {
  86. Token(k): v
  87. for k, v in tdata["roman_to_script"].get("map", {}).items()}
  88. if parent:
  89. # Merge (and override) parent values.
  90. tokens = {
  91. Token(k): v for k, v in parent_tdata.get(
  92. "roman_to_script", {}).get("map", {})
  93. } | tokens
  94. tdata["roman_to_script"]["map"] = tuple(
  95. (k.content, tokens[k]) for k in sorted(tokens))
  96. if parent:
  97. p_ignore = {
  98. Token(t) for t in parent_tdata.get(
  99. "roman_to_script", {}).get("ignore", [])}
  100. else:
  101. p_ignore = set()
  102. ignore = {
  103. Token(t)
  104. for t in tdata["roman_to_script"].get("ignore", [])
  105. } | p_ignore
  106. tdata["roman_to_script"]["ignore"] = [
  107. t.content for t in sorted(ignore)]
  108. return tdata