trans.py

import logging

from importlib import import_module
from re import Pattern, compile
from unicodedata import normalize as precomp_normalize

from scriptshifter.exceptions import BREAK, CONT
from scriptshifter.tables import (
        BOW, EOW, WORD_BOUNDARY, FEAT_R2S, FEAT_S2R, HOOK_PKG_PATH,
        get_connection, get_lang_dcap, get_lang_general, get_lang_hooks,
        get_lang_ignore, get_lang_map, get_lang_normalize)


# Match multiple spaces.
MULTI_WS_RE = compile(r"(\s){2,}")
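
# Illustrative note (not in the original source): since the pattern captures a
# single whitespace character repeated two or more times, substituting with
# r"\1" keeps only the last character of each whitespace run, e.g.
# MULTI_WS_RE.sub(r"\1", "a  b\t\tc") yields "a b\tc".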

logger = logging.getLogger(__name__)


class Transliterator:
    """
    Context carrying the state of the transliteration process.

    Use within a `with` block for proper cleanup.
    """
    @property
    def src(self):
        return self._src

    @src.setter
    def src(self, value):
        raise NotImplementedError("Attribute is read-only.")

    @src.deleter
    def src(self):
        raise NotImplementedError("Attribute is read-only.")

    @property
    def cur_char(self):
        return self.src[self.cur]

    def __init__(self, lang, src, t_dir, options=None):
        """
        Initialize a context.

        Args:
            src (str): The original text. Read-only.
            t_dir (int): The direction of transliteration.
                Either FEAT_R2S or FEAT_S2R.
            options (dict): Extra options as a dict. Defaults to an empty
                dict (a None default is used to avoid a shared mutable
                default argument).
        """
        self.lang = lang
        self._src = src
        self.t_dir = t_dir
        self.conn = get_connection()
        with self.conn as conn:
            general = get_lang_general(conn, self.lang)
        self.general = general["data"]
        self.lang_id = general["id"]
        self.options = options if options is not None else {}
        self.hooks = get_lang_hooks(self.conn, self.lang_id, self.t_dir)
        self.dest_ls = []
        self.warnings = []

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.conn.close()

    def run_hook(self, hname):
        ret = None
        for hook_def in self.hooks.get(hname, []):
            fn = getattr(
                import_module("." + hook_def["module_name"], HOOK_PKG_PATH),
                hook_def["fn_name"]
            )
            ret = fn(self, **hook_def["kwargs"])
            if ret in (BREAK, CONT):
                # This stops processing hook functions and tells the caller
                # to break out of the outer loop or skip the iteration.
                return ret

        return ret
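
    # Illustrative note (not in the original source): as used above, each
    # hook definition returned by get_lang_hooks carries the module and
    # function to call plus static keyword arguments, e.g. (hypothetical
    # values):
    #
    #     {"module_name": "my_lang", "fn_name": "post_config", "kwargs": {}}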

    def normalize_src(self):
        """
        Normalize source text according to rules.

        NOTE: this manipulates the protected source attribute, so it may not
        correspond to the originally provided source.
        """
        # Normalize precomposed Unicode characters.
        #
        # In using diacritics, LC standards prefer the decomposed form
        # (combining diacritic + base character) to the precomposed form
        # (single Unicode symbol for the letter with diacritic).
        #
        # Note: only safe for R2S.
        if self.t_dir == FEAT_R2S:
            logger.debug("Normalizing pre-composed symbols.")
            self._src = precomp_normalize("NFD", self.src)

        norm_rules = get_lang_normalize(self.conn, self.lang_id)

        for nk, nv in norm_rules.items():
            self._src = self.src.replace(nk, nv)

        return self.run_hook("post_normalize")
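
    # Illustrative note (not in the original source): NFD decomposition
    # splits a precomposed character into base + combining mark, e.g.
    # precomp_normalize("NFD", "\u00e9") == "e\u0301" ("é" as "e" plus a
    # combining acute accent).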

    def cur_at_bow(self, cur=None):
        """
        Check if the cursor is at the beginning of a word.

        @param cur (int): Position to check. Defaults to the current cursor.
        """
        if cur is None:
            cur = self.cur
        return (
            cur == 0
            or self.src[cur - 1] in WORD_BOUNDARY
        ) and (self.src[cur] not in WORD_BOUNDARY)

    def cur_at_eow(self, cur=None):
        """
        Check if the cursor is at the end of a word.

        @param cur (int): Position to check. Defaults to the current cursor.
        """
        if cur is None:
            cur = self.cur
        return (
            cur == len(self.src) - 1
            or self.src[cur + 1] in WORD_BOUNDARY
        ) and (self.src[cur] not in WORD_BOUNDARY)
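
    # Illustrative note (not in the original source): assuming WORD_BOUNDARY
    # includes the space character, for src == "ab cd" cur_at_bow(3) is True
    # (src[2] is a space and src[3] is "c"), and cur_at_eow(1) is True
    # (src[2] is a space and src[1] is "b").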


def transliterate(src, lang, t_dir="s2r", capitalize=False, options=None):
    """
    Transliterate a single string.

    Args:
        src (str): Source string.
        lang (str): Language name.
        t_dir (str): Transliteration direction: either `s2r` for
            script-to-Roman (default) or `r2s` for Roman-to-script. With
            `s2r`, the source is taken to be non-Latin text in the specified
            language and script, and the output is its Romanization; with
            `r2s`, the source is taken to be Romanized text to be
            transliterated into the specified script/language.
        capitalize: Capitalize words: one of `False` (no change, default),
            `"first"` (only the first letter), or `"all"` (first letter of
            each word).
        options (dict): Extra script-dependent options. Defaults to an empty
            dict.

    Return:
        tuple: The transliterated string and a list of warnings.
    """
    # Map t_dir to constant.
    t_dir = FEAT_S2R if t_dir == "s2r" else FEAT_R2S

    source_str = "Roman" if t_dir == FEAT_R2S else lang
    target_str = lang if t_dir == FEAT_R2S else "Roman"
    logger.info(f"Transliteration is from {source_str} to {target_str}.")

    src = src.strip()
    # Avoid mutating a shared default dict.
    options = options if options is not None else {}
    options["capitalize"] = capitalize

    with Transliterator(lang, src, t_dir, options) as ctx:
        if t_dir == FEAT_S2R and not ctx.general["has_s2r"]:
            raise NotImplementedError(
                f"Script-to-Roman not yet supported for {lang}."
            )
        if t_dir == FEAT_R2S and not ctx.general["has_r2s"]:
            raise NotImplementedError(
                f"Roman-to-script not yet supported for {lang}."
            )

        # Normalize case before post_config and rule-based normalization.
        if t_dir == FEAT_R2S and not ctx.general["case_sensitive"]:
            ctx._src = ctx.src.lower()

        # This hook may take over the whole transliteration process or
        # delegate it to some external process, and return the output string
        # directly.
        if ctx.run_hook("post_config") == BREAK:
            return getattr(ctx, "dest", ""), ctx.warnings

        # ctx.normalize_src returns the result of the post_normalize hook.
        if ctx.normalize_src() == BREAK:
            return getattr(ctx, "dest", ""), ctx.warnings

        logger.debug(f"Normalized source: {ctx.src}")

        lang_map = list(get_lang_map(ctx.conn, ctx.lang_id, ctx.t_dir))

        # Loop through source characters. The increment of each loop depends
        # on the length of the token that eventually matches.
        ctx.cur = 0
        while ctx.cur < len(ctx.src):
            # Reset cursor position flags.
            ctx.cur_flags = 0

            # Look for a word boundary and flag word beginning/end if found.
            if ctx.cur_at_bow():
                # Beginning of word.
                logger.debug(f"Beginning of word at position {ctx.cur}.")
                ctx.cur_flags |= BOW
            if ctx.cur_at_eow():
                # End of word.
                logger.debug(f"End of word at position {ctx.cur}.")
                ctx.cur_flags |= EOW

            # This hook may skip the parsing of the current token or exit
            # the scanning loop altogether.
            hret = ctx.run_hook("begin_input_token")
            if hret == BREAK:
                logger.debug("Breaking text scanning from hook signal.")
                break
            if hret == CONT:
                logger.debug("Skipping scanning iteration from hook signal.")
                continue

            # Check the ignore list. Find as many subsequent ignore tokens
            # as possible before moving on to looking for match tokens.
            ctx.tk = None
            while True:
                ctx.ignoring = False
                for ctx.tk in get_lang_ignore(ctx.conn, ctx.lang_id):
                    hret = ctx.run_hook("pre_ignore_token")
                    if hret == BREAK:
                        break
                    if hret == CONT:
                        continue

                    _matching = False
                    if type(ctx.tk) is Pattern:
                        # Search for the RE pattern beginning at the cursor.
                        if _ptn_match := ctx.tk.match(ctx.src[ctx.cur:]):
                            ctx.tk = _ptn_match[0]
                            logger.debug(f"Matched regex: {ctx.tk}")
                            step = len(ctx.tk)
                            _matching = True
                    else:
                        # Search for an exact match.
                        step = len(ctx.tk)
                        if ctx.tk == ctx.src[ctx.cur:ctx.cur + step]:
                            _matching = True

                    if _matching:
                        # The position matches an ignore token.
                        hret = ctx.run_hook("on_ignore_match")
                        if hret == BREAK:
                            break
                        if hret == CONT:
                            continue

                        logger.info(f"Ignored token: {ctx.tk}")
                        ctx.dest_ls.append(ctx.tk)
                        ctx.cur += step
                        if ctx.cur >= len(ctx.src):
                            # Reached the end of the string. Stop ignoring.
                            # The outer loop will exit immediately after.
                            ctx.ignoring = False
                            break
                        ctx.ignoring = True
                        break

                # We looked through all ignore tokens and found no match.
                # Move on.
                if not ctx.ignoring:
                    break
                # Otherwise, if we found a match, check whether the next
                # position may be ignored as well.

            delattr(ctx, "tk")
            delattr(ctx, "ignoring")

            if ctx.cur >= len(ctx.src):
                break

            # Begin transliteration token lookup.
            ctx.match = False

            for ctx.src_tk, ctx.dest_str in lang_map:
                hret = ctx.run_hook("pre_tx_token")
                if hret == BREAK:
                    break
                if hret == CONT:
                    continue

                step = len(ctx.src_tk.content)

                # If the token is longer than the remainder of the string,
                # it surely won't match.
                if ctx.cur + step > len(ctx.src):
                    continue

                # If the first character of the token is greater (= higher
                # code point value) than the current character, break the
                # loop without a match: due to the alphabetical ordering of
                # the map, no further token can match.
                if ctx.src_tk.content[0] > ctx.cur_char:
                    logger.debug(
                            f"{ctx.src_tk.content} is after "
                            f"{ctx.src[ctx.cur:ctx.cur + step]}. "
                            "Breaking loop.")
                    break

                # If src_tk has a WB flag but the token is not at a WB, skip.
                if (
                    (ctx.src_tk.flags & BOW and not ctx.cur_flags & BOW)
                    or (
                        # Can't rely on the EOW flag; we must check the last
                        # character of the potential match.
                        ctx.src_tk.flags & EOW
                        and not ctx.cur_at_eow(ctx.cur + step - 1)
                    )
                ):
                    continue

                # Longer tokens are guaranteed to be scanned before their
                # substrings at this point. Similarly, flagged tokens are
                # evaluated first.
                if ctx.src_tk.content == ctx.src[ctx.cur:ctx.cur + step]:
                    ctx.match = True
                    # This hook may skip this token or break out of the
                    # token lookup for the current position.
                    hret = ctx.run_hook("on_tx_token_match")
                    if hret == BREAK:
                        break
                    if hret == CONT:
                        continue

                    # A match is found. Stop scanning tokens, append the
                    # result, and proceed scanning the source.

                    # Capitalization. This applies double capitalization
                    # rules. The external function in
                    # scriptshifter.tools.capitalize used for non-table
                    # languages does not.
                    if (
                        (ctx.options["capitalize"] == "first"
                            and ctx.cur == 0)
                        or (
                            ctx.options["capitalize"] == "all"
                            and ctx.cur_flags & BOW
                        )
                    ):
                        logger.info("Capitalizing token.")
                        double_cap = False
                        for dcap_rule in get_lang_dcap(
                                ctx.conn, ctx.lang_id):
                            if ctx.dest_str == dcap_rule:
                                ctx.dest_str = ctx.dest_str.upper()
                                double_cap = True
                                break
                        if not double_cap:
                            ctx.dest_str = (
                                    ctx.dest_str[0].upper()
                                    + ctx.dest_str[1:])

                    ctx.dest_ls.append(ctx.dest_str)
                    ctx.cur += step
                    break

            if ctx.match is False:
                delattr(ctx, "match")

                hret = ctx.run_hook("on_no_tx_token_match")
                if hret == BREAK:
                    break
                if hret == CONT:
                    continue

                # No match found. Copy non-mapped character (one at a time).
                logger.info(
                        f"Token {ctx.cur_char} "
                        f"(\\u{hex(ord(ctx.cur_char))[2:]}) "
                        f"at position {ctx.cur} is not mapped.")
                ctx.dest_ls.append(ctx.cur_char)
                ctx.cur += 1
            else:
                delattr(ctx, "match")

        delattr(ctx, "cur_flags")
        delattr(ctx, "cur")

        # This hook may take care of the assembly and cause the function to
        # return its own return value.
        hret = ctx.run_hook("pre_assembly")
        if hret is not None:
            return hret, ctx.warnings

        logger.debug(f"Output list: {ctx.dest_ls}")
        ctx.dest = "".join(ctx.dest_ls)

        # This hook may reassign the output string and/or cause the function
        # to return it immediately.
        hret = ctx.run_hook("post_assembly")
        if hret is not None:
            return hret, ctx.warnings

        # Strip multiple spaces and leading/trailing whitespace.
        ctx.dest = MULTI_WS_RE.sub(r"\1", ctx.dest.strip())

        return ctx.dest, ctx.warnings
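

# Minimal usage sketch (not in the original file; "uzbek" is a hypothetical
# table name, and the tables actually available depend on the configured
# database):
#
#     dest, warnings = transliterate("Тошкент", "uzbek", t_dir="s2r")
#     print(dest)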