#!/usr/bin/env python3
# Tool to read various files from the Unicode character database and
# generate headers containing derived arrays and lookup tables needed
# by PuTTY.
#
# The aim is to have this be a single tool which you can easily re-run
# against a new version of Unicode, simply by pointing it at an
# appropriate UCD.zip or a directory containing the same files
# unpacked.

import argparse
import collections
import io
import os
import string
import sys
import zipfile

UCDRecord = collections.namedtuple('UCDRecord', [
    'c',
    'General_Category',
    'Canonical_Combining_Class',
    'Bidi_Class',
    'Decomposition_Type',
    'Decomposition_Mapping',
])

def to_ranges(iterable):
    """Collect together adjacent ranges in a list of (key, value) pairs.

    The input iterable should deliver a sequence of (key, value) pairs
    in which the keys are integers in sorted order. The output is a
    sequence of tuples with structure ((start, end), value), each
    indicating that all the keys [start, start+1, ..., end] go with
    that value.
    """
    start = end = val = None
    for k, v in iterable:
        if (k-1, v) == (end, val):
            end = k
        else:
            if start is not None:
                yield (start, end), val
            start, end, val = k, k, v
    if start is not None:
        yield (start, end), val
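
# Worked example (illustrative, not part of the original file):
#   list(to_ranges([(1, 'a'), (2, 'a'), (5, 'b')]))
# returns [((1, 2), 'a'), ((5, 5), 'b')].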

def map_to_ranges(m):
    """Convert an integer-keyed map into a list of (range, value) pairs."""
    yield from to_ranges(sorted(m.items()))

def set_to_ranges(s):
    """Convert a set into a list of ranges."""
    for r, _ in to_ranges((x, None) for x in sorted(s)):
        yield r
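
# Worked examples (illustrative): map_to_ranges({1: 'x', 2: 'x', 5: 'y'})
# yields ((1, 2), 'x') and ((5, 5), 'y'); set_to_ranges({1, 2, 3, 10})
# yields (1, 3) and (10, 10).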

def lines(iterable, keep_comments=False):
    """Deliver the lines of a Unicode data file.

    The input iterable should yield raw lines of the file: for
    example, it can be the file handle itself. The output values have
    their newlines removed, comments and trailing spaces deleted, and
    blank lines discarded.
    """
    for line in iter(iterable):
        line = line.rstrip("\r\n")
        if not keep_comments:
            line = line.split("#", 1)[0]
        line = line.rstrip(" \t")
        if line == "":
            continue
        yield line
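
# Worked example (illustrative):
#   list(lines(["0041;x # comment\n", "\n", "# comment only\n"]))
# returns ["0041;x"].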

class Main:
    def run(self):
        "Parse arguments and generate all the output files."

        parser = argparse.ArgumentParser(
            description='Build UCD-derived source files.')
        parser.add_argument("ucd", help="UCD to work from, either UCD.zip or "
                            "a directory full of unpacked files.")
        args = parser.parse_args()

        if os.path.isdir(args.ucd):
            ucd_dir = args.ucd
            self.open_ucd_file = lambda filename: (
                open(os.path.join(ucd_dir, filename)))
        else:
            ucd_zip = zipfile.ZipFile(args.ucd)
            self.open_ucd_file = lambda filename: (
                io.TextIOWrapper(ucd_zip.open(filename)))

        self.find_unicode_version()

        with open("version.h", "w") as fh:
            self.write_version_header(fh)
        with open("bidi_type.h", "w") as fh:
            self.write_bidi_type_table(fh)
        with open("bidi_mirror.h", "w") as fh:
            self.write_bidi_mirroring_table(fh)
        with open("bidi_brackets.h", "w") as fh:
            self.write_bidi_brackets_table(fh)
        with open("nonspacing_chars.h", "w") as fh:
            self.write_nonspacing_chars_list(fh)
        with open("wide_chars.h", "w") as fh:
            self.write_wide_chars_list(fh)
        with open("ambiguous_wide_chars.h", "w") as fh:
            self.write_ambiguous_wide_chars_list(fh)
        with open("known_chars.h", "w") as fh:
            self.write_known_chars_table(fh)
        with open("combining_classes.h", "w") as fh:
            self.write_combining_class_table(fh)
        with open("canonical_decomp.h", "w") as fh:
            self.write_canonical_decomp_table(fh)
        with open("canonical_comp.h", "w") as fh:
            self.write_canonical_comp_table(fh)
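
    # Typical invocation (illustrative):
    #   python3 read_ucd.py /path/to/UCD.zip
    # or, with the same files unpacked:
    #   python3 read_ucd.py /path/to/ucd-directory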

    def find_unicode_version(self):
        """Find out the version of Unicode.

        This is read from the top of NamesList.txt, which has the
        closest thing to a machine-readable statement of the version
        number that I found in the whole collection of files.
        """
        with self.open_ucd_file("NamesList.txt") as fh:
            for line in lines(fh):
                if line.startswith("@@@\t"):
                    self.unicode_version_full = line[4:]
                    self.unicode_version_short = " ".join(
                        w for w in self.unicode_version_full.split(" ")
                        if any(c in string.digits for c in w))
                    return
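
    # For reference (illustrative): the NamesList.txt header line looks
    # something like
    #   @@@<TAB>The Unicode Standard 14.0
    # so unicode_version_full would be "The Unicode Standard 14.0" and
    # unicode_version_short just "14.0".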

    @property
    def UnicodeData(self):
        """Records from UnicodeData.txt.

        Each yielded item is a UCDRecord tuple.
        """
        with self.open_ucd_file("UnicodeData.txt") as fh:
            for line in lines(fh):
                # Split up the line into its raw fields.
                (
                    codepoint, name, category, cclass, bidiclass, decomp,
                    num6, num7, num8, bidimirrored, obsolete_unicode1_name,
                    obsolete_comment, uppercase, lowercase, titlecase,
                ) = line.split(";")

                # By default, we expect that this record describes
                # just one code point.
                codepoints = [int(codepoint, 16)]

                # Spot the special markers where consecutive lines say
                # <Foo, First> and <Foo, Last>, indicating that the
                # entire range of code points in between are treated
                # the same. If so, we replace 'codepoints' with a
                # range object.
                if "<" in name:
                    assert name.startswith("<") and name.endswith(">"), (
                        "Confusing < in character name: {!r}".format(line))
                    name_pieces = [piece.strip(" \t") for piece in
                                   name.lstrip("<").rstrip(">").split(",")]
                    if "First" in name_pieces:
                        assert isinstance(codepoints, list)
                        prev_line_was_first = True
                        prev_codepoint = codepoints[0]
                        continue
                    elif "Last" in name_pieces:
                        assert prev_line_was_first
                        codepoints = range(prev_codepoint, codepoints[0]+1)
                        del prev_codepoint
                prev_line_was_first = False

                # Decode some of the raw fields into more cooked
                # forms.
                cclass = int(cclass)

                # Separate the decomposition field into decomposition
                # type and mapping.
                if decomp == "":
                    dtype = decomp = None
                elif "<" not in decomp:
                    dtype = 'canonical'
                else:
                    assert decomp.startswith("<")
                    dtype, decomp = decomp[1:].split(">", 1)
                    decomp = decomp.lstrip(" ")
                # And decode the mapping part from hex strings to integers.
                if decomp is not None:
                    decomp = [int(w, 16) for w in decomp.split(" ")]

                # And yield a UCDRecord for each code point in our
                # range.
                for codepoint in codepoints:
                    yield UCDRecord(
                        c=codepoint,
                        General_Category=category,
                        Canonical_Combining_Class=cclass,
                        Bidi_Class=bidiclass,
                        Decomposition_Type=dtype,
                        Decomposition_Mapping=decomp,
                    )

    @property
    def BidiMirroring(self):
        """Parsed character pairs from BidiMirroring.txt.

        Each yielded tuple is a pair of Unicode code points.
        """
        with self.open_ucd_file("BidiMirroring.txt") as fh:
            for line in lines(fh):
                cs1, cs2 = line.split(";")
                c1 = int(cs1, 16)
                c2 = int(cs2, 16)
                yield c1, c2

    @property
    def BidiBrackets(self):
        """Bracket pairs from BidiBrackets.txt.

        Each yielded tuple is a pair of Unicode code points, followed
        by either 'o', 'c' or 'n' to indicate whether the first one is
        an open or closing parenthesis or neither.
        """
        with self.open_ucd_file("BidiBrackets.txt") as fh:
            for line in lines(fh):
                cs1, cs2, kind = line.split(";")
                c1 = int(cs1, 16)
                c2 = int(cs2, 16)
                kind = kind.strip(" \t")
                yield c1, c2, kind

    @property
    def EastAsianWidth(self):
        """East Asian width types from EastAsianWidth.txt.

        Each yielded tuple is (code point, width type).
        """
        with self.open_ucd_file("EastAsianWidth.txt") as fh:
            for line in lines(fh):
                fields = line.split(";")
                if ".." in fields[0]:
                    start, end = [int(s, 16) for s in fields[0].split("..")]
                    cs = range(start, end+1)
                else:
                    cs = [int(fields[0], 16)]
                for c in cs:
                    yield c, fields[1]

    @property
    def CompositionExclusions(self):
        """Composition exclusions from CompositionExclusions.txt.

        Each yielded item is just a code point.
        """
        with self.open_ucd_file("CompositionExclusions.txt") as fh:
            for line in lines(fh):
                yield int(line, 16)

    def write_file_header_comment(self, fh, description):
        print("/*", file=fh)
        print(" * Autogenerated by read_ucd.py from",
              self.unicode_version_full, file=fh)
        print(" *", file=fh)
        for line in description.strip("\n").split("\n"):
            print(" *" + (" " if line != "" else "") + line, file=fh)
        print(" */", file=fh)
        print(file=fh)

    def write_version_header(self, fh):
        self.write_file_header_comment(fh, """
String literals giving the currently supported version of Unicode.
Useful for error messages and 'about' boxes.
""")
        assert all(0x20 <= ord(c) < 0x7F and c != '"'
                   for c in self.unicode_version_full)
        print("#define UNICODE_VERSION_FULL \"{}\"".format(
            self.unicode_version_full), file=fh)
        print("#define UNICODE_VERSION_SHORT \"{}\"".format(
            self.unicode_version_short), file=fh)

    def write_bidi_type_table(self, fh):
        self.write_file_header_comment(fh, """
Bidirectional type of every Unicode character, excluding those with
type ON.

Used by terminal/bidi.c, whose associated lookup function returns ON
by default for anything not in this list.
""")
        types = {}
        for rec in self.UnicodeData:
            if rec.Bidi_Class != "ON":
                types[rec.c] = rec.Bidi_Class
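
        # Each output line is a range of code points and its bidi
        # type, e.g. (illustrative)
        #   {0x0041, 0x005a, L},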
        for (start, end), t in map_to_ranges(types):
            print(f"{{0x{start:04x}, 0x{end:04x}, {t}}},", file=fh)

    def write_bidi_mirroring_table(self, fh):
        self.write_file_header_comment(fh, """
Map each Unicode character to its mirrored form when printing right to
left.

Used by terminal/bidi.c.
""")
        bidi_mirror = {}
        for c1, c2 in self.BidiMirroring:
            assert bidi_mirror.get(c1, c2) == c2, f"Clash at {c1:04X}"
            bidi_mirror[c1] = c2
            assert bidi_mirror.get(c2, c1) == c1, f"Clash at {c2:04X}"
            bidi_mirror[c2] = c1
        for c1, c2 in sorted(bidi_mirror.items()):
            print("{{0x{:04x}, 0x{:04x}}},".format(c1, c2), file=fh)

    def write_bidi_brackets_table(self, fh):
        self.write_file_header_comment(fh, """
Identify Unicode characters that count as brackets for the purposes of
bidirectional text layout. For each one, indicate whether it's an open
or closed bracket, and identify up to two characters that can act as
its counterpart.

Used by terminal/bidi.c.
""")
        bracket_map = {}
        for c1, c2, kind in self.BidiBrackets:
            bracket_map[c1] = kind, c2

        equivalents = {}
        for rec in self.UnicodeData:
            if (rec.Decomposition_Type == 'canonical' and
                    len(rec.Decomposition_Mapping) == 1):
                c = rec.c
                c2 = rec.Decomposition_Mapping[0]
                equivalents[c] = c2
                equivalents[c2] = c
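
        # A typical output line (illustrative):
        #   {0x0028, {0x0029, 0x0000, BT_OPEN}},
        # giving the bracket, its counterpart, a canonical equivalent
        # of the counterpart if any (else 0), and the open/close type.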
        for src, (kind, dst) in sorted(bracket_map.items()):
            dsteq = equivalents.get(dst, 0)
            # UCD claims there's an 'n' kind possible, but as of UCD
            # 14, no instances of it exist
            enumval = {'o': 'BT_OPEN', 'c': 'BT_CLOSE'}[kind]
            print("{{0x{:04x}, {{0x{:04x}, 0x{:04x}, {}}}}},".format(
                src, dst, dsteq, enumval), file=fh)

    def write_nonspacing_chars_list(self, fh):
        self.write_file_header_comment(fh, """
Identify Unicode characters that occupy no character cells of a
terminal.

Used by utils/wcwidth.c.
""")
        cs = set()
        for rec in self.UnicodeData:
            nonspacing = rec.General_Category in {"Me", "Mn", "Cf"}
            if rec.c == 0xAD:
                # In typography this is a SOFT HYPHEN and counts as
                # discardable. But it's also an ISO 8859-1 printing
                # character, and all of those occupy one character
                # cell in a terminal.
                nonspacing = False
            if 0x1160 <= rec.c <= 0x11FF:
                # Medial (vowel) and final (consonant) jamo for
                # decomposed Hangul characters. These are regarded as
                # non-spacing on the grounds that they compose with
                # the preceding initial consonant.
                nonspacing = True
            if nonspacing:
                cs.add(rec.c)
        for start, end in set_to_ranges(cs):
            print(f"{{0x{start:04x}, 0x{end:04x}}},", file=fh)

    def write_width_table(self, fh, accept):
        cs = set()
        for c, wid in self.EastAsianWidth:
            if wid in accept:
                cs.add(c)
        for start, end in set_to_ranges(cs):
            print(f"{{0x{start:04x}, 0x{end:04x}}},", file=fh)

    def write_wide_chars_list(self, fh):
        self.write_file_header_comment(fh, """
Identify Unicode characters that occupy two adjacent character cells
in a terminal.

Used by utils/wcwidth.c.
""")
        self.write_width_table(fh, {'W', 'F'})

    def write_ambiguous_wide_chars_list(self, fh):
        self.write_file_header_comment(fh, """
Identify Unicode characters that are width-ambiguous: some regimes
regard them as occupying two adjacent character cells in a terminal,
and others do not.

Used by utils/wcwidth.c.
""")
        self.write_width_table(fh, {'A'})

    def write_known_chars_table(self, fh):
        self.write_file_header_comment(fh, """
List the Unicode code points that are known to this version of the
standard at all.

Used by utils/unicode-known.c.
""")
        chars = set(rec.c for rec in self.UnicodeData)
        for start, end in set_to_ranges(chars):
            print(f"{{0x{start:04x}, 0x{end:04x}}},", file=fh)

    def write_combining_class_table(self, fh):
        self.write_file_header_comment(fh, """
List the canonical combining class of each Unicode character, if it is
not zero. This controls how combining marks can be reordered by the
Unicode normalisation algorithms.

Used by utils/unicode-norm.c.
""")
        cclasses = {}
        for rec in self.UnicodeData:
            cc = rec.Canonical_Combining_Class
            if cc != 0:
                cclasses[rec.c] = cc
        for (start, end), cclass in map_to_ranges(cclasses):
            print(f"{{0x{start:04x}, 0x{end:04x}, {cclass:d}}},", file=fh)

    def write_canonical_decomp_table(self, fh):
        self.write_file_header_comment(fh, """
List the canonical decomposition of every Unicode character that has
one. This consists of up to two characters, but those may need
decomposition in turn.

Used by utils/unicode-norm.c.
""")
        decomps = {}
        for rec in self.UnicodeData:
            if rec.Decomposition_Type != 'canonical':
                continue
            # Fill in a zero code point as the second character, if
            # it's only one character long
            decomps[rec.c] = (rec.Decomposition_Mapping + [0])[:2]
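
        # For example (illustrative), U+00C9 LATIN CAPITAL LETTER E
        # WITH ACUTE decomposes canonically to U+0045 U+0301, which
        # comes out as
        #   {0x00c9, 0x0045, 0x0301},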
        for c, (d1, d2) in sorted(decomps.items()):
            d2s = f"0x{d2:04x}" if d2 else "0"
            print(f"{{0x{c:04x}, 0x{d1:04x}, {d2s}}},", file=fh)

    def write_canonical_comp_table(self, fh):
        self.write_file_header_comment(fh, """
List the pairs of Unicode characters that canonically recompose to a
single character in NFC.

Used by utils/unicode-norm.c.
""")
        exclusions = set(self.CompositionExclusions)
        nonstarters = set(rec.c for rec in self.UnicodeData
                          if rec.Canonical_Combining_Class != 0)
        decomps = {}
        for rec in self.UnicodeData:
            if rec.Decomposition_Type != 'canonical':
                continue # we don't want compatibility decompositions
            if len(rec.Decomposition_Mapping) != 2:
                continue # we don't want singletons either
            if rec.c in exclusions:
                continue # we don't want anything explicitly excluded
            if (rec.c in nonstarters or
                    rec.Decomposition_Mapping[0] in nonstarters):
                continue # we don't want non-starter decompositions
            decomps[tuple(rec.Decomposition_Mapping)] = rec.c
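
        # This is the inverse of the decomposition table, with lines
        # like (illustrative)
        #   {0x0045, 0x0301, 0x00c9},
        # saying that E followed by combining acute accent recomposes
        # to U+00C9.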
        for (d0, d1), c in sorted(decomps.items()):
            print(f"{{0x{d0:04x}, 0x{d1:04x}, 0x{c:04x}}},", file=fh)

if __name__ == '__main__':
    Main().run()