Pass.cpp 40 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090
  1. // SPDX-License-Identifier: MIT OR MPL-2.0 OR LGPL-2.1-or-later OR GPL-2.0-or-later
  2. // Copyright 2010, SIL International, All rights reserved.
  3. #include "inc/Main.h"
  4. #include "inc/debug.h"
  5. #include "inc/Endian.h"
  6. #include "inc/Pass.h"
  7. #include <cstring>
  8. #include <cstdlib>
  9. #include <cassert>
  10. #include <cmath>
  11. #include "inc/Segment.h"
  12. #include "inc/Code.h"
  13. #include "inc/Rule.h"
  14. #include "inc/Error.h"
  15. #include "inc/Collider.h"
  16. using namespace graphite2;
  17. using vm::Machine;
  18. typedef Machine::Code Code;
// Kerning-collision mode for a pass, decoded from bits 3-4 of the pass
// flags byte in readPass (m_kernColls = (flags >> 3) & 0x3).
enum KernCollison
{
    None        = 0,
    CrossSpace  = 1,
    InWord      = 2,
    reserved    = 3
};
// Construct an empty pass: all table pointers null and all counts zero,
// so the destructor is safe to run even if readPass was never called or
// failed part-way through.
Pass::Pass()
: m_silf(0),
  m_cols(0),
  m_rules(0),
  m_ruleMap(0),
  m_startStates(0),
  m_transitions(0),
  m_states(0),
  m_codes(0),
  m_progs(0),
  m_numCollRuns(0),
  m_kernColls(0),
  m_iMaxLoop(0),
  m_numGlyphs(0),
  m_numRules(0),
  m_numStates(0),
  m_numTransition(0),
  m_numSuccess(0),
  m_successStart(0),
  m_numColumns(0),
  m_minPreCtxt(0),
  m_maxPreCtxt(0),
  m_colThreshold(0),
  m_isReverseDir(false)
{
}
  52. Pass::~Pass()
  53. {
  54. free(m_cols);
  55. free(m_startStates);
  56. free(m_transitions);
  57. free(m_states);
  58. free(m_ruleMap);
  59. if (m_rules) delete [] m_rules;
  60. if (m_codes) delete [] m_codes;
  61. free(m_progs);
  62. }
// Deserialise one pass subtable from the font into this Pass object.
// pass_start/pass_length bound the raw subtable bytes; subtable_base is
// subtracted from the absolute offsets that locate the three code blocks
// (pass constraint, rule constraints, rule actions).  Every structural
// inconsistency records a code in e and returns via face.error(e).
bool Pass::readPass(const byte * const pass_start, size_t pass_length, size_t subtable_base,
        GR_MAYBE_UNUSED Face & face, passtype pt, GR_MAYBE_UNUSED uint32 version, Error &e)
{
    const byte * p = pass_start,
               * const pass_end = p + pass_length;
    size_t numRanges;

    // The fixed header is 40 bytes; shorter data is malformed.
    if (e.test(pass_length < 40, E_BADPASSLENGTH)) return face.error(e);
    // Read in basic values
    const byte flags = be::read<byte>(p);
    // Collision bits (low 5) are only legal on positioning-or-later passes of
    // fonts that have collision attributes, glyph boxes and the Silf 0x20 flag.
    if (e.test((flags & 0x1f) &&
            (pt < PASS_TYPE_POSITIONING || !m_silf->aCollision() || !face.glyphs().hasBoxes() || !(m_silf->flags() & 0x20)),
            E_BADCOLLISIONPASS))
        return face.error(e);
    m_numCollRuns = flags & 0x7;            // number of collision-fixing loops
    m_kernColls   = (flags >> 3) & 0x3;     // KernCollison mode
    m_isReverseDir = (flags >> 5) & 0x1;
    m_iMaxLoop = be::read<byte>(p);
    if (m_iMaxLoop < 1) m_iMaxLoop = 1;     // rule-loop limit must be at least 1
    be::skip<byte>(p,2); // skip maxContext & maxBackup
    m_numRules = be::read<uint16>(p);
    // A pass with neither rules nor collision runs does nothing: reject it.
    if (e.test(!m_numRules && m_numCollRuns == 0, E_BADEMPTYPASS)) return face.error(e);
    be::skip<uint16>(p);        // fsmOffset - not sure why we would want this
    // Rebase the three absolute code-block offsets against this subtable.
    const byte * const pcCode = pass_start + be::read<uint32>(p) - subtable_base,
               * const rcCode = pass_start + be::read<uint32>(p) - subtable_base,
               * const aCode  = pass_start + be::read<uint32>(p) - subtable_base;
    be::skip<uint32>(p);
    m_numStates = be::read<uint16>(p);
    m_numTransition = be::read<uint16>(p);
    m_numSuccess = be::read<uint16>(p);
    m_numColumns = be::read<uint16>(p);
    numRanges = be::read<uint16>(p);
    be::skip<uint16>(p, 3); // skip searchRange, entrySelector & rangeShift.
    assert(p - pass_start == 40);
    // Perform some sanity checks.
    if ( e.test(m_numTransition > m_numStates, E_BADNUMTRANS)
      || e.test(m_numSuccess > m_numStates, E_BADNUMSUCCESS)
      || e.test(m_numSuccess + m_numTransition < m_numStates, E_BADNUMSTATES)
      || e.test(m_numRules && numRanges == 0, E_NORANGES)
      || e.test(m_numColumns > 0x7FFF, E_BADNUMCOLUMNS))
        return face.error(e);

    m_successStart = m_numStates - m_numSuccess;
    // test for beyond end - 1 to account for reading uint16
    if (e.test(p + numRanges * 6 - 2 > pass_end, E_BADPASSLENGTH)) return face.error(e);
    // Highest glyph id covered by the last range, +1 to get a glyph count.
    m_numGlyphs = be::peek<uint16>(p + numRanges * 6 - 4) + 1;
    // Calculate the start of various arrays.
    const byte * const ranges = p;
    be::skip<uint16>(p, numRanges*3);           // each range is 3 uint16s
    const byte * const o_rule_map = p;
    be::skip<uint16>(p, m_numSuccess + 1);      // offsets table has a final sentinel entry
    // More sanity checks
    if (e.test(reinterpret_cast<const byte *>(o_rule_map + m_numSuccess*sizeof(uint16)) > pass_end
            || p > pass_end, E_BADRULEMAPLEN))
        return face.error(e);
    // The sentinel entry gives the total number of rule-map entries.
    const size_t numEntries = be::peek<uint16>(o_rule_map + m_numSuccess*sizeof(uint16));
    const byte * const rule_map = p;
    be::skip<uint16>(p, numEntries);
    if (e.test(p + 2*sizeof(uint8) > pass_end, E_BADPASSLENGTH)) return face.error(e);
    m_minPreCtxt = be::read<uint8>(p);
    m_maxPreCtxt = be::read<uint8>(p);
    if (e.test(m_minPreCtxt > m_maxPreCtxt, E_BADCTXTLENBOUNDS)) return face.error(e);
    // One start state per possible pre-context length.
    const byte * const start_states = p;
    be::skip<int16>(p, m_maxPreCtxt - m_minPreCtxt + 1);
    const uint16 * const sort_keys = reinterpret_cast<const uint16 *>(p);
    be::skip<uint16>(p, m_numRules);
    const byte * const precontext = p;
    be::skip<byte>(p, m_numRules);
    if (e.test(p + sizeof(uint16) + sizeof(uint8) > pass_end, E_BADCTXTLENS)) return face.error(e);
    m_colThreshold = be::read<uint8>(p);
    if (m_colThreshold == 0) m_colThreshold = 10;       // A default
    const size_t pass_constraint_len = be::read<uint16>(p);
    // Per-rule offsets into the constraint and action code blocks (+1 sentinel each).
    const uint16 * const o_constraint = reinterpret_cast<const uint16 *>(p);
    be::skip<uint16>(p, m_numRules + 1);
    const uint16 * const o_actions = reinterpret_cast<const uint16 *>(p);
    be::skip<uint16>(p, m_numRules + 1);
    const byte * const states = p;
    if (e.test(2u*m_numTransition*m_numColumns >= (unsigned)(pass_end - p), E_BADPASSLENGTH)
     || e.test(p >= pass_end, E_BADPASSLENGTH))
        return face.error(e);
    be::skip<int16>(p, m_numTransition*m_numColumns);
    be::skip<uint8>(p);
    // The cursor must now land exactly on each code block in turn; the
    // rebased offsets double as a consistency check on the layout.
    if (e.test(p != pcCode, E_BADPASSCCODEPTR)) return face.error(e);
    be::skip<byte>(p, pass_constraint_len);
    if (e.test(p != rcCode, E_BADRULECCODEPTR)
     || e.test(size_t(rcCode - pcCode) != pass_constraint_len, E_BADCCODELEN)) return face.error(e);
    be::skip<byte>(p, be::peek<uint16>(o_constraint + m_numRules));
    if (e.test(p != aCode, E_BADACTIONCODEPTR)) return face.error(e);
    be::skip<byte>(p, be::peek<uint16>(o_actions + m_numRules));
    // We should be at the end or within the pass
    if (e.test(p > pass_end, E_BADPASSLENGTH)) return face.error(e);

    // Load the pass constraint if there is one.
    if (pass_constraint_len)
    {
        face.error_context(face.error_context() + 1);
        m_cPConstraint = vm::Machine::Code(true, pcCode, pcCode + pass_constraint_len,
                                  precontext[0], be::peek<uint16>(sort_keys), *m_silf, face, PASS_TYPE_UNKNOWN);
        if (e.test(!m_cPConstraint, E_OUTOFMEM)
                || e.test(m_cPConstraint.status() != Code::loaded, int(m_cPConstraint.status()) + E_CODEFAILURE))
            return face.error(e);
        face.error_context(face.error_context() - 1);
    }
    if (m_numRules)
    {
        if (!readRanges(ranges, numRanges, e)) return face.error(e);
        if (!readRules(rule_map, numEntries,  precontext, sort_keys,
                 o_constraint, rcCode, o_actions, aCode, face, pt, e)) return false;
    }
#ifdef GRAPHITE2_TELEMETRY
    telemetry::category _states_cat(face.tele.states);
#endif
    return m_numRules ? readStates(start_states, states, o_rule_map, face, e) : true;
}
// Build the Rule array, the per-rule action/constraint Code objects and the
// rule-entry map.  Rules are loaded back-to-front: each rule's code ends
// where the next rule's begins, so iterating from the last rule backwards
// lets ac_end/rc_end be carried over from the previous iteration.
// Returns false on malformed data or (via face.error) on allocation failure.
bool Pass::readRules(const byte * rule_map, const size_t num_entries,
                     const byte *precontext, const uint16 * sort_key,
                     const uint16 * o_constraint, const byte *rc_data,
                     const uint16 * o_action, const byte * ac_data,
                     Face & face, passtype pt, Error &e)
{
    // Sentinel offsets (index m_numRules) give the total code-block sizes.
    const byte * const ac_data_end = ac_data + be::peek<uint16>(o_action + m_numRules);
    const byte * const rc_data_end = rc_data + be::peek<uint16>(o_constraint + m_numRules);

    // Move all cursors to one-past-the-last-rule; the loop below pre-decrements.
    precontext += m_numRules;
    sort_key += m_numRules;
    o_constraint += m_numRules;
    o_action += m_numRules;

    // Load rules.
    const byte * ac_begin = 0, * rc_begin = 0,
               * ac_end = ac_data + be::peek<uint16>(o_action),
               * rc_end = rc_data + be::peek<uint16>(o_constraint);

    // Allocate pools
    m_rules = new Rule [m_numRules];
    m_codes = new Code [m_numRules*2];
    // Sum of all rule sort keys bounds the total slot-ref usage of the
    // decoded programs; used to size the single shared program pool.
    int totalSlots = 0;
    const uint16 *tsort = sort_key;
    for (int i = 0; i < m_numRules; ++i)
        totalSlots += be::peek<uint16>(--tsort);
    const size_t prog_pool_sz = vm::Machine::Code::estimateCodeDataOut(ac_end - ac_data + rc_end - rc_data, 2 * m_numRules, totalSlots);
    m_progs = gralloc<byte>(prog_pool_sz);
    byte * prog_pool_free = m_progs,
         * prog_pool_end  = m_progs + prog_pool_sz;
    if (e.test(!(m_rules && m_codes && m_progs), E_OUTOFMEM)) return face.error(e);

    Rule * r = m_rules + m_numRules - 1;
    for (size_t n = m_numRules; r >= m_rules; --n, --r, ac_end = ac_begin, rc_end = rc_begin)
    {
        // Encode the rule index into the error context for diagnostics.
        face.error_context((face.error_context() & 0xFFFF00) + EC_ARULE + int((n - 1) << 24));
        r->preContext = *--precontext;
        r->sort       = be::peek<uint16>(--sort_key);
#ifndef NDEBUG
        r->rule_idx   = uint16(n - 1);
#endif
        // Reject rules with impossible lengths or pre-contexts.
        if (r->sort > 63 || r->preContext >= r->sort || r->preContext > m_maxPreCtxt || r->preContext < m_minPreCtxt)
            return false;
        ac_begin      = ac_data + be::peek<uint16>(--o_action);
        --o_constraint;
        // A zero constraint offset means "no constraint": use the empty range.
        rc_begin      = be::peek<uint16>(o_constraint) ? rc_data + be::peek<uint16>(o_constraint) : rc_end;

        // Validate the code ranges and ensure the decoded output fits in the pool.
        if (ac_begin > ac_end || ac_begin > ac_data_end || ac_end > ac_data_end
                || rc_begin > rc_end || rc_begin > rc_data_end || rc_end > rc_data_end
                || vm::Machine::Code::estimateCodeDataOut(ac_end - ac_begin + rc_end - rc_begin, 2, r->sort) > size_t(prog_pool_end - prog_pool_free))
            return false;
        // Placement-new the codes into the preallocated m_codes array;
        // decoded program bytes are appended at prog_pool_free.
        r->action     = new (m_codes+n*2-2) vm::Machine::Code(false, ac_begin, ac_end, r->preContext, r->sort, *m_silf, face, pt, &prog_pool_free);
        r->constraint = new (m_codes+n*2-1) vm::Machine::Code(true,  rc_begin, rc_end, r->preContext, r->sort, *m_silf, face, pt, &prog_pool_free);

        if (e.test(!r->action || !r->constraint, E_OUTOFMEM)
                || e.test(r->action->status() != Code::loaded, int(r->action->status()) + E_CODEFAILURE)
                || e.test(r->constraint->status() != Code::loaded, int(r->constraint->status()) + E_CODEFAILURE)
                || e.test(!r->constraint->immutable(), E_MUTABLECCODE))
            return face.error(e);
    }

    // Shrink the program pool to what was actually used; if realloc moved
    // it, tell every Code object to rebase its internal pointers.
    byte * const moved_progs = prog_pool_free > m_progs ? static_cast<byte *>(realloc(m_progs, prog_pool_free - m_progs)) : 0;
    if (e.test(!moved_progs, E_OUTOFMEM))
    {
        free(m_progs);
        m_progs = 0;
        return face.error(e);
    }

    if (moved_progs != m_progs)
    {
        for (Code * c = m_codes, * const ce = c + m_numRules*2; c != ce; ++c)
        {
            c->externalProgramMoved(moved_progs - m_progs);
        }
        m_progs = moved_progs;
    }

    // Load the rule entries map
    face.error_context((face.error_context() & 0xFFFF00) + EC_APASS);
    //TODO: Coverity: 1315804: FORWARD_NULL
    RuleEntry * re = m_ruleMap = gralloc<RuleEntry>(num_entries);
    if (e.test(!re, E_OUTOFMEM)) return face.error(e);
    for (size_t n = num_entries; n; --n, ++re)
    {
        const ptrdiff_t rn = be::read<uint16>(rule_map);
        // Every map entry must refer to an existing rule.
        if (e.test(rn >= m_numRules, E_BADRULENUM))  return face.error(e);
        re->rule = m_rules + rn;
    }

    return true;
}
  256. static int cmpRuleEntry(const void *a, const void *b) { return (*(RuleEntry *)a < *(RuleEntry *)b ? -1 :
  257. (*(RuleEntry *)b < *(RuleEntry *)a ? 1 : 0)); }
// Load the FSM tables: the start-state array (one per pre-context length),
// the state transition matrix, and each success state's span of entries in
// the rule map (sorted by cmpRuleEntry for priority order).  Every state
// index read from the font is validated against m_numStates.
bool Pass::readStates(const byte * starts, const byte *states, const byte * o_rule_map, GR_MAYBE_UNUSED Face & face, Error &e)
{
#ifdef GRAPHITE2_TELEMETRY
    telemetry::category _states_cat(face.tele.starts);
#endif
    m_startStates = gralloc<uint16>(m_maxPreCtxt - m_minPreCtxt + 1);
#ifdef GRAPHITE2_TELEMETRY
    telemetry::set_category(face.tele.states);
#endif
    m_states = gralloc<State>(m_numStates);
#ifdef GRAPHITE2_TELEMETRY
    telemetry::set_category(face.tele.transitions);
#endif
    m_transitions = gralloc<uint16>(m_numTransition * m_numColumns);

    if (e.test(!m_startStates || !m_states || !m_transitions, E_OUTOFMEM)) return face.error(e);
    // load start states
    for (uint16 * s = m_startStates,
                * const s_end = s + m_maxPreCtxt - m_minPreCtxt + 1; s != s_end; ++s)
    {
        *s = be::read<uint16>(starts);
        if (e.test(*s >= m_numStates, E_BADSTATE))
        {
            face.error_context((face.error_context() & 0xFFFF00) + EC_ASTARTS + int((s - m_startStates) << 24));
            return face.error(e); // true;
        }
    }

    // load state transition table.
    for (uint16 * t = m_transitions,
                * const t_end = t + m_numTransition*m_numColumns; t != t_end; ++t)
    {
        *t = be::read<uint16>(states);
        if (e.test(*t >= m_numStates, E_BADSTATE))
        {
            face.error_context((face.error_context() & 0xFFFF00) + EC_ATRANS + int(((t - m_transitions) / m_numColumns) << 8));
            return face.error(e);
        }
    }

    State * s = m_states,
          * const success_begin = m_states + m_numStates - m_numSuccess;
    // End of the whole rule map, taken from the sentinel offset entry.
    const RuleEntry * rule_map_end = m_ruleMap + be::peek<uint16>(o_rule_map + m_numSuccess*sizeof(uint16));
    for (size_t n = m_numStates; n; --n, ++s)
    {
        // Only success states own a [begin,end) span of rule-map entries;
        // non-success states get null spans.
        RuleEntry * const begin = s < success_begin ? 0 : m_ruleMap + be::read<uint16>(o_rule_map),
                  * const end   = s < success_begin ? 0 : m_ruleMap + be::peek<uint16>(o_rule_map);

        if (e.test(begin >= rule_map_end || end > rule_map_end || begin > end, E_BADRULEMAPPING))
        {
            face.error_context((face.error_context() & 0xFFFF00) + EC_ARULEMAP + int(n << 24));
            return face.error(e);
        }
        s->rules = begin;
        // Clamp the span so a state never offers more than MAX_RULES rules.
        s->rules_end = (end - begin <= FiniteStateMachine::MAX_RULES)? end :
            begin + FiniteStateMachine::MAX_RULES;
        if (begin)      // keep UBSan happy can't call qsort with null begin
            qsort(begin, end - begin, sizeof(RuleEntry), &cmpRuleEntry);
    }

    return true;
}
// Build the glyph-id -> FSM-column table from the range records
// (firstGlyph, lastGlyph, column).  Unassigned glyphs keep the 0xFFFF
// sentinel.  Rejects empty/overflowing ranges, out-of-range columns, and
// any glyph already assigned to a different column.
bool Pass::readRanges(const byte * ranges, size_t num_ranges, Error &e)
{
    m_cols = gralloc<uint16>(m_numGlyphs);
    if (e.test(!m_cols, E_OUTOFMEM)) return false;
    // 0xFFFF marks "no column assigned".
    memset(m_cols, 0xFF, m_numGlyphs * sizeof(uint16));
    for (size_t n = num_ranges; n; --n)
    {
        uint16 * ci = m_cols + be::read<uint16>(ranges),
               * ci_end = m_cols + be::read<uint16>(ranges) + 1,
                 col = be::read<uint16>(ranges);

        if (e.test(ci >= ci_end || ci_end > m_cols+m_numGlyphs || col >= m_numColumns, E_BADRANGE))
            return false;

        // A glyph must only belong to one column at a time
        while (ci != ci_end && *ci == 0xffff)
            *ci++ = col;

        // If the loop stopped early, a glyph in the range was already assigned.
        if (e.test(ci != ci_end, E_BADRANGE))
            return false;
    }
    return true;
}
// Execute this pass over the segment: optionally reverse the slot stream,
// run the rule engine with a loop limit, then run the collision-avoidance
// phases if enabled.  Returns false only on machine failure or collision
// processing failure; an inapplicable pass returns true.
bool Pass::runGraphite(vm::Machine & m, FiniteStateMachine & fsm, bool reverse) const
{
    Slot *s = m.slotMap().segment.first();
    // Empty segment or failed pass constraint: nothing to do, not an error.
    if (!s || !testPassConstraint(m)) return true;
    if (reverse)
    {
        m.slotMap().segment.reverseSlots();
        s = m.slotMap().segment.first();
    }
    if (m_numRules)
    {
        Slot *currHigh = s->next();

#if !defined GRAPHITE2_NTRACING
        if (fsm.dbgout)  *fsm.dbgout << "rules" << json::array;
        json::closer rules_array_closer(fsm.dbgout);
#endif

        // The highwater mark plus the m_iMaxLoop counter bound how many times
        // rules may re-fire over the same stretch of slots, guaranteeing
        // termination on malicious/looping rule sets.
        m.slotMap().highwater(currHigh);
        int lc = m_iMaxLoop;
        do
        {
            findNDoRule(s, m, fsm);
            if (m.status() != Machine::finished) return false;
            if (s && (s == m.slotMap().highwater() || m.slotMap().highpassed() || --lc == 0)) {
                if (!lc)
                    s = m.slotMap().highwater();
                lc = m_iMaxLoop;
                if (s)
                    m.slotMap().highwater(s->next());
            }
        } while (s);
    }
    //TODO: Use enums for flags
    const bool collisions = m_numCollRuns || m_kernColls;

    if (!collisions || !m.slotMap().segment.hasCollisionInfo())
        return true;

    if (m_numCollRuns)
    {
        // Ensure slots are positioned before the first collision pass runs.
        if (!(m.slotMap().segment.flags() & Segment::SEG_INITCOLLISIONS))
        {
            m.slotMap().segment.positionSlots(0, 0, 0, m.slotMap().dir(), true);
//          m.slotMap().segment.flags(m.slotMap().segment.flags() | Segment::SEG_INITCOLLISIONS);
        }
        if (!collisionShift(&m.slotMap().segment, m.slotMap().dir(), fsm.dbgout))
            return false;
    }
    if ((m_kernColls) && !collisionKern(&m.slotMap().segment, m.slotMap().dir(), fsm.dbgout))
        return false;
    if (collisions && !collisionFinish(&m.slotMap().segment, fsm.dbgout))
        return false;
    return true;
}
// Drive the FSM forward from `slot`, pushing each visited slot into
// fsm.slots and accumulating the rules of every success state reached.
// Stops on: a glyph with no column, the dead state (0), end of stream,
// an out-of-range state, or the slot buffer filling up.  Returns false
// only when the pre-context is too short or the slot buffer overflowed.
bool Pass::runFSM(FiniteStateMachine& fsm, Slot * slot) const
{
    fsm.reset(slot, m_maxPreCtxt);
    if (fsm.slots.context() < m_minPreCtxt)
        return false;

    // Pick the start state matching the available pre-context length.
    uint16 state = m_startStates[m_maxPreCtxt - fsm.slots.context()];
    uint8  free_slots = SlotMap::MAX_SLOTS;
    do
    {
        fsm.slots.pushSlot(slot);
        if (slot->gid() >= m_numGlyphs
         || m_cols[slot->gid()] == 0xffffU      // glyph not mapped to any column
         || --free_slots == 0                   // slot buffer exhausted
         || state >= m_numTransition)           // non-transition (final) state
            return free_slots != 0;             // overflow is the only failure

        const uint16 * transitions = m_transitions + state*m_numColumns;
        state = transitions[m_cols[slot->gid()]];
        if (state >= m_successStart)
            fsm.rules.accumulate_rules(m_states[state]);

        slot = slot->next();
    } while (state != 0 && slot);

    // Push the terminating slot (may be null at end of stream).
    fsm.slots.pushSlot(slot);
    return true;
}
  410. #if !defined GRAPHITE2_NTRACING
// Tracing helper: map rule-relative input index n onto a live slot.
// If the mapped slot was replaced by a copy during rule execution, step to
// an adjacent live slot so the debug dump refers to a slot still in the
// stream (falling back to the segment's last slot).
inline
Slot * input_slot(const SlotMap &  slots, const int n)
{
    Slot * s = slots[slots.context() + n];
    if (!s->isCopied())     return s;

    return s->prev() ? s->prev()->next() : (s->next() ? s->next()->prev() : slots.segment.last());
}
  418. inline
  419. Slot * output_slot(const SlotMap & slots, const int n)
  420. {
  421. Slot * s = slots[slots.context() + n - 1];
  422. return s ? s->next() : slots.segment.first();
  423. }
  424. #endif //!defined GRAPHITE2_NTRACING
// Run the FSM at `slot`, pick the first matching rule whose constraint
// passes, and execute its action (with JSON tracing when enabled).
// On success `slot` is advanced by adjustSlot according to the action's
// return value; when no rule fires, `slot` simply moves to the next slot.
void Pass::findNDoRule(Slot * & slot, Machine &m, FiniteStateMachine & fsm) const
{
    assert(slot);

    if (runFSM(fsm, slot))
    {
        // Search for the first rule which passes the constraint
        const RuleEntry *        r = fsm.rules.begin(),
                        * const re = fsm.rules.end();
        while (r != re && !testConstraint(*r->rule, m))
        {
            ++r;
            // A constraint program that failed the machine aborts the pass.
            if (m.status() != Machine::finished)
                return;
        }

#if !defined GRAPHITE2_NTRACING
        if (fsm.dbgout)
        {
            if (fsm.rules.size() != 0)
            {
                *fsm.dbgout << json::item << json::object;
                dumpRuleEventConsidered(fsm, *r);
                if (r != re)
                {
                    const int adv = doAction(r->rule->action, slot, m);
                    dumpRuleEventOutput(fsm, *r->rule, slot);
                    if (r->rule->action->deletes()) fsm.slots.collectGarbage(slot);
                    adjustSlot(adv, slot, fsm.slots);
                    *fsm.dbgout << "cursor" << objectid(dslot(&fsm.slots.segment, slot))
                            << json::close; // Close RuleEvent object

                    return;
                }
                else
                {
                    *fsm.dbgout << json::close  // close "considered" array
                            << "output" << json::null
                            << "cursor" << objectid(dslot(&fsm.slots.segment, slot->next()))
                            << json::close;
                }
            }
        }
        else
#endif
        {
            // Non-tracing path: same action sequence without the JSON dump.
            if (r != re)
            {
                const int adv = doAction(r->rule->action, slot, m);
                if (m.status() != Machine::finished) return;
                if (r->rule->action->deletes()) fsm.slots.collectGarbage(slot);
                adjustSlot(adv, slot, fsm.slots);
                return;
            }
        }
    }

    // No rule fired (or FSM declined): move on to the next slot.
    slot = slot->next();
    return;
}
  481. #if !defined GRAPHITE2_NTRACING
// Emit the "considered" JSON array: every rule entry before `re` that was
// tried and failed its constraint (entries whose pre-context exceeds the
// available context were never tried, so they are skipped).
void Pass::dumpRuleEventConsidered(const FiniteStateMachine & fsm, const RuleEntry & re) const
{
    *fsm.dbgout << "considered" << json::array;
    for (const RuleEntry *r = fsm.rules.begin(); r != &re; ++r)
    {
        if (r->rule->preContext > fsm.slots.context())
            continue;
        *fsm.dbgout << json::flat << json::object
                    << "id" << r->rule - m_rules
                    << "failed" << true
                    << "input" << json::flat << json::object
                        << "start" << objectid(dslot(&fsm.slots.segment, input_slot(fsm.slots, -r->rule->preContext)))
                        << "length" << r->rule->sort
                    << json::close  // close "input"
                << json::close; // close Rule object
    }
}
// Emit the JSON record for the rule that fired: its input span, the output
// slot range, each output slot's state, and the positional shift the rule
// caused (origin of last_slot before vs after repositioning).
void Pass::dumpRuleEventOutput(const FiniteStateMachine & fsm, const Rule & r, Slot * const last_slot) const
{
    *fsm.dbgout     << json::item << json::flat << json::object
                        << "id"     << &r - m_rules
                        << "failed" << false
                        << "input" << json::flat << json::object
                            << "start" << objectid(dslot(&fsm.slots.segment, input_slot(fsm.slots, 0)))
                            << "length" << r.sort - r.preContext
                        << json::close // close "input"
                    << json::close  // close Rule object
                << json::close // close considered array
                << "output" << json::object
                    << "range" << json::flat << json::object
                        << "start"  << objectid(dslot(&fsm.slots.segment, input_slot(fsm.slots, 0)))
                        << "end"    << objectid(dslot(&fsm.slots.segment, last_slot))
                    << json::close // close "input"
                    << "slots"  << json::array;
    // Reposition so the dumped origins reflect the rule's effect.
    const Position rsb_prepos = last_slot ? last_slot->origin() : fsm.slots.segment.advance();
    fsm.slots.segment.positionSlots(0, 0, 0, fsm.slots.segment.currdir());

    for(Slot * slot = output_slot(fsm.slots, 0); slot != last_slot; slot = slot->next())
        *fsm.dbgout     << dslot(&fsm.slots.segment, slot);
    *fsm.dbgout         << json::close  // close "slots"
                    << "postshift"  << (last_slot ? last_slot->origin() : fsm.slots.segment.advance()) - rsb_prepos
                << json::close; // close "output" object
}
  524. #endif
// Evaluate the pass-level constraint program (if any) against the first
// slot of the segment.  Returns true when there is no constraint, or when
// the program returns non-zero and the machine finished cleanly.
inline
bool Pass::testPassConstraint(Machine & m) const
{
    if (!m_cPConstraint) return true;

    assert(m_cPConstraint.constraint());

    // Run with a one-slot map containing the segment's first slot.
    m.slotMap().reset(*m.slotMap().segment.first(), 0);
    m.slotMap().pushSlot(m.slotMap().segment.first());
    vm::slotref * map = m.slotMap().begin();
    const uint32 ret = m_cPConstraint.run(m, map);

#if !defined GRAPHITE2_NTRACING
    json * const dbgout = m.slotMap().segment.getFace()->logger();
    if (dbgout)
        *dbgout << "constraint" << (ret && m.status() == Machine::finished);
#endif

    return ret && m.status() == Machine::finished;
}
// Test whether rule r's constraint passes at the machine's current
// position.  First checks that the rule's span (sort slots starting
// preContext slots back) fits inside the slot map and ends on a live slot,
// then runs the constraint program once per non-null slot in the span.
bool Pass::testConstraint(const Rule & r, Machine & m) const
{
    const uint16 curr_context = m.slotMap().context();
    // Span must fit within the map, and the pre-context must be available.
    if (unsigned(r.sort + curr_context - r.preContext) > m.slotMap().size()
        || curr_context - r.preContext < 0) return false;

    vm::slotref * map = m.slotMap().begin() + curr_context - r.preContext;
    if (map[r.sort - 1] == 0)   // the span's final slot must exist
        return false;

    if (!*r.constraint) return true;    // empty constraint always passes
    assert(r.constraint->constraint());
    for (int n = r.sort; n && map; --n, ++map)
    {
        if (!*map) continue;    // skip deleted slots in the span
        const int32 ret = r.constraint->run(m, map);
        if (!ret || m.status() != Machine::finished)
            return false;
    }

    return true;
}
  560. void SlotMap::collectGarbage(Slot * &aSlot)
  561. {
  562. for(Slot **s = begin(), *const *const se = end() - 1; s != se; ++s) {
  563. Slot *& slot = *s;
  564. if(slot && (slot->isDeleted() || slot->isCopied()))
  565. {
  566. if (slot == aSlot)
  567. aSlot = slot->prev() ? slot->prev() : slot->next();
  568. segment.freeSlot(slot);
  569. }
  570. }
  571. }
  572. int Pass::doAction(const Code *codeptr, Slot * & slot_out, vm::Machine & m) const
  573. {
  574. assert(codeptr);
  575. if (!*codeptr) return 0;
  576. SlotMap & smap = m.slotMap();
  577. vm::slotref * map = &smap[smap.context()];
  578. smap.highpassed(false);
  579. int32 ret = codeptr->run(m, map);
  580. if (m.status() != Machine::finished)
  581. {
  582. slot_out = NULL;
  583. smap.highwater(0);
  584. return 0;
  585. }
  586. slot_out = *map;
  587. return ret;
  588. }
// Move slot_out by `delta` slots, maintaining the highwater/highpassed
// bookkeeping that bounds rule re-firing in runGraphite.  A null slot_out
// means the action ran off one end of the stream: re-anchor at the last
// slot (when past the highwater) or the first slot, compensating delta.
void Pass::adjustSlot(int delta, Slot * & slot_out, SlotMap & smap) const
{
    if (!slot_out)
    {
        if (smap.highpassed() || slot_out == smap.highwater())
        {
            // Fell off the end: restart from the last slot.
            slot_out = smap.segment.last();
            ++delta;
            if (!smap.highwater() || smap.highwater() == slot_out)
                smap.highpassed(false);
        }
        else
        {
            // Fell off the front: restart from the first slot.
            slot_out = smap.segment.first();
            --delta;
        }
    }
    if (delta < 0)
    {
        while (++delta <= 0 && slot_out)
        {
            slot_out = slot_out->prev();
            // Moving back across the highwater mark clears highpassed.
            if (smap.highpassed() && smap.highwater() == slot_out)
                smap.highpassed(false);
        }
    }
    else if (delta > 0)
    {
        while (--delta >= 0 && slot_out)
        {
            // Moving forward across the highwater mark sets highpassed.
            if (slot_out == smap.highwater() && slot_out)
                smap.highpassed(true);
            slot_out = slot_out->next();
        }
    }
}
// Collision-avoidance shifting.  Processes the segment one COLL_START..
// COLL_END group at a time.  Phase 1 positions every shiftable
// (COLL_FIX without COLL_KERN) glyph; phase 2 loops up to m_numCollRuns-1
// times, alternating 2a (fix still-colliding glyphs, iterating backwards
// to break logjams) and 2b (re-run basic positioning for all glyphs so
// adjustments converge toward the original locations).  Returns false if
// any resolveCollisions call fails.
bool Pass::collisionShift(Segment *seg, int dir, json * const dbgout) const
{
    ShiftCollider shiftcoll(dbgout);
    // bool isfirst = true;
    bool hasCollisions = false;
    Slot *start = seg->first();      // turn on collision fixing for the first slot
    Slot *end = NULL;
    bool moved = false;

#if !defined GRAPHITE2_NTRACING
    if (dbgout)
        *dbgout << "collisions" << json::array
            << json::flat << json::object << "num-loops" << m_numCollRuns << json::close;
#endif

    while (start)
    {
#if !defined GRAPHITE2_NTRACING
        if (dbgout)  *dbgout << json::object << "phase" << "1" << "moves" << json::array;
#endif
        hasCollisions = false;
        end = NULL;
        // phase 1 : position shiftable glyphs, ignoring kernable glyphs
        for (Slot *s = start; s; s = s->next())
        {
            const SlotCollision * c = seg->collisionInfo(s);
            // Shiftable = COLL_FIX set and COLL_KERN clear.
            if (start && (c->flags() & (SlotCollision::COLL_FIX | SlotCollision::COLL_KERN)) == SlotCollision::COLL_FIX
                      && !resolveCollisions(seg, s, start, shiftcoll, false, dir, moved, hasCollisions, dbgout))
                return false;
            // COLL_END terminates this group (but never on the group's first slot).
            if (s != start && (c->flags() & SlotCollision::COLL_END))
            {
                end = s->next();
                break;
            }
        }

#if !defined GRAPHITE2_NTRACING
        if (dbgout)
            *dbgout << json::close << json::close; // phase-1
#endif

        // phase 2 : loop until happy.
        for (int i = 0; i < m_numCollRuns - 1; ++i)
        {
            if (hasCollisions || moved)
            {

#if !defined GRAPHITE2_NTRACING
                if (dbgout)
                    *dbgout << json::object << "phase" << "2a" << "loop" << i << "moves" << json::array;
#endif
                // phase 2a : if any shiftable glyphs are in collision, iterate backwards,
                // fixing them and ignoring other non-collided glyphs. Note that this handles ONLY
                // glyphs that are actually in collision from phases 1 or 2b, and working backwards
                // has the intended effect of breaking logjams.
                if (hasCollisions)
                {
                    hasCollisions = false;
#if 0
                    moved = true;
                    for (Slot *s = start; s != end; s = s->next())
                    {
                        SlotCollision * c = seg->collisionInfo(s);
                        c->setShift(Position(0, 0));
                    }
#endif
                    Slot *lend = end ? end->prev() : seg->last();
                    Slot *lstart = start->prev();
                    for (Slot *s = lend; s != lstart; s = s->prev())
                    {
                        SlotCollision * c = seg->collisionInfo(s);
                        if (start && (c->flags() & (SlotCollision::COLL_FIX | SlotCollision::COLL_KERN | SlotCollision::COLL_ISCOL))
                                        == (SlotCollision::COLL_FIX | SlotCollision::COLL_ISCOL))  // ONLY if this glyph is still colliding
                        {
                            if (!resolveCollisions(seg, s, lend, shiftcoll, true, dir, moved, hasCollisions, dbgout))
                                return false;
                            // Lock this glyph so phase 2b leaves it alone this loop.
                            c->setFlags(c->flags() | SlotCollision::COLL_TEMPLOCK);
                        }
                    }
                }

#if !defined GRAPHITE2_NTRACING
                if (dbgout)
                    *dbgout << json::close << json::close // phase 2a
                        << json::object << "phase" << "2b" << "loop" << i << "moves" << json::array;
#endif

                // phase 2b : redo basic diacritic positioning pass for ALL glyphs. Each successive loop adjusts
                // glyphs from their current adjusted position, which has the effect of gradually minimizing the
                // resulting adjustment; ie, the final result will be gradually closer to the original location.
                // Also it allows more flexibility in the final adjustment, since it is moving along the
                // possible 8 vectors from successively different starting locations.
                if (moved)
                {
                    moved = false;
                    for (Slot *s = start; s != end; s = s->next())
                    {
                        SlotCollision * c = seg->collisionInfo(s);
                        if (start && (c->flags() & (SlotCollision::COLL_FIX | SlotCollision::COLL_TEMPLOCK
                                            | SlotCollision::COLL_KERN)) == SlotCollision::COLL_FIX
                                  && !resolveCollisions(seg, s, start, shiftcoll, false, dir, moved, hasCollisions, dbgout))
                            return false;
                        else if (c->flags() & SlotCollision::COLL_TEMPLOCK)
                            c->setFlags(c->flags() & ~SlotCollision::COLL_TEMPLOCK);    // release phase-2a locks
                    }
                }
        //      if (!hasCollisions) // no, don't leave yet because phase 2b will continue to improve things
        //          break;
#if !defined GRAPHITE2_NTRACING
                if (dbgout)
                    *dbgout << json::close << json::close; // phase 2
#endif
            }
        }
        if (!end)
            break;
        // Find the start of the next COLL_START group after this one's end.
        start = NULL;
        for (Slot *s = end->prev(); s; s = s->next())
        {
            if (seg->collisionInfo(s)->flags() & SlotCollision::COLL_START)
            {
                start = s;
                break;
            }
        }
    }
    return true;
}
  746. bool Pass::collisionKern(Segment *seg, int dir, json * const dbgout) const
  747. {
  748. Slot *start = seg->first();
  749. float ymin = 1e38f;
  750. float ymax = -1e38f;
  751. const GlyphCache &gc = seg->getFace()->glyphs();
  752. // phase 3 : handle kerning of clusters
  753. #if !defined GRAPHITE2_NTRACING
  754. if (dbgout)
  755. *dbgout << json::object << "phase" << "3" << "moves" << json::array;
  756. #endif
  757. for (Slot *s = seg->first(); s; s = s->next())
  758. {
  759. if (!gc.check(s->gid()))
  760. return false;
  761. const SlotCollision * c = seg->collisionInfo(s);
  762. const Rect &bbox = seg->theGlyphBBoxTemporary(s->gid());
  763. float y = s->origin().y + c->shift().y;
  764. if (!(c->flags() & SlotCollision::COLL_ISSPACE))
  765. {
  766. ymax = max(y + bbox.tr.y, ymax);
  767. ymin = min(y + bbox.bl.y, ymin);
  768. }
  769. if (start && (c->flags() & (SlotCollision::COLL_KERN | SlotCollision::COLL_FIX))
  770. == (SlotCollision::COLL_KERN | SlotCollision::COLL_FIX))
  771. resolveKern(seg, s, start, dir, ymin, ymax, dbgout);
  772. if (c->flags() & SlotCollision::COLL_END)
  773. start = NULL;
  774. if (c->flags() & SlotCollision::COLL_START)
  775. start = s;
  776. }
  777. #if !defined GRAPHITE2_NTRACING
  778. if (dbgout)
  779. *dbgout << json::close << json::close; // phase 3
  780. #endif
  781. return true;
  782. }
  783. bool Pass::collisionFinish(Segment *seg, GR_MAYBE_UNUSED json * const dbgout) const
  784. {
  785. for (Slot *s = seg->first(); s; s = s->next())
  786. {
  787. SlotCollision *c = seg->collisionInfo(s);
  788. if (c->shift().x != 0 || c->shift().y != 0)
  789. {
  790. const Position newOffset = c->shift();
  791. const Position nullPosition(0, 0);
  792. c->setOffset(newOffset + c->offset());
  793. c->setShift(nullPosition);
  794. }
  795. }
  796. // seg->positionSlots();
  797. #if !defined GRAPHITE2_NTRACING
  798. if (dbgout)
  799. *dbgout << json::close;
  800. #endif
  801. return true;
  802. }
  803. // Can slot s be kerned, or is it attached to something that can be kerned?
  804. static bool inKernCluster(Segment *seg, Slot *s)
  805. {
  806. SlotCollision *c = seg->collisionInfo(s);
  807. if (c->flags() & SlotCollision::COLL_KERN /** && c->flags() & SlotCollision::COLL_FIX **/ )
  808. return true;
  809. while (s->attachedTo())
  810. {
  811. s = s->attachedTo();
  812. c = seg->collisionInfo(s);
  813. if (c->flags() & SlotCollision::COLL_KERN /** && c->flags() & SlotCollision::COLL_FIX **/ )
  814. return true;
  815. }
  816. return false;
  817. }
  818. // Fix collisions for the given slot.
  819. // Return true if everything was fixed, false if there are still collisions remaining.
  820. // isRev means be we are processing backwards.
  821. bool Pass::resolveCollisions(Segment *seg, Slot *slotFix, Slot *start,
  822. ShiftCollider &coll, GR_MAYBE_UNUSED bool isRev, int dir, bool &moved, bool &hasCol,
  823. json * const dbgout) const
  824. {
  825. Slot * nbor; // neighboring slot
  826. SlotCollision *cFix = seg->collisionInfo(slotFix);
  827. if (!coll.initSlot(seg, slotFix, cFix->limit(), cFix->margin(), cFix->marginWt(),
  828. cFix->shift(), cFix->offset(), dir, dbgout))
  829. return false;
  830. bool collides = false;
  831. // When we're processing forward, ignore kernable glyphs that preceed the target glyph.
  832. // When processing backward, don't ignore these until we pass slotFix.
  833. bool ignoreForKern = !isRev;
  834. bool rtl = dir & 1;
  835. Slot *base = slotFix;
  836. while (base->attachedTo())
  837. base = base->attachedTo();
  838. Position zero(0., 0.);
  839. // Look for collisions with the neighboring glyphs.
  840. for (nbor = start; nbor; nbor = isRev ? nbor->prev() : nbor->next())
  841. {
  842. SlotCollision *cNbor = seg->collisionInfo(nbor);
  843. bool sameCluster = nbor->isChildOf(base);
  844. if (nbor != slotFix // don't process if this is the slot of interest
  845. && !(cNbor->ignore()) // don't process if ignoring
  846. && (nbor == base || sameCluster // process if in the same cluster as slotFix
  847. || !inKernCluster(seg, nbor)) // or this cluster is not to be kerned
  848. // || (rtl ^ ignoreForKern)) // or it comes before(ltr) or after(rtl)
  849. && (!isRev // if processing forwards then good to merge otherwise only:
  850. || !(cNbor->flags() & SlotCollision::COLL_FIX) // merge in immovable stuff
  851. || ((cNbor->flags() & SlotCollision::COLL_KERN) && !sameCluster) // ignore other kernable clusters
  852. || (cNbor->flags() & SlotCollision::COLL_ISCOL)) // test against other collided glyphs
  853. && !coll.mergeSlot(seg, nbor, cNbor, cNbor->shift(), !ignoreForKern, sameCluster, collides, false, dbgout))
  854. return false;
  855. else if (nbor == slotFix)
  856. // Switching sides of this glyph - if we were ignoring kernable stuff before, don't anymore.
  857. ignoreForKern = !ignoreForKern;
  858. if (nbor != start && (cNbor->flags() & (isRev ? SlotCollision::COLL_START : SlotCollision::COLL_END)))
  859. break;
  860. }
  861. bool isCol = false;
  862. if (collides || cFix->shift().x != 0.f || cFix->shift().y != 0.f)
  863. {
  864. Position shift = coll.resolve(seg, isCol, dbgout);
  865. // isCol has been set to true if a collision remains.
  866. if (std::fabs(shift.x) < 1e38f && std::fabs(shift.y) < 1e38f)
  867. {
  868. if (sqr(shift.x-cFix->shift().x) + sqr(shift.y-cFix->shift().y) >= m_colThreshold * m_colThreshold)
  869. moved = true;
  870. cFix->setShift(shift);
  871. if (slotFix->firstChild())
  872. {
  873. Rect bbox;
  874. Position here = slotFix->origin() + shift;
  875. float clusterMin = here.x;
  876. slotFix->firstChild()->finalise(seg, NULL, here, bbox, 0, clusterMin, rtl, false);
  877. }
  878. }
  879. }
  880. else
  881. {
  882. // This glyph is not colliding with anything.
  883. #if !defined GRAPHITE2_NTRACING
  884. if (dbgout)
  885. {
  886. *dbgout << json::object
  887. << "missed" << objectid(dslot(seg, slotFix));
  888. coll.outputJsonDbg(dbgout, seg, -1);
  889. *dbgout << json::close;
  890. }
  891. #endif
  892. }
  893. // Set the is-collision flag bit.
  894. if (isCol)
  895. { cFix->setFlags(cFix->flags() | SlotCollision::COLL_ISCOL | SlotCollision::COLL_KNOWN); }
  896. else
  897. { cFix->setFlags((cFix->flags() & ~SlotCollision::COLL_ISCOL) | SlotCollision::COLL_KNOWN); }
  898. hasCol |= isCol;
  899. return true;
  900. }
// Kern slotFix against the glyphs that follow it within the current sequence.
// Grows the vertical band (ymin/ymax) with the cluster's bounding boxes,
// lazily builds a KernCollider from the following non-space neighbours, and
// applies the resolved x movement to the slot's advance and shift.
// Returns the horizontal movement applied (0 on no collision or error).
float Pass::resolveKern(Segment *seg, Slot *slotFix, GR_MAYBE_UNUSED Slot *start, int dir,
float &ymin, float &ymax, json *const dbgout) const
{
Slot *nbor; // neighboring slot
float currSpace = 0.;
bool collides = false;
unsigned int space_count = 0;
// Walk up the attachment chain to the cluster base; collision info is
// read from the base, not from slotFix itself.
Slot *base = slotFix;
while (base->attachedTo())
base = base->attachedTo();
SlotCollision *cFix = seg->collisionInfo(base);
const GlyphCache &gc = seg->getFace()->glyphs();
const Rect &bbb = seg->theGlyphBBoxTemporary(slotFix->gid());
const float by = slotFix->origin().y + cFix->shift().y;
// An attached (non-base) glyph doesn't kern itself: flag the base as
// kernable/fixable and let the base's own call do the work.
if (base != slotFix)
{
cFix->setFlags(cFix->flags() | SlotCollision::COLL_KERN | SlotCollision::COLL_FIX);
return 0;
}
bool seenEnd = (cFix->flags() & SlotCollision::COLL_END) != 0;
bool isInit = false; // KernCollider initialised lazily, on the first real neighbour
KernCollider coll(dbgout);
// Extend the vertical band with this glyph's own bounding box.
ymax = max(by + bbb.tr.y, ymax);
ymin = min(by + bbb.bl.y, ymin);
for (nbor = slotFix->next(); nbor; nbor = nbor->next())
{
// Unknown glyph id: give up on kerning this slot.
if (!gc.check(nbor->gid()))
return 0.;
const Rect &bb = seg->theGlyphBBoxTemporary(nbor->gid());
SlotCollision *cNbor = seg->collisionInfo(nbor);
const float nby = nbor->origin().y + cNbor->shift().y;
// Members of our own cluster widen the band but are never kerned against.
if (nbor->isChildOf(base))
{
ymax = max(nby + bb.tr.y, ymax);
ymin = min(nby + bb.bl.y, ymin);
continue;
}
// A zero-height bbox or an explicit space flag is treated as whitespace.
if ((bb.bl.y == 0.f && bb.tr.y == 0.f) || (cNbor->flags() & SlotCollision::COLL_ISSPACE))
{
if (m_kernColls == InWord)
break;
// Add space for a space glyph.
currSpace += nbor->advance();
++space_count;
}
else
{
space_count = 0;
if (nbor != slotFix && !cNbor->ignore())
{
seenEnd = true;
if (!isInit)
{
if (!coll.initSlot(seg, slotFix, cFix->limit(), cFix->margin(),
cFix->shift(), cFix->offset(), dir, ymin, ymax, dbgout))
return 0.;
isInit = true;
}
collides |= coll.mergeSlot(seg, nbor, cNbor->shift(), currSpace, dir, dbgout);
}
}
// Stop at a sequence END — unless this is the first END we've seen, or a
// run of 2+ spaces lets the scan continue past it.
if (cNbor->flags() & SlotCollision::COLL_END)
{
if (seenEnd && space_count < 2)
break;
else
seenEnd = true;
}
}
if (collides)
{
// Apply the resolved movement: update the collider's state, grow the
// advance by the delta, and record the new shift on the base slot.
Position mv = coll.resolve(seg, slotFix, dir, dbgout);
coll.shift(mv, dir);
Position delta = slotFix->advancePos() + mv - cFix->shift();
slotFix->advance(delta);
cFix->setShift(mv);
return mv.x;
}
return 0.;
}