// This file is part of meshoptimizer library; see meshoptimizer.h for version/license details
#include "meshoptimizer.h"

#include <assert.h>
#include <string.h>

// This work is based on:
// John McDonald, Mark Kilgard. Crack-Free Point-Normal Triangles using Adjacent Edge Normals. 2010
// John Hable. Variable Rate Shading with Visibility Buffer Rendering. 2024
namespace meshopt
{
  10. static unsigned int hashUpdate4(unsigned int h, const unsigned char* key, size_t len)
  11. {
  12. // MurmurHash2
  13. const unsigned int m = 0x5bd1e995;
  14. const int r = 24;
  15. while (len >= 4)
  16. {
  17. unsigned int k = *reinterpret_cast<const unsigned int*>(key);
  18. k *= m;
  19. k ^= k >> r;
  20. k *= m;
  21. h *= m;
  22. h ^= k;
  23. key += 4;
  24. len -= 4;
  25. }
  26. return h;
  27. }
  28. struct VertexHasher
  29. {
  30. const unsigned char* vertices;
  31. size_t vertex_size;
  32. size_t vertex_stride;
  33. size_t hash(unsigned int index) const
  34. {
  35. return hashUpdate4(0, vertices + index * vertex_stride, vertex_size);
  36. }
  37. bool equal(unsigned int lhs, unsigned int rhs) const
  38. {
  39. return memcmp(vertices + lhs * vertex_stride, vertices + rhs * vertex_stride, vertex_size) == 0;
  40. }
  41. };
  42. struct VertexStreamHasher
  43. {
  44. const meshopt_Stream* streams;
  45. size_t stream_count;
  46. size_t hash(unsigned int index) const
  47. {
  48. unsigned int h = 0;
  49. for (size_t i = 0; i < stream_count; ++i)
  50. {
  51. const meshopt_Stream& s = streams[i];
  52. const unsigned char* data = static_cast<const unsigned char*>(s.data);
  53. h = hashUpdate4(h, data + index * s.stride, s.size);
  54. }
  55. return h;
  56. }
  57. bool equal(unsigned int lhs, unsigned int rhs) const
  58. {
  59. for (size_t i = 0; i < stream_count; ++i)
  60. {
  61. const meshopt_Stream& s = streams[i];
  62. const unsigned char* data = static_cast<const unsigned char*>(s.data);
  63. if (memcmp(data + lhs * s.stride, data + rhs * s.stride, s.size) != 0)
  64. return false;
  65. }
  66. return true;
  67. }
  68. };
  69. struct EdgeHasher
  70. {
  71. const unsigned int* remap;
  72. size_t hash(unsigned long long edge) const
  73. {
  74. unsigned int e0 = unsigned(edge >> 32);
  75. unsigned int e1 = unsigned(edge);
  76. unsigned int h1 = remap[e0];
  77. unsigned int h2 = remap[e1];
  78. const unsigned int m = 0x5bd1e995;
  79. // MurmurHash64B finalizer
  80. h1 ^= h2 >> 18;
  81. h1 *= m;
  82. h2 ^= h1 >> 22;
  83. h2 *= m;
  84. h1 ^= h2 >> 17;
  85. h1 *= m;
  86. h2 ^= h1 >> 19;
  87. h2 *= m;
  88. return h2;
  89. }
  90. bool equal(unsigned long long lhs, unsigned long long rhs) const
  91. {
  92. unsigned int l0 = unsigned(lhs >> 32);
  93. unsigned int l1 = unsigned(lhs);
  94. unsigned int r0 = unsigned(rhs >> 32);
  95. unsigned int r1 = unsigned(rhs);
  96. return remap[l0] == remap[r0] && remap[l1] == remap[r1];
  97. }
  98. };
  99. static size_t hashBuckets(size_t count)
  100. {
  101. size_t buckets = 1;
  102. while (buckets < count + count / 4)
  103. buckets *= 2;
  104. return buckets;
  105. }
  106. template <typename T, typename Hash>
  107. static T* hashLookup(T* table, size_t buckets, const Hash& hash, const T& key, const T& empty)
  108. {
  109. assert(buckets > 0);
  110. assert((buckets & (buckets - 1)) == 0);
  111. size_t hashmod = buckets - 1;
  112. size_t bucket = hash.hash(key) & hashmod;
  113. for (size_t probe = 0; probe <= hashmod; ++probe)
  114. {
  115. T& item = table[bucket];
  116. if (item == empty)
  117. return &item;
  118. if (hash.equal(item, key))
  119. return &item;
  120. // hash collision, quadratic probing
  121. bucket = (bucket + probe + 1) & hashmod;
  122. }
  123. assert(false && "Hash table is full"); // unreachable
  124. return NULL;
  125. }
  126. static void buildPositionRemap(unsigned int* remap, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride, meshopt_Allocator& allocator)
  127. {
  128. VertexHasher vertex_hasher = {reinterpret_cast<const unsigned char*>(vertex_positions), 3 * sizeof(float), vertex_positions_stride};
  129. size_t vertex_table_size = hashBuckets(vertex_count);
  130. unsigned int* vertex_table = allocator.allocate<unsigned int>(vertex_table_size);
  131. memset(vertex_table, -1, vertex_table_size * sizeof(unsigned int));
  132. for (size_t i = 0; i < vertex_count; ++i)
  133. {
  134. unsigned int index = unsigned(i);
  135. unsigned int* entry = hashLookup(vertex_table, vertex_table_size, vertex_hasher, index, ~0u);
  136. if (*entry == ~0u)
  137. *entry = index;
  138. remap[index] = *entry;
  139. }
  140. allocator.deallocate(vertex_table);
  141. }
  142. template <size_t BlockSize>
  143. static void remapVertices(void* destination, const void* vertices, size_t vertex_count, size_t vertex_size, const unsigned int* remap)
  144. {
  145. size_t block_size = BlockSize == 0 ? vertex_size : BlockSize;
  146. assert(block_size == vertex_size);
  147. for (size_t i = 0; i < vertex_count; ++i)
  148. if (remap[i] != ~0u)
  149. {
  150. assert(remap[i] < vertex_count);
  151. memcpy(static_cast<unsigned char*>(destination) + remap[i] * block_size, static_cast<const unsigned char*>(vertices) + i * block_size, block_size);
  152. }
  153. }
} // namespace meshopt
  155. size_t meshopt_generateVertexRemap(unsigned int* destination, const unsigned int* indices, size_t index_count, const void* vertices, size_t vertex_count, size_t vertex_size)
  156. {
  157. using namespace meshopt;
  158. assert(indices || index_count == vertex_count);
  159. assert(!indices || index_count % 3 == 0);
  160. assert(vertex_size > 0 && vertex_size <= 256);
  161. meshopt_Allocator allocator;
  162. memset(destination, -1, vertex_count * sizeof(unsigned int));
  163. VertexHasher hasher = {static_cast<const unsigned char*>(vertices), vertex_size, vertex_size};
  164. size_t table_size = hashBuckets(vertex_count);
  165. unsigned int* table = allocator.allocate<unsigned int>(table_size);
  166. memset(table, -1, table_size * sizeof(unsigned int));
  167. unsigned int next_vertex = 0;
  168. for (size_t i = 0; i < index_count; ++i)
  169. {
  170. unsigned int index = indices ? indices[i] : unsigned(i);
  171. assert(index < vertex_count);
  172. if (destination[index] == ~0u)
  173. {
  174. unsigned int* entry = hashLookup(table, table_size, hasher, index, ~0u);
  175. if (*entry == ~0u)
  176. {
  177. *entry = index;
  178. destination[index] = next_vertex++;
  179. }
  180. else
  181. {
  182. assert(destination[*entry] != ~0u);
  183. destination[index] = destination[*entry];
  184. }
  185. }
  186. }
  187. assert(next_vertex <= vertex_count);
  188. return next_vertex;
  189. }
  190. size_t meshopt_generateVertexRemapMulti(unsigned int* destination, const unsigned int* indices, size_t index_count, size_t vertex_count, const struct meshopt_Stream* streams, size_t stream_count)
  191. {
  192. using namespace meshopt;
  193. assert(indices || index_count == vertex_count);
  194. assert(index_count % 3 == 0);
  195. assert(stream_count > 0 && stream_count <= 16);
  196. for (size_t i = 0; i < stream_count; ++i)
  197. {
  198. assert(streams[i].size > 0 && streams[i].size <= 256);
  199. assert(streams[i].size <= streams[i].stride);
  200. }
  201. meshopt_Allocator allocator;
  202. memset(destination, -1, vertex_count * sizeof(unsigned int));
  203. VertexStreamHasher hasher = {streams, stream_count};
  204. size_t table_size = hashBuckets(vertex_count);
  205. unsigned int* table = allocator.allocate<unsigned int>(table_size);
  206. memset(table, -1, table_size * sizeof(unsigned int));
  207. unsigned int next_vertex = 0;
  208. for (size_t i = 0; i < index_count; ++i)
  209. {
  210. unsigned int index = indices ? indices[i] : unsigned(i);
  211. assert(index < vertex_count);
  212. if (destination[index] == ~0u)
  213. {
  214. unsigned int* entry = hashLookup(table, table_size, hasher, index, ~0u);
  215. if (*entry == ~0u)
  216. {
  217. *entry = index;
  218. destination[index] = next_vertex++;
  219. }
  220. else
  221. {
  222. assert(destination[*entry] != ~0u);
  223. destination[index] = destination[*entry];
  224. }
  225. }
  226. }
  227. assert(next_vertex <= vertex_count);
  228. return next_vertex;
  229. }
  230. void meshopt_remapVertexBuffer(void* destination, const void* vertices, size_t vertex_count, size_t vertex_size, const unsigned int* remap)
  231. {
  232. using namespace meshopt;
  233. assert(vertex_size > 0 && vertex_size <= 256);
  234. meshopt_Allocator allocator;
  235. // support in-place remap
  236. if (destination == vertices)
  237. {
  238. unsigned char* vertices_copy = allocator.allocate<unsigned char>(vertex_count * vertex_size);
  239. memcpy(vertices_copy, vertices, vertex_count * vertex_size);
  240. vertices = vertices_copy;
  241. }
  242. // specialize the loop for common vertex sizes to ensure memcpy is compiled as an inlined intrinsic
  243. switch (vertex_size)
  244. {
  245. case 4:
  246. return remapVertices<4>(destination, vertices, vertex_count, vertex_size, remap);
  247. case 8:
  248. return remapVertices<8>(destination, vertices, vertex_count, vertex_size, remap);
  249. case 12:
  250. return remapVertices<12>(destination, vertices, vertex_count, vertex_size, remap);
  251. case 16:
  252. return remapVertices<16>(destination, vertices, vertex_count, vertex_size, remap);
  253. default:
  254. return remapVertices<0>(destination, vertices, vertex_count, vertex_size, remap);
  255. }
  256. }
  257. void meshopt_remapIndexBuffer(unsigned int* destination, const unsigned int* indices, size_t index_count, const unsigned int* remap)
  258. {
  259. assert(index_count % 3 == 0);
  260. for (size_t i = 0; i < index_count; ++i)
  261. {
  262. unsigned int index = indices ? indices[i] : unsigned(i);
  263. assert(remap[index] != ~0u);
  264. destination[i] = remap[index];
  265. }
  266. }
  267. void meshopt_generateShadowIndexBuffer(unsigned int* destination, const unsigned int* indices, size_t index_count, const void* vertices, size_t vertex_count, size_t vertex_size, size_t vertex_stride)
  268. {
  269. using namespace meshopt;
  270. assert(indices);
  271. assert(index_count % 3 == 0);
  272. assert(vertex_size > 0 && vertex_size <= 256);
  273. assert(vertex_size <= vertex_stride);
  274. meshopt_Allocator allocator;
  275. unsigned int* remap = allocator.allocate<unsigned int>(vertex_count);
  276. memset(remap, -1, vertex_count * sizeof(unsigned int));
  277. VertexHasher hasher = {static_cast<const unsigned char*>(vertices), vertex_size, vertex_stride};
  278. size_t table_size = hashBuckets(vertex_count);
  279. unsigned int* table = allocator.allocate<unsigned int>(table_size);
  280. memset(table, -1, table_size * sizeof(unsigned int));
  281. for (size_t i = 0; i < index_count; ++i)
  282. {
  283. unsigned int index = indices[i];
  284. assert(index < vertex_count);
  285. if (remap[index] == ~0u)
  286. {
  287. unsigned int* entry = hashLookup(table, table_size, hasher, index, ~0u);
  288. if (*entry == ~0u)
  289. *entry = index;
  290. remap[index] = *entry;
  291. }
  292. destination[i] = remap[index];
  293. }
  294. }
  295. void meshopt_generateShadowIndexBufferMulti(unsigned int* destination, const unsigned int* indices, size_t index_count, size_t vertex_count, const struct meshopt_Stream* streams, size_t stream_count)
  296. {
  297. using namespace meshopt;
  298. assert(indices);
  299. assert(index_count % 3 == 0);
  300. assert(stream_count > 0 && stream_count <= 16);
  301. for (size_t i = 0; i < stream_count; ++i)
  302. {
  303. assert(streams[i].size > 0 && streams[i].size <= 256);
  304. assert(streams[i].size <= streams[i].stride);
  305. }
  306. meshopt_Allocator allocator;
  307. unsigned int* remap = allocator.allocate<unsigned int>(vertex_count);
  308. memset(remap, -1, vertex_count * sizeof(unsigned int));
  309. VertexStreamHasher hasher = {streams, stream_count};
  310. size_t table_size = hashBuckets(vertex_count);
  311. unsigned int* table = allocator.allocate<unsigned int>(table_size);
  312. memset(table, -1, table_size * sizeof(unsigned int));
  313. for (size_t i = 0; i < index_count; ++i)
  314. {
  315. unsigned int index = indices[i];
  316. assert(index < vertex_count);
  317. if (remap[index] == ~0u)
  318. {
  319. unsigned int* entry = hashLookup(table, table_size, hasher, index, ~0u);
  320. if (*entry == ~0u)
  321. *entry = index;
  322. remap[index] = *entry;
  323. }
  324. destination[i] = remap[index];
  325. }
  326. }
// Generates an adjacency index buffer: 6 output indices per input triangle,
// interleaving each corner with the vertex opposite the corresponding edge in
// the neighboring triangle (the corner itself is repeated when the edge has no
// neighbor). Vertices are matched by exact position so split vertices along
// seams don't break adjacency. Layout presumably targets adjacency primitive
// topologies (e.g. GL_TRIANGLES_ADJACENCY) — confirm against meshoptimizer.h.
void meshopt_generateAdjacencyIndexBuffer(unsigned int* destination, const unsigned int* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride)
{
	using namespace meshopt;

	assert(index_count % 3 == 0);
	assert(vertex_positions_stride >= 12 && vertex_positions_stride <= 256);
	assert(vertex_positions_stride % sizeof(float) == 0);

	meshopt_Allocator allocator;

	// next[e] is the corner after e; the extra element makes next[e + 1] valid
	// for e in [0, 2] without a modulo
	static const int next[4] = {1, 2, 0, 1};

	// build position remap: for each vertex, which other (canonical) vertex does it map to?
	unsigned int* remap = allocator.allocate<unsigned int>(vertex_count);
	buildPositionRemap(remap, vertex_positions, vertex_count, vertex_positions_stride, allocator);

	// build edge set; this stores all triangle edges but we can look these up by any other wedge
	EdgeHasher edge_hasher = {remap};

	size_t edge_table_size = hashBuckets(index_count);
	unsigned long long* edge_table = allocator.allocate<unsigned long long>(edge_table_size);
	unsigned int* edge_vertex_table = allocator.allocate<unsigned int>(edge_table_size);

	memset(edge_table, -1, edge_table_size * sizeof(unsigned long long));
	memset(edge_vertex_table, -1, edge_table_size * sizeof(unsigned int));

	// first pass: register each directed edge (i0 -> i1) with its opposite vertex i2
	for (size_t i = 0; i < index_count; i += 3)
	{
		for (int e = 0; e < 3; ++e)
		{
			unsigned int i0 = indices[i + e];
			unsigned int i1 = indices[i + next[e]];
			unsigned int i2 = indices[i + next[e + 1]];
			assert(i0 < vertex_count && i1 < vertex_count && i2 < vertex_count);

			unsigned long long edge = ((unsigned long long)i0 << 32) | i1;
			unsigned long long* entry = hashLookup(edge_table, edge_table_size, edge_hasher, edge, ~0ull);

			// only the first triangle to register a directed edge stores its
			// opposite vertex; duplicate directed edges (non-manifold input) are ignored
			if (*entry == ~0ull)
			{
				*entry = edge;

				// store vertex opposite to the edge
				edge_vertex_table[entry - edge_table] = i2;
			}
		}
	}

	// build resulting index buffer: 6 indices for each input triangle
	for (size_t i = 0; i < index_count; i += 3)
	{
		unsigned int patch[6];

		for (int e = 0; e < 3; ++e)
		{
			unsigned int i0 = indices[i + e];
			unsigned int i1 = indices[i + next[e]];
			assert(i0 < vertex_count && i1 < vertex_count);

			// note: this refers to the opposite edge!
			unsigned long long edge = ((unsigned long long)i1 << 32) | i0;
			unsigned long long* oppe = hashLookup(edge_table, edge_table_size, edge_hasher, edge, ~0ull);

			// fall back to the corner itself when no neighbor exists across this edge (border)
			patch[e * 2 + 0] = i0;
			patch[e * 2 + 1] = (*oppe == ~0ull) ? i0 : edge_vertex_table[oppe - edge_table];
		}

		memcpy(destination + i * 2, patch, sizeof(patch));
	}
}
// Generates a tessellation index buffer with 12 indices per input triangle:
// [0..2] the triangle corners, [3..8] for each edge the matching directed edge
// of the adjacent triangle (the edge itself when on a border), and [9..11] the
// position-canonical ("dominant") vertex for each corner. This data supports
// crack-free tessellation per the references at the top of the file.
void meshopt_generateTessellationIndexBuffer(unsigned int* destination, const unsigned int* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride)
{
	using namespace meshopt;

	assert(index_count % 3 == 0);
	assert(vertex_positions_stride >= 12 && vertex_positions_stride <= 256);
	assert(vertex_positions_stride % sizeof(float) == 0);

	meshopt_Allocator allocator;

	// next[e] is the corner following e within a triangle
	static const int next[3] = {1, 2, 0};

	// build position remap: for each vertex, which other (canonical) vertex does it map to?
	unsigned int* remap = allocator.allocate<unsigned int>(vertex_count);
	buildPositionRemap(remap, vertex_positions, vertex_count, vertex_positions_stride, allocator);

	// build edge set; this stores all triangle edges but we can look these up by any other wedge
	EdgeHasher edge_hasher = {remap};

	size_t edge_table_size = hashBuckets(index_count);
	unsigned long long* edge_table = allocator.allocate<unsigned long long>(edge_table_size);
	memset(edge_table, -1, edge_table_size * sizeof(unsigned long long));

	// first pass: register each directed edge; first registration wins
	for (size_t i = 0; i < index_count; i += 3)
	{
		for (int e = 0; e < 3; ++e)
		{
			unsigned int i0 = indices[i + e];
			unsigned int i1 = indices[i + next[e]];
			assert(i0 < vertex_count && i1 < vertex_count);

			unsigned long long edge = ((unsigned long long)i0 << 32) | i1;
			unsigned long long* entry = hashLookup(edge_table, edge_table_size, edge_hasher, edge, ~0ull);

			if (*entry == ~0ull)
				*entry = edge;
		}
	}

	// build resulting index buffer: 12 indices for each input triangle
	for (size_t i = 0; i < index_count; i += 3)
	{
		unsigned int patch[12];

		for (int e = 0; e < 3; ++e)
		{
			unsigned int i0 = indices[i + e];
			unsigned int i1 = indices[i + next[e]];
			assert(i0 < vertex_count && i1 < vertex_count);

			// note: this refers to the opposite edge!
			unsigned long long edge = ((unsigned long long)i1 << 32) | i0;
			unsigned long long oppe = *hashLookup(edge_table, edge_table_size, edge_hasher, edge, ~0ull);

			// use the same edge if opposite edge doesn't exist (border)
			oppe = (oppe == ~0ull) ? edge : oppe;

			// triangle index (0, 1, 2)
			patch[e] = i0;

			// opposite edge (3, 4; 5, 6; 7, 8)
			patch[3 + e * 2 + 0] = unsigned(oppe);
			patch[3 + e * 2 + 1] = unsigned(oppe >> 32);

			// dominant vertex (9, 10, 11)
			patch[9 + e] = remap[i0];
		}

		memcpy(destination + i * 4, patch, sizeof(patch));
	}
}
// Rewrites a triangle list so that each triangle's first index is unique (no
// output index appears first in more than one triangle), cloning vertices as
// needed. destination receives index_count indices into the reordered vertex
// list; reorder receives, for each output vertex, the original vertex it
// copies. Returns the output vertex count (<= vertex_count + index_count / 3).
size_t meshopt_generateProvokingIndexBuffer(unsigned int* destination, unsigned int* reorder, const unsigned int* indices, size_t index_count, size_t vertex_count)
{
	assert(index_count % 3 == 0);

	meshopt_Allocator allocator;

	// remap[v] is the output index assigned to original vertex v (~0u = unseen)
	unsigned int* remap = allocator.allocate<unsigned int>(vertex_count);
	memset(remap, -1, vertex_count * sizeof(unsigned int));

	// compute vertex valence; this is used to prioritize least used corner
	// note: we use 8-bit counters for performance; for outlier vertices the valence is incorrect but that just affects the heuristic
	unsigned char* valence = allocator.allocate<unsigned char>(vertex_count);
	memset(valence, 0, vertex_count);

	for (size_t i = 0; i < index_count; ++i)
	{
		unsigned int index = indices[i];
		assert(index < vertex_count);

		valence[index]++;
	}

	unsigned int reorder_offset = 0;

	// assign provoking vertices; leave the rest for the next pass
	for (size_t i = 0; i < index_count; i += 3)
	{
		unsigned int a = indices[i + 0], b = indices[i + 1], c = indices[i + 2];
		assert(a < vertex_count && b < vertex_count && c < vertex_count);

		// try to rotate triangle such that provoking vertex hasn't been seen before
		// if multiple vertices are new, prioritize the one with least valence
		// this reduces the risk that a future triangle will have all three vertices seen
		// (va/vb/vc are ~0u for already-seen vertices so they lose the comparison)
		unsigned int va = remap[a] == ~0u ? valence[a] : ~0u;
		unsigned int vb = remap[b] == ~0u ? valence[b] : ~0u;
		unsigned int vc = remap[c] == ~0u ? valence[c] : ~0u;

		if (vb != ~0u && vb <= va && vb <= vc)
		{
			// abc -> bca
			unsigned int t = a;
			a = b, b = c, c = t;
		}
		else if (vc != ~0u && vc <= va && vc <= vb)
		{
			// abc -> cab
			unsigned int t = c;
			c = b, b = a, a = t;
		}

		// every triangle consumes one fresh output slot for its provoking corner
		unsigned int newidx = reorder_offset;

		// now remap[a] = ~0u or all three vertices are old
		// recording remap[a] makes it possible to remap future references to the same index, conserving space
		if (remap[a] == ~0u)
			remap[a] = newidx;

		// we need to clone the provoking vertex to get a unique index
		// if all three are used the choice is arbitrary since no future triangle will be able to reuse any of these
		reorder[reorder_offset++] = a;

		// note: first vertex is final, the other two will be fixed up in next pass
		destination[i + 0] = newidx;
		destination[i + 1] = b;
		destination[i + 2] = c;

		// update vertex valences for corner heuristic
		valence[a]--;
		valence[b]--;
		valence[c]--;
	}

	// remap or clone non-provoking vertices (iterating to skip provoking vertices)
	// the i/step sequence visits 1, 2, 4, 5, 7, 8, ... — every slot except
	// multiples of 3, which hold the already-final provoking corners
	int step = 1;

	for (size_t i = 1; i < index_count; i += step, step ^= 3)
	{
		unsigned int index = destination[i];

		if (remap[index] == ~0u)
		{
			// we haven't seen the vertex before as a provoking vertex
			// to maintain the reference to the original vertex we need to clone it
			unsigned int newidx = reorder_offset;

			remap[index] = newidx;
			reorder[reorder_offset++] = index;
		}

		destination[i] = remap[index];
	}

	// bound: one slot per triangle (provoking corner) plus at most one per original vertex
	assert(reorder_offset <= vertex_count + index_count / 3);

	return reorder_offset;
}