// WIACompression.cpp
  1. // Copyright 2020 Dolphin Emulator Project
  2. // SPDX-License-Identifier: GPL-2.0-or-later
  3. #include "DiscIO/WIACompression.h"
  4. #include <algorithm>
  5. #include <cstddef>
  6. #include <cstring>
  7. #include <limits>
  8. #include <memory>
  9. #include <optional>
  10. #include <vector>
  11. #include <bzlib.h>
  12. #include <lzma.h>
  13. #include <zstd.h>
  14. #include "Common/Assert.h"
  15. #include "Common/CommonTypes.h"
  16. #include "Common/MathUtil.h"
  17. #include "Common/Swap.h"
  18. #include "DiscIO/LaggedFibonacciGenerator.h"
  19. namespace DiscIO
  20. {
  21. static u32 LZMA2DictionarySize(u8 p)
  22. {
  23. return (static_cast<u32>(2) | (p & 1)) << (p / 2 + 11);
  24. }
  25. Decompressor::~Decompressor() = default;
  26. bool NoneDecompressor::Decompress(const DecompressionBuffer& in, DecompressionBuffer* out,
  27. size_t* in_bytes_read)
  28. {
  29. const size_t length =
  30. std::min(in.bytes_written - *in_bytes_read, out->data.size() - out->bytes_written);
  31. std::memcpy(out->data.data() + out->bytes_written, in.data.data() + *in_bytes_read, length);
  32. *in_bytes_read += length;
  33. out->bytes_written += length;
  34. m_done = in.data.size() == *in_bytes_read;
  35. return true;
  36. }
// decompressed_size: total number of bytes this purge stream expands to.
PurgeDecompressor::PurgeDecompressor(u64 decompressed_size) : m_decompressed_size(decompressed_size)
{
}
// Decodes the WIA "purge" format: a sequence of (offset, size) segment headers,
// each followed by that many bytes of literal data; everything outside the
// segments is zero-filled. The stream ends with a SHA-1 digest covering all
// bytes that were hashed along the way.
bool PurgeDecompressor::Decompress(const DecompressionBuffer& in, DecompressionBuffer* out,
                                   size_t* in_bytes_read)
{
  if (!m_started)
  {
    m_sha1_context = Common::SHA1::CreateContext();

    // Include the exception lists in the SHA-1 calculation (but not in the compression...)
    m_sha1_context->Update(in.data.data(), *in_bytes_read);

    m_started = true;
  }

  while (!m_done && in.bytes_written != *in_bytes_read &&
         (m_segment_bytes_written < sizeof(m_segment) || out->data.size() != out->bytes_written))
  {
    if (m_segment_bytes_written == 0 && *in_bytes_read == in.data.size() - Common::SHA1::DIGEST_LEN)
    {
      // Only the trailing digest remains: zero-fill the rest of the output.
      const size_t zeroes_to_write = std::min<size_t>(m_decompressed_size - m_out_bytes_written,
                                                      out->data.size() - out->bytes_written);

      std::memset(out->data.data() + out->bytes_written, 0, zeroes_to_write);
      out->bytes_written += zeroes_to_write;
      m_out_bytes_written += zeroes_to_write;

      if (m_out_bytes_written == m_decompressed_size && in.bytes_written == in.data.size())
      {
        // Verify the stored SHA-1 against what we hashed while decompressing.
        const auto actual_hash = m_sha1_context->Finish();

        Common::SHA1::Digest expected_hash;
        std::memcpy(expected_hash.data(), in.data.data() + *in_bytes_read, expected_hash.size());

        *in_bytes_read += expected_hash.size();
        m_done = true;

        if (actual_hash != expected_hash)
          return false;
      }

      return true;
    }

    if (m_segment_bytes_written < sizeof(m_segment))
    {
      // Accumulate the next segment header, which may arrive split across calls.
      const size_t bytes_to_copy =
          std::min(in.bytes_written - *in_bytes_read, sizeof(m_segment) - m_segment_bytes_written);

      std::memcpy(reinterpret_cast<u8*>(&m_segment) + m_segment_bytes_written,
                  in.data.data() + *in_bytes_read, bytes_to_copy);
      m_sha1_context->Update(in.data.data() + *in_bytes_read, bytes_to_copy);

      *in_bytes_read += bytes_to_copy;
      m_bytes_read += bytes_to_copy;
      m_segment_bytes_written += bytes_to_copy;
    }

    if (m_segment_bytes_written < sizeof(m_segment))
      return true;

    // Header fields are stored big-endian.
    const size_t offset = Common::swap32(m_segment.offset);
    const size_t size = Common::swap32(m_segment.size);

    if (m_out_bytes_written < offset)
    {
      // Zero-fill the gap up to where this segment's literal data starts.
      const size_t zeroes_to_write =
          std::min(offset - m_out_bytes_written, out->data.size() - out->bytes_written);

      std::memset(out->data.data() + out->bytes_written, 0, zeroes_to_write);
      out->bytes_written += zeroes_to_write;
      m_out_bytes_written += zeroes_to_write;
    }

    if (m_out_bytes_written >= offset && m_out_bytes_written < offset + size)
    {
      // Copy as much literal segment data as both input and output space allow.
      const size_t bytes_to_copy = std::min(
          std::min(offset + size - m_out_bytes_written, out->data.size() - out->bytes_written),
          in.bytes_written - *in_bytes_read);

      std::memcpy(out->data.data() + out->bytes_written, in.data.data() + *in_bytes_read,
                  bytes_to_copy);
      m_sha1_context->Update(in.data.data() + *in_bytes_read, bytes_to_copy);

      *in_bytes_read += bytes_to_copy;
      m_bytes_read += bytes_to_copy;
      out->bytes_written += bytes_to_copy;
      m_out_bytes_written += bytes_to_copy;
    }

    // Segment finished; start reading a fresh header on the next iteration.
    if (m_out_bytes_written >= offset + size)
      m_segment_bytes_written = 0;
  }

  return true;
}
Bzip2Decompressor::~Bzip2Decompressor()
{
  // Only tear down the bzip2 stream if Decompress() ever initialized it.
  if (m_started)
    BZ2_bzDecompressEnd(&m_stream);
}
  118. bool Bzip2Decompressor::Decompress(const DecompressionBuffer& in, DecompressionBuffer* out,
  119. size_t* in_bytes_read)
  120. {
  121. if (!m_started)
  122. {
  123. if (BZ2_bzDecompressInit(&m_stream, 0, 0) != BZ_OK)
  124. return false;
  125. m_started = true;
  126. }
  127. char* const in_ptr = reinterpret_cast<char*>(const_cast<u8*>(in.data.data() + *in_bytes_read));
  128. m_stream.next_in = in_ptr;
  129. m_stream.avail_in = MathUtil::SaturatingCast<u32>(in.bytes_written - *in_bytes_read);
  130. char* const out_ptr = reinterpret_cast<char*>(out->data.data() + out->bytes_written);
  131. m_stream.next_out = out_ptr;
  132. m_stream.avail_out = MathUtil::SaturatingCast<u32>(out->data.size() - out->bytes_written);
  133. const int result = BZ2_bzDecompress(&m_stream);
  134. *in_bytes_read += m_stream.next_in - in_ptr;
  135. out->bytes_written += m_stream.next_out - out_ptr;
  136. m_done = result == BZ_STREAM_END;
  137. return result == BZ_OK || result == BZ_STREAM_END;
  138. }
// Parses the stored compressor properties for LZMA1 (5 bytes: a packed
// lc/lp/pb byte followed by a little-endian dictionary size) or LZMA2
// (1 byte: an encoded dictionary size). Invalid properties only set
// m_error_occurred; the failure is reported on the first Decompress() call.
LZMADecompressor::LZMADecompressor(bool lzma2, const u8* filter_options, size_t filter_options_size)
{
  m_options.preset_dict = nullptr;

  if (!lzma2 && filter_options_size == 5)
  {
    // The dictionary size is stored as a 32-bit little endian unsigned integer
    static_assert(sizeof(m_options.dict_size) == sizeof(u32));
    std::memcpy(&m_options.dict_size, filter_options + 1, sizeof(u32));

    const u8 d = filter_options[0];
    if (d >= (9 * 5 * 5))
    {
      m_error_occurred = true;
    }
    else
    {
      // Unpack d = (pb * 5 + lp) * 9 + lc.
      m_options.lc = d % 9;
      const u8 e = d / 9;
      m_options.pb = e / 5;
      m_options.lp = e % 5;
    }
  }
  else if (lzma2 && filter_options_size == 1)
  {
    const u8 d = filter_options[0];
    if (d > 40)
      m_error_occurred = true;
    else
      m_options.dict_size = d == 40 ? 0xFFFFFFFF : LZMA2DictionarySize(d);
  }
  else
  {
    // Wrong property size for the chosen LZMA variant.
    m_error_occurred = true;
  }

  m_filters[0].id = lzma2 ? LZMA_FILTER_LZMA2 : LZMA_FILTER_LZMA1;
  m_filters[0].options = &m_options;
  m_filters[1].id = LZMA_VLI_UNKNOWN;
  m_filters[1].options = nullptr;
}
LZMADecompressor::~LZMADecompressor()
{
  // Only free liblzma state if a decoder was actually initialized.
  if (m_started)
    lzma_end(&m_stream);
}
  182. bool LZMADecompressor::Decompress(const DecompressionBuffer& in, DecompressionBuffer* out,
  183. size_t* in_bytes_read)
  184. {
  185. if (!m_started)
  186. {
  187. if (m_error_occurred || lzma_raw_decoder(&m_stream, m_filters) != LZMA_OK)
  188. return false;
  189. m_started = true;
  190. }
  191. const u8* const in_ptr = in.data.data() + *in_bytes_read;
  192. m_stream.next_in = in_ptr;
  193. m_stream.avail_in = in.bytes_written - *in_bytes_read;
  194. u8* const out_ptr = out->data.data() + out->bytes_written;
  195. m_stream.next_out = out_ptr;
  196. m_stream.avail_out = out->data.size() - out->bytes_written;
  197. const lzma_ret result = lzma_code(&m_stream, LZMA_RUN);
  198. *in_bytes_read += m_stream.next_in - in_ptr;
  199. out->bytes_written += m_stream.next_out - out_ptr;
  200. m_done = result == LZMA_STREAM_END;
  201. return result == LZMA_OK || result == LZMA_STREAM_END;
  202. }
// Creates the zstd decompression stream; on failure m_stream is null, which
// Decompress() treats as an error.
ZstdDecompressor::ZstdDecompressor()
{
  m_stream = ZSTD_createDStream();
}
ZstdDecompressor::~ZstdDecompressor()
{
  // ZSTD_freeDStream accepts a null pointer, so no check is needed.
  ZSTD_freeDStream(m_stream);
}
  211. bool ZstdDecompressor::Decompress(const DecompressionBuffer& in, DecompressionBuffer* out,
  212. size_t* in_bytes_read)
  213. {
  214. if (!m_stream)
  215. return false;
  216. ZSTD_inBuffer in_buffer{in.data.data(), in.bytes_written, *in_bytes_read};
  217. ZSTD_outBuffer out_buffer{out->data.data(), out->data.size(), out->bytes_written};
  218. const size_t result = ZSTD_decompressStream(m_stream, &out_buffer, &in_buffer);
  219. *in_bytes_read = in_buffer.pos;
  220. out->bytes_written = out_buffer.pos;
  221. m_done = result == 0;
  222. return !ZSTD_isError(result);
  223. }
// decompressor: inner decompressor used for the non-junk chunks.
// decompressed: staging buffer, possibly already holding decompressed data.
// data_offset: position of this data on disc; used to align the junk generator.
// rvz_packed_size: total number of packed bytes this stream may consume.
RVZPackDecompressor::RVZPackDecompressor(std::unique_ptr<Decompressor> decompressor,
                                         DecompressionBuffer decompressed, u64 data_offset,
                                         u32 rvz_packed_size)
    : m_decompressor(std::move(decompressor)), m_decompressed(std::move(decompressed)),
      m_data_offset(data_offset), m_rvz_packed_size(rvz_packed_size)
{
  // Any data already present in m_decompressed counts as packed bytes read.
  m_bytes_read = m_decompressed.bytes_written;
}
  232. bool RVZPackDecompressor::IncrementBytesRead(size_t x)
  233. {
  234. m_bytes_read += x;
  235. return m_bytes_read <= m_rvz_packed_size;
  236. }
// Ensures that at least bytes_to_read bytes starting at decompressed_bytes_read
// are available in m_decompressed. Returns an engaged optional when the caller
// should return that value immediately (false on error, true when more input
// is needed), or std::nullopt when enough data is available to continue.
std::optional<bool> RVZPackDecompressor::ReadToDecompressed(const DecompressionBuffer& in,
                                                            size_t* in_bytes_read,
                                                            size_t decompressed_bytes_read,
                                                            size_t bytes_to_read)
{
  if (m_decompressed.data.size() < decompressed_bytes_read + bytes_to_read)
    m_decompressed.data.resize(decompressed_bytes_read + bytes_to_read);

  if (m_decompressed.bytes_written < decompressed_bytes_read + bytes_to_read)
  {
    const size_t prev_bytes_written = m_decompressed.bytes_written;

    if (!m_decompressor->Decompress(in, &m_decompressed, in_bytes_read))
      return false;

    if (!IncrementBytesRead(m_decompressed.bytes_written - prev_bytes_written))
      return false;

    // Still not enough data; the caller should wait for more input.
    if (m_decompressed.bytes_written < decompressed_bytes_read + bytes_to_read)
      return true;
  }

  return std::nullopt;
}
// Unpacks the RVZ stream: a series of size-prefixed chunks that are either
// "junk" data regenerated by a lagged Fibonacci generator, or real data
// produced by the inner decompressor.
bool RVZPackDecompressor::Decompress(const DecompressionBuffer& in, DecompressionBuffer* out,
                                     size_t* in_bytes_read)
{
  while (out->data.size() != out->bytes_written && !Done())
  {
    if (m_size == 0)
    {
      // Starting a new chunk: read its 32-bit big-endian size prefix.
      if (m_decompressed.bytes_written == m_decompressed_bytes_read)
      {
        m_decompressed.data.resize(sizeof(u32));
        m_decompressed.bytes_written = 0;
        m_decompressed_bytes_read = 0;
      }

      std::optional<bool> result =
          ReadToDecompressed(in, in_bytes_read, m_decompressed_bytes_read, sizeof(u32));
      if (result)
        return *result;

      const u32 size = Common::swap32(m_decompressed.data.data() + m_decompressed_bytes_read);

      // The high bit marks a junk chunk; the low 31 bits are the chunk size.
      m_junk = size & 0x80000000;
      if (m_junk)
      {
        // Junk chunks carry a seed for the lagged Fibonacci generator.
        constexpr size_t SEED_SIZE = LaggedFibonacciGenerator::SEED_SIZE * sizeof(u32);
        constexpr size_t BLOCK_SIZE = 0x8000;

        result = ReadToDecompressed(in, in_bytes_read, m_decompressed_bytes_read + sizeof(u32),
                                    SEED_SIZE);
        if (result)
          return *result;

        m_lfg.SetSeed(m_decompressed.data.data() + m_decompressed_bytes_read + sizeof(u32));
        // Align the generator with this data's position within its block.
        m_lfg.Forward(m_data_offset % BLOCK_SIZE);

        m_decompressed_bytes_read += SEED_SIZE;
      }

      m_decompressed_bytes_read += sizeof(u32);
      m_size = size & 0x7FFFFFFF;
    }

    size_t bytes_to_write = std::min<size_t>(m_size, out->data.size() - out->bytes_written);
    if (m_junk)
    {
      // Junk data is regenerated rather than stored.
      m_lfg.GetBytes(bytes_to_write, out->data.data() + out->bytes_written);
      out->bytes_written += bytes_to_write;
    }
    else
    {
      if (m_decompressed.bytes_written != m_decompressed_bytes_read)
      {
        // Drain data that was already decompressed into the staging buffer.
        bytes_to_write =
            std::min(bytes_to_write, m_decompressed.bytes_written - m_decompressed_bytes_read);

        std::memcpy(out->data.data() + out->bytes_written,
                    m_decompressed.data.data() + m_decompressed_bytes_read, bytes_to_write);

        m_decompressed_bytes_read += bytes_to_write;
        out->bytes_written += bytes_to_write;
      }
      else
      {
        // Decompress directly into out, temporarily shrinking it so the inner
        // decompressor cannot write past the end of the current chunk.
        const size_t prev_out_bytes_written = out->bytes_written;
        const size_t old_out_size = out->data.size();
        const size_t new_out_size = out->bytes_written + bytes_to_write;

        if (new_out_size < old_out_size)
          out->data.resize(new_out_size);

        if (!m_decompressor->Decompress(in, out, in_bytes_read))
          return false;

        out->data.resize(old_out_size);

        bytes_to_write = out->bytes_written - prev_out_bytes_written;

        if (!IncrementBytesRead(bytes_to_write))
          return false;

        // No progress was made; wait for more input.
        if (bytes_to_write == 0)
          return true;
      }
    }

    m_data_offset += bytes_to_write;
    m_size -= static_cast<u32>(bytes_to_write);
  }

  // If out is full but not all data has been read from in, give the decompressor a chance to read
  // from in anyway. This is needed for the case where zstd has read everything except the checksum.
  if (out->data.size() == out->bytes_written && in.bytes_written != *in_bytes_read)
  {
    if (!m_decompressor->Decompress(in, out, in_bytes_read))
      return false;
  }

  return true;
}
// The stream is finished when the current chunk is complete, all declared
// packed bytes were consumed, the staging buffer is drained, and the inner
// decompressor also reports completion.
bool RVZPackDecompressor::Done() const
{
  return m_size == 0 && m_rvz_packed_size == m_bytes_read &&
         m_decompressed.bytes_written == m_decompressed_bytes_read && m_decompressor->Done();
}
// Defaulted special members, defined out-of-line in this translation unit.
Compressor::~Compressor() = default;

PurgeCompressor::PurgeCompressor() = default;
PurgeCompressor::~PurgeCompressor() = default;
  344. bool PurgeCompressor::Start(std::optional<u64> size)
  345. {
  346. m_buffer.clear();
  347. m_bytes_written = 0;
  348. m_sha1_context = Common::SHA1::CreateContext();
  349. return true;
  350. }
// Hashes data that precedes the compressed payload so it is covered by the
// purge digest without being stored in the output buffer.
bool PurgeCompressor::AddPrecedingDataOnlyForPurgeHashing(const u8* data, size_t size)
{
  m_sha1_context->Update(data, size);
  return true;
}
// Encodes `data` in the purge format: for each region of non-zero data, a
// big-endian (offset, size) PurgeSegment header is written followed by the
// literal bytes; long runs of zeroes between segments are simply omitted
// (the decompressor zero-fills them).
bool PurgeCompressor::Compress(const u8* data, size_t size)
{
  // We could add support for calling this twice if we're fine with
  // making the code more complicated, but there's no need to support it
  ASSERT_MSG(DISCIO, m_bytes_written == 0,
             "Calling PurgeCompressor::Compress() twice is not supported");

  // Worst case: one header for the whole input plus room for the digest.
  m_buffer.resize(size + sizeof(PurgeSegment) + Common::SHA1::DIGEST_LEN);

  size_t bytes_read = 0;

  while (true)
  {
    // Find where the next non-zero region begins.
    const auto first_non_zero =
        std::find_if(data + bytes_read, data + size, [](u8 x) { return x != 0; });

    const u32 non_zero_data_start = static_cast<u32>(first_non_zero - data);
    if (non_zero_data_start == size)
      break;

    // Extend the region until a sufficiently long run of zeroes (or the end).
    size_t non_zero_data_end = non_zero_data_start;
    size_t sequence_length = 0;
    for (size_t i = non_zero_data_start; i < size; ++i)
    {
      if (data[i] == 0)
      {
        ++sequence_length;
      }
      else
      {
        sequence_length = 0;
        non_zero_data_end = i + 1;
      }

      // To avoid wasting space, only count runs of zeroes that are of a certain length
      // (unless there is nothing after the run of zeroes, then we might as well always count it)
      if (sequence_length > sizeof(PurgeSegment))
        break;
    }

    const u32 non_zero_data_length = static_cast<u32>(non_zero_data_end - non_zero_data_start);

    // Header fields are stored big-endian.
    const PurgeSegment segment{Common::swap32(non_zero_data_start),
                               Common::swap32(non_zero_data_length)};
    std::memcpy(m_buffer.data() + m_bytes_written, &segment, sizeof(segment));
    m_bytes_written += sizeof(segment);

    std::memcpy(m_buffer.data() + m_bytes_written, data + non_zero_data_start,
                non_zero_data_length);
    m_bytes_written += non_zero_data_length;

    bytes_read = non_zero_data_end;
  }

  return true;
}
// Appends the SHA-1 digest of everything hashed so far (preceding data plus
// the compressed segments) to the output buffer.
bool PurgeCompressor::End()
{
  m_sha1_context->Update(m_buffer.data(), m_bytes_written);

  const auto digest = m_sha1_context->Finish();
  std::memcpy(m_buffer.data() + m_bytes_written, digest.data(), sizeof(digest));
  m_bytes_written += sizeof(digest);

  // Compress() sized the buffer to leave room for exactly this digest.
  ASSERT(m_bytes_written <= m_buffer.size());

  return true;
}
// Read-only view of the compressed output produced so far.
const u8* PurgeCompressor::GetData() const
{
  return m_buffer.data();
}
// Number of valid bytes in GetData().
size_t PurgeCompressor::GetSize() const
{
  return m_bytes_written;
}
// compression_level: bzip2 blockSize100k parameter, forwarded to
// BZ2_bzCompressInit by Start().
Bzip2Compressor::Bzip2Compressor(int compression_level) : m_compression_level(compression_level)
{
}
Bzip2Compressor::~Bzip2Compressor()
{
  // NOTE(review): called unconditionally; assumes m_stream is zero-initialized
  // when Start() was never run, in which case BZ2_bzCompressEnd rejects the
  // null state with BZ_PARAM_ERROR instead of crashing — confirm.
  BZ2_bzCompressEnd(&m_stream);
}
// Begins a new bzip2 stream. The size hint is unused by bzip2.
bool Bzip2Compressor::Start(std::optional<u64> size)
{
  ASSERT_MSG(DISCIO, m_stream.state == nullptr,
             "Called Bzip2Compressor::Start() twice without calling Bzip2Compressor::End()");

  m_buffer.clear();
  m_stream.next_out = reinterpret_cast<char*>(m_buffer.data());

  return BZ2_bzCompressInit(&m_stream, m_compression_level, 0, 0) == BZ_OK;
}
  433. bool Bzip2Compressor::Compress(const u8* data, size_t size)
  434. {
  435. m_stream.next_in = reinterpret_cast<char*>(const_cast<u8*>(data));
  436. m_stream.avail_in = static_cast<unsigned int>(size);
  437. ExpandBuffer(size);
  438. while (m_stream.avail_in != 0)
  439. {
  440. if (m_stream.avail_out == 0)
  441. ExpandBuffer(0x100);
  442. if (BZ2_bzCompress(&m_stream, BZ_RUN) != BZ_RUN_OK)
  443. return false;
  444. }
  445. return true;
  446. }
// Flushes the stream with BZ_FINISH and releases the bzip2 state. Returns
// false if either finishing or teardown failed.
bool Bzip2Compressor::End()
{
  bool success = true;

  while (true)
  {
    if (m_stream.avail_out == 0)
      ExpandBuffer(0x100);

    const int result = BZ2_bzCompress(&m_stream, BZ_FINISH);
    // Anything other than "still finishing" or "done" is an error.
    if (result != BZ_FINISH_OK && result != BZ_STREAM_END)
      success = false;
    if (result != BZ_FINISH_OK)
      break;
  }

  if (BZ2_bzCompressEnd(&m_stream) != BZ_OK)
    success = false;

  return success;
}
  464. void Bzip2Compressor::ExpandBuffer(size_t bytes_to_add)
  465. {
  466. const size_t bytes_written = GetSize();
  467. m_buffer.resize(m_buffer.size() + bytes_to_add);
  468. m_stream.next_out = reinterpret_cast<char*>(m_buffer.data()) + bytes_written;
  469. m_stream.avail_out = static_cast<unsigned int>(m_buffer.size() - bytes_written);
  470. }
// Read-only view of the compressed output produced so far.
const u8* Bzip2Compressor::GetData() const
{
  return m_buffer.data();
}
// Number of valid bytes in GetData(), derived from bzip2's output cursor.
size_t Bzip2Compressor::GetSize() const
{
  return static_cast<size_t>(reinterpret_cast<u8*>(m_stream.next_out) - m_buffer.data());
}
// Sets up raw LZMA1/LZMA2 encoder options from a preset compression level and
// writes the corresponding compressor properties (5 bytes for LZMA1, 1 byte
// for LZMA2) to the optional out parameters so a decompressor can be
// configured identically. On preset failure only m_initialization_failed is
// set; Start() reports the error.
LZMACompressor::LZMACompressor(bool lzma2, int compression_level, u8 compressor_data_out[7],
                               u8* compressor_data_size_out)
{
  // lzma_lzma_preset returns false on success for some reason
  if (lzma_lzma_preset(&m_options, static_cast<uint32_t>(compression_level)))
  {
    m_initialization_failed = true;
    return;
  }

  if (!lzma2)
  {
    if (compressor_data_size_out)
      *compressor_data_size_out = 5;

    if (compressor_data_out)
    {
      ASSERT(m_options.lc < 9);
      ASSERT(m_options.lp < 5);
      ASSERT(m_options.pb < 5);
      // Pack lc/lp/pb into one byte: (pb * 5 + lp) * 9 + lc.
      compressor_data_out[0] =
          static_cast<u8>((m_options.pb * 5 + m_options.lp) * 9 + m_options.lc);

      // The dictionary size is stored as a 32-bit little endian unsigned integer
      static_assert(sizeof(m_options.dict_size) == sizeof(u32));
      std::memcpy(compressor_data_out + 1, &m_options.dict_size, sizeof(u32));
    }
  }
  else
  {
    if (compressor_data_size_out)
      *compressor_data_size_out = 1;

    if (compressor_data_out)
    {
      // Find the smallest encoded value whose dictionary size covers ours.
      u8 encoded_dict_size = 0;
      while (encoded_dict_size < 40 && m_options.dict_size > LZMA2DictionarySize(encoded_dict_size))
        ++encoded_dict_size;

      compressor_data_out[0] = encoded_dict_size;
    }
  }

  m_filters[0].id = lzma2 ? LZMA_FILTER_LZMA2 : LZMA_FILTER_LZMA1;
  m_filters[0].options = &m_options;
  m_filters[1].id = LZMA_VLI_UNKNOWN;
  m_filters[1].options = nullptr;
}
LZMACompressor::~LZMACompressor()
{
  // NOTE(review): called unconditionally; assumes m_stream was initialized to
  // LZMA_STREAM_INIT in the header so lzma_end() is safe even if encoding
  // never started — confirm.
  lzma_end(&m_stream);
}
// Begins a new raw LZMA stream. The size hint is unused by the raw encoder.
bool LZMACompressor::Start(std::optional<u64> size)
{
  // The constructor may have failed to apply the preset.
  if (m_initialization_failed)
    return false;

  m_buffer.clear();
  m_stream.next_out = m_buffer.data();

  return lzma_raw_encoder(&m_stream, m_filters) == LZMA_OK;
}
  533. bool LZMACompressor::Compress(const u8* data, size_t size)
  534. {
  535. m_stream.next_in = data;
  536. m_stream.avail_in = size;
  537. ExpandBuffer(size);
  538. while (m_stream.avail_in != 0)
  539. {
  540. if (m_stream.avail_out == 0)
  541. ExpandBuffer(0x100);
  542. if (lzma_code(&m_stream, LZMA_RUN) != LZMA_OK)
  543. return false;
  544. }
  545. return true;
  546. }
  547. bool LZMACompressor::End()
  548. {
  549. while (true)
  550. {
  551. if (m_stream.avail_out == 0)
  552. ExpandBuffer(0x100);
  553. switch (lzma_code(&m_stream, LZMA_FINISH))
  554. {
  555. case LZMA_OK:
  556. break;
  557. case LZMA_STREAM_END:
  558. return true;
  559. default:
  560. return false;
  561. }
  562. }
  563. }
  564. void LZMACompressor::ExpandBuffer(size_t bytes_to_add)
  565. {
  566. const size_t bytes_written = GetSize();
  567. m_buffer.resize(m_buffer.size() + bytes_to_add);
  568. m_stream.next_out = m_buffer.data() + bytes_written;
  569. m_stream.avail_out = m_buffer.size() - bytes_written;
  570. }
// Read-only view of the compressed output produced so far.
const u8* LZMACompressor::GetData() const
{
  return m_buffer.data();
}
// Number of valid bytes in GetData(), derived from liblzma's output cursor.
size_t LZMACompressor::GetSize() const
{
  return static_cast<size_t>(m_stream.next_out - m_buffer.data());
}
  579. ZstdCompressor::ZstdCompressor(int compression_level)
  580. {
  581. m_stream = ZSTD_createCStream();
  582. if (ZSTD_isError(ZSTD_CCtx_setParameter(m_stream, ZSTD_c_compressionLevel, compression_level)) ||
  583. ZSTD_isError(ZSTD_CCtx_setParameter(m_stream, ZSTD_c_contentSizeFlag, 0)))
  584. {
  585. m_stream = nullptr;
  586. }
  587. }
ZstdCompressor::~ZstdCompressor()
{
  // ZSTD_freeCStream accepts a null pointer, so no check is needed.
  ZSTD_freeCStream(m_stream);
}
  592. bool ZstdCompressor::Start(std::optional<u64> size)
  593. {
  594. if (!m_stream)
  595. return false;
  596. m_buffer.clear();
  597. m_out_buffer = {};
  598. if (ZSTD_isError(ZSTD_CCtx_reset(m_stream, ZSTD_reset_session_only)))
  599. return false;
  600. if (size)
  601. {
  602. if (ZSTD_isError(ZSTD_CCtx_setPledgedSrcSize(m_stream, *size)))
  603. return false;
  604. }
  605. return true;
  606. }
  607. bool ZstdCompressor::Compress(const u8* data, size_t size)
  608. {
  609. ZSTD_inBuffer in_buffer{data, size, 0};
  610. ExpandBuffer(size);
  611. while (in_buffer.size != in_buffer.pos)
  612. {
  613. if (m_out_buffer.size == m_out_buffer.pos)
  614. ExpandBuffer(0x100);
  615. if (ZSTD_isError(ZSTD_compressStream(m_stream, &m_out_buffer, &in_buffer)))
  616. return false;
  617. }
  618. return true;
  619. }
  620. bool ZstdCompressor::End()
  621. {
  622. while (true)
  623. {
  624. if (m_out_buffer.size == m_out_buffer.pos)
  625. ExpandBuffer(0x100);
  626. const size_t result = ZSTD_endStream(m_stream, &m_out_buffer);
  627. if (ZSTD_isError(result))
  628. return false;
  629. if (result == 0)
  630. return true;
  631. }
  632. }
  633. void ZstdCompressor::ExpandBuffer(size_t bytes_to_add)
  634. {
  635. m_buffer.resize(m_buffer.size() + bytes_to_add);
  636. m_out_buffer.dst = m_buffer.data();
  637. m_out_buffer.size = m_buffer.size();
  638. }
  639. } // namespace DiscIO