/*
 * zlib compression. Currently we use stb's zlib_compress.
 *
 * @param[in] data Input data
 * @param[in] data_len Input data size in bytes (up to 2GB)
 * @param[out] out_len Compressed size in bytes
 * @param[in] quality Compression quality (5 or more; usually 8)
 *
 * @return Compressed bytes upon success. NULL when compression fails or any input parameter is invalid.
*/
unsigned char *nanoz_compress(unsigned char *data, int data_len, int *out_len,
int quality);
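
/*
 * Example usage (a minimal sketch; `buf`/`buf_len` are placeholders, and the
 * returned buffer is assumed to be releasable with free() when
 * NANOZ_REALLOC_SIZED is backed by the standard allocator):
 *
 *   int out_len = 0;
 *   unsigned char *z = nanoz_compress(buf, buf_len, &out_len, 8);
 *   if (!z) {
 *     // compression failed or an argument was invalid
 *   } else {
 *     // ... use z[0 .. out_len - 1] ...
 *     free(z);
 *   }
 */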
#if 0 // TODO
nanoz_status_t nanoz_stream_uncompress(nanoz_stream_read *reader, nanoz_stream_writer *writer);
#endif
if (status.repr == wuffs_base__suspension__short_read) {
// ok
} else if (status.repr == wuffs_base__suspension__short_write) {
    // A short write should not happen here: each read must be fully
    // consumed by the corresponding write, so treat it as corruption.
return NANOZ_ERROR_CORRUPTED;
}
static void *nanoz__sbgrowf(void **arr, int increment, int itemsize) {
  // grow capacity geometrically: double it, plus room for `increment` items
  int m = *arr ? 2 * nanoz__sbm(*arr) + increment : increment + 1;
  void *p = NANOZ_REALLOC_SIZED(
      *arr ? nanoz__sbraw(*arr) : 0,
      *arr ? (nanoz__sbm(*arr) * itemsize + sizeof(int) * 2) : 0,
      itemsize * m + sizeof(int) * 2);
  if (!p) {
    return NULL;
  }
  if (!*arr) ((int *)p)[1] = 0;   // fresh buffer: count starts at 0
  *arr = (void *)((int *)p + 2);  // user pointer skips the [capacity, count] header
  nanoz__sbm(*arr) = m;
  return *arr;
}
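
// nanoz__sbgrowf() implements the growth step of an stb-style "stretchy
// buffer": a plain array pointer whose allocation is prefixed by two ints,
// [0] = capacity (nanoz__sbm) and [1] = count (nanoz__sbn). The companion
// macros used throughout this file are presumably shaped like stb's
// stretchy_buffer macros; the following is a sketch for reference, not
// necessarily the exact definitions used here:
#if 0
#define nanoz__sbraw(a) ((int *)(void *)(a)-2)  // allocation base (header)
#define nanoz__sbm(a) nanoz__sbraw(a)[0]        // capacity
#define nanoz__sbn(a) nanoz__sbraw(a)[1]        // count
#define nanoz__sbneedgrow(a, n) ((a) == 0 || nanoz__sbn(a) + (n) >= nanoz__sbm(a))
#define nanoz__sbmaybegrow(a, n) (nanoz__sbneedgrow(a, (n)) ? nanoz__sbgrow(a, n) : 0)
#define nanoz__sbgrow(a, n) nanoz__sbgrowf((void **)&(a), (n), sizeof(*(a)))
#define nanoz__sbpush(a, v) (nanoz__sbmaybegrow(a, 1), (a)[nanoz__sbn(a)++] = (v))
#define nanoz__sbcount(a) ((a) ? nanoz__sbn(a) : 0)
#define nanoz__sbfree(a) ((a) ? NANOZ_FREE(nanoz__sbraw(a)), 0 : 0)
#endif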
// flush whole bytes accumulated in the bit buffer into the output buffer
static unsigned char *nanoz__zlib_flushf(unsigned char *data,
unsigned int *bitbuffer,
int *bitcount) {
while (*bitcount >= 8) {
nanoz__sbpush(data, NANOZ_UCHAR(*bitbuffer));
*bitbuffer >>= 8;
*bitcount -= 8;
}
return data;
}
// reverse the low `codebits` bits of `code` (DEFLATE stores Huffman codes
// most-significant-bit first)
static int nanoz__zlib_bitrev(int code, int codebits) {
int res = 0;
while (codebits--) {
res = (res << 1) | (code & 1);
code >>= 1;
}
return res;
}
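
// nanoz__zlib_flushf() and nanoz__zlib_bitrev() back a small LSB-first bit
// writer: codes are OR'd into a bit accumulator and completed bytes are
// flushed to the stretchy output buffer. The driver macros and the fixed
// (BTYPE=1) Huffman table are presumably along stb's lines; a sketch for
// reference, not this file's exact definitions:
#if 0
#define nanoz__zlib_flush() (out = nanoz__zlib_flushf(out, &bitbuf, &bitcount))
#define nanoz__zlib_add(code, codebits) \
  (bitbuf |= (code) << bitcount, bitcount += (codebits), nanoz__zlib_flush())
#define nanoz__zlib_huffa(b, c) nanoz__zlib_add(nanoz__zlib_bitrev(b, c), c)
// fixed Huffman codes for literal/length symbols (RFC 1951, section 3.2.6)
#define nanoz__zlib_huff1(n) nanoz__zlib_huffa(0x30 + (n), 8)       // 0..143
#define nanoz__zlib_huff2(n) nanoz__zlib_huffa(0x190 + (n)-144, 9)  // 144..255
#define nanoz__zlib_huff3(n) nanoz__zlib_huffa(0 + (n)-256, 7)      // 256..279
#define nanoz__zlib_huff4(n) nanoz__zlib_huffa(0xc0 + (n)-280, 8)   // 280..287
#define nanoz__zlib_huff(n)                    \
  ((n) <= 143   ? nanoz__zlib_huff1(n)         \
   : (n) <= 255 ? nanoz__zlib_huff2(n)         \
   : (n) <= 279 ? nanoz__zlib_huff3(n)         \
                : nanoz__zlib_huff4(n))
#define nanoz__zlib_huffb(n) ((n) <= 143 ? nanoz__zlib_huff1(n) : nanoz__zlib_huff2(n))
#endif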
// count how many leading bytes of a and b match, capped at `limit` and the
// DEFLATE maximum match length of 258
static unsigned int nanoz__zlib_countm(unsigned char *a, unsigned char *b,
int limit) {
int i;
for (i = 0; i < limit && i < 258; ++i)
if (a[i] != b[i]) break;
return i;
}
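
// nanoz__zhash() (used below) hashes the next three input bytes to pick a
// bucket of previous match positions. A sketch of the presumed definition,
// mirroring stb's stbiw__zhash; the real one may differ:
#if 0
static unsigned int nanoz__zhash(unsigned char *data) {
  unsigned int hash = data[0] + (data[1] << 8) + (data[2] << 16);
  hash ^= hash << 3;
  hash += hash >> 5;
  hash ^= hash << 4;
  hash += hash >> 17;
  hash ^= hash << 25;
  hash += hash >> 6;
  return hash;
}
#endif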
for (i = 0; i < nanoz__ZHASH; ++i) hash_table[i] = NULL;
i = 0;
while (i < data_len - 3) {
// hash next 3 bytes of data to be compressed
int h = nanoz__zhash(data + i) & (nanoz__ZHASH - 1), best = 3;
unsigned char *bestloc = 0;
unsigned char **hlist = hash_table[h];
int n = nanoz__sbcount(hlist);
for (j = 0; j < n; ++j) {
if (hlist[j] - data > i - 32768) { // if entry lies within window
int d = nanoz__zlib_countm(hlist[j], data + i, data_len - i);
if (d >= best) {
best = d;
bestloc = hlist[j];
}
}
}
    // when the hash chain for this bucket gets too long, delete half the entries
if (hash_table[h] && nanoz__sbn(hash_table[h]) == 2 * quality) {
NANOZ_MEMMOVE(hash_table[h], hash_table[h] + quality,
sizeof(hash_table[h][0]) * quality);
nanoz__sbn(hash_table[h]) = quality;
}
nanoz__sbpush(hash_table[h], data + i);
if (bestloc) {
// "lazy matching" - check match at *next* byte, and if it's better, do
// cur byte as literal
h = nanoz__zhash(data + i + 1) & (nanoz__ZHASH - 1);
hlist = hash_table[h];
n = nanoz__sbcount(hlist);
for (j = 0; j < n; ++j) {
if (hlist[j] - data > i - 32767) {
int e = nanoz__zlib_countm(hlist[j], data + i + 1, data_len - i - 1);
if (e > best) { // if next match is better, bail on current match
bestloc = NULL;
break;
}
}
}
}
if (bestloc) {
int d = (int)(data + i - bestloc); // distance back
// NANOZ_ASSERT(d <= 32767 && best <= 258);
if (d <= 32767 && best <= 258) {
// OK
} else {
        return NULL;  // FIXME: leaks hash_table and out on this error path
}
      // find the length code whose range contains `best`, then emit it
      for (j = 0; best > lengthc[j + 1] - 1; ++j)
        ;
nanoz__zlib_huff(j + 257);
if (lengtheb[j]) nanoz__zlib_add(best - lengthc[j], lengtheb[j]);
      // likewise, find and emit the distance code for `d`
      for (j = 0; d > distc[j + 1] - 1; ++j)
        ;
nanoz__zlib_add(nanoz__zlib_bitrev(j, 5), 5);
if (disteb[j]) nanoz__zlib_add(d - distc[j], disteb[j]);
i += best;
} else {
nanoz__zlib_huffb(data[i]);
++i;
}
}
// write out final bytes
for (; i < data_len; ++i) nanoz__zlib_huffb(data[i]);
nanoz__zlib_huff(256); // end of block
// pad with 0 bits to byte boundary
while (bitcount) nanoz__zlib_add(0, 1);
for (i = 0; i < nanoz__ZHASH; ++i) (void)nanoz__sbfree(hash_table[i]);
NANOZ_FREE(hash_table);
  // store uncompressed instead if compression was worse: stored output costs
  // a 2-byte zlib header plus 5 bytes of block overhead per 32767-byte block
  if (nanoz__sbn(out) > data_len + 2 + ((data_len + 32766) / 32767) * 5) {
nanoz__sbn(out) = 2; // truncate to DEFLATE 32K window and FLEVEL = 1
for (j = 0; j < data_len;) {
int blocklen = data_len - j;
if (blocklen > 32767) blocklen = 32767;
      nanoz__sbpush(
          out,
          data_len - j == blocklen);  // BFINAL = 1 on last block; BTYPE = 0 -- no compression
nanoz__sbpush(out, NANOZ_UCHAR(blocklen)); // LEN
nanoz__sbpush(out, NANOZ_UCHAR(blocklen >> 8));
nanoz__sbpush(out, NANOZ_UCHAR(~blocklen)); // NLEN
nanoz__sbpush(out, NANOZ_UCHAR(~blocklen >> 8));
memcpy(out + nanoz__sbn(out), data + j, blocklen);
nanoz__sbn(out) += blocklen;
j += blocklen;
}
}
{
    // compute adler32 on input; 5552 is zlib's NMAX, the longest run of
    // bytes that can be summed before s2 could overflow 32 bits
    unsigned int s1 = 1, s2 = 0;
    int blocklen = (int)(data_len % 5552);
j = 0;
while (j < data_len) {
for (i = 0; i < blocklen; ++i) {
s1 += data[j + i];
s2 += s1;
}
s1 %= 65521;
s2 %= 65521;
j += blocklen;
blocklen = 5552;
}
nanoz__sbpush(out, NANOZ_UCHAR(s2 >> 8));
nanoz__sbpush(out, NANOZ_UCHAR(s2));
nanoz__sbpush(out, NANOZ_UCHAR(s1 >> 8));
nanoz__sbpush(out, NANOZ_UCHAR(s1));
}
*out_len = nanoz__sbn(out);
// make returned pointer freeable
NANOZ_MEMMOVE(nanoz__sbraw(out), out, *out_len);
return (unsigned char *)nanoz__sbraw(out);
}