/*
    LZ4 HC - High Compression Mode of LZ4
    Copyright (C) 2011-2015, Yann Collet.

    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are
    met:

    * Redistributions of source code must retain the above copyright
    notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above
    copyright notice, this list of conditions and the following disclaimer
    in the documentation and/or other materials provided with the
    distribution.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

    You can contact the author at :
       - LZ4 source repository : https://github.com/lz4/lz4
       - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
*/
/* note : lz4hc is not an independent module, it requires lz4.h/lz4.c for proper compilation */
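/* Build sketch (illustrative only ; the program name is a placeholder, not part of the project) :
 *     cc -O2 -c lz4.c lz4hc.c
 *     cc -O2 myprogram.c lz4.o lz4hc.o -o myprogram
 */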


/* *************************************
*  Tuning Parameter
***************************************/

/*!
 * HEAPMODE :
 * Selects how the default compression function allocates its workspace memory :
 * on the stack (0 : fastest) or on the heap (1 : requires malloc()).
 * Since the workspace is rather large, heap mode is recommended.
 */
#ifndef LZ4HC_HEAPMODE
#  define LZ4HC_HEAPMODE 1
#endif
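/* The default can be overridden from the build line, e.g. (illustrative compiler invocation) :
 *     cc -DLZ4HC_HEAPMODE=0 -c lz4hc.c      => workspace allocated on the stack
 */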


/* *************************************
*  Dependency
***************************************/
#include "lz4hc.h"


/* *************************************
*  Local Compiler Options
***************************************/
#if defined(__GNUC__)
#  pragma GCC diagnostic ignored "-Wunused-function"
#endif

#if defined (__clang__)
#  pragma clang diagnostic ignored "-Wunused-function"
#endif


/* *************************************
*  Common LZ4 definition
***************************************/
#define LZ4_COMMONDEFS_ONLY
#include "lz4.c"


/* *************************************
*  Local Constants
***************************************/
#define OPTIMAL_ML (int)((ML_MASK-1)+MINMATCH)


/**************************************
*  Local Macros
**************************************/
#define HASH_FUNCTION(i)       (((i) * 2654435761U) >> ((MINMATCH*8)-LZ4HC_HASH_LOG))
/* #define DELTANEXTU16(p)        chainTable[(p) & LZ4HC_MAXD_MASK] */   /* flexible, LZ4HC_MAXD dependent */
#define DELTANEXTU16(p)        chainTable[(U16)(p)]   /* faster */

static U32 LZ4HC_hashPtr(const void* ptr) { return HASH_FUNCTION(LZ4_read32(ptr)); }



/**************************************
*  HC Compression
**************************************/
static void LZ4HC_init (LZ4HC_CCtx_internal* hc4, const BYTE* start)
{
    MEM_INIT((void*)hc4->hashTable, 0, sizeof(hc4->hashTable));
    MEM_INIT(hc4->chainTable, 0xFF, sizeof(hc4->chainTable));
    hc4->nextToUpdate = 64 KB;
    hc4->base = start - 64 KB;
    hc4->end = start;
    hc4->dictBase = start - 64 KB;
    hc4->dictLimit = 64 KB;
    hc4->lowLimit = 64 KB;
}


/* Update chains up to ip (excluded) */
FORCE_INLINE void LZ4HC_Insert (LZ4HC_CCtx_internal* hc4, const BYTE* ip)
{
    U16* const chainTable = hc4->chainTable;
    U32* const hashTable  = hc4->hashTable;
    const BYTE* const base = hc4->base;
    U32 const target = (U32)(ip - base);
    U32 idx = hc4->nextToUpdate;

    while (idx < target) {
        U32 const h = LZ4HC_hashPtr(base+idx);
        size_t delta = idx - hashTable[h];
        if (delta>MAX_DISTANCE) delta = MAX_DISTANCE;
        DELTANEXTU16(idx) = (U16)delta;
        hashTable[h] = idx;
        idx++;
    }

    hc4->nextToUpdate = target;
}


FORCE_INLINE int LZ4HC_InsertAndFindBestMatch (LZ4HC_CCtx_internal* hc4,   /* Index table will be updated */
                                               const BYTE* ip, const BYTE* const iLimit,
                                               const BYTE** matchpos,
                                               const int maxNbAttempts)
{
    U16* const chainTable = hc4->chainTable;
    U32* const HashTable = hc4->hashTable;
    const BYTE* const base = hc4->base;
    const BYTE* const dictBase = hc4->dictBase;
    const U32 dictLimit = hc4->dictLimit;
    const U32 lowLimit = (hc4->lowLimit + 64 KB > (U32)(ip-base)) ? hc4->lowLimit : (U32)(ip - base) - (64 KB - 1);
    U32 matchIndex;
    int nbAttempts=maxNbAttempts;
    size_t ml=0;

    /* HC4 match finder */
    LZ4HC_Insert(hc4, ip);
    matchIndex = HashTable[LZ4HC_hashPtr(ip)];

    while ((matchIndex>=lowLimit) && (nbAttempts)) {
        nbAttempts--;
        if (matchIndex >= dictLimit) {
            const BYTE* const match = base + matchIndex;
            if (*(match+ml) == *(ip+ml)
                && (LZ4_read32(match) == LZ4_read32(ip)))
            {
                size_t const mlt = LZ4_count(ip+MINMATCH, match+MINMATCH, iLimit) + MINMATCH;
                if (mlt > ml) { ml = mlt; *matchpos = match; }
            }
        } else {
            const BYTE* const match = dictBase + matchIndex;
            if (LZ4_read32(match) == LZ4_read32(ip)) {
                size_t mlt;
                const BYTE* vLimit = ip + (dictLimit - matchIndex);
                if (vLimit > iLimit) vLimit = iLimit;
                mlt = LZ4_count(ip+MINMATCH, match+MINMATCH, vLimit) + MINMATCH;
                if ((ip+mlt == vLimit) && (vLimit < iLimit))
                    mlt += LZ4_count(ip+mlt, base+dictLimit, iLimit);
                if (mlt > ml) { ml = mlt; *matchpos = base + matchIndex; }   /* virtual matchpos */
            }
        }
        matchIndex -= DELTANEXTU16(matchIndex);
    }

    return (int)ml;
}


FORCE_INLINE int LZ4HC_InsertAndGetWiderMatch (
    LZ4HC_CCtx_internal* hc4,
    const BYTE* const ip,
    const BYTE* const iLowLimit,
    const BYTE* const iHighLimit,
    int longest,
    const BYTE** matchpos,
    const BYTE** startpos,
    const int maxNbAttempts)
{
    U16* const chainTable = hc4->chainTable;
    U32* const HashTable = hc4->hashTable;
    const BYTE* const base = hc4->base;
    const U32 dictLimit = hc4->dictLimit;
    const BYTE* const lowPrefixPtr = base + dictLimit;
    const U32 lowLimit = (hc4->lowLimit + 64 KB > (U32)(ip-base)) ? hc4->lowLimit : (U32)(ip - base) - (64 KB - 1);
    const BYTE* const dictBase = hc4->dictBase;
    U32   matchIndex;
    int nbAttempts = maxNbAttempts;
    int delta = (int)(ip-iLowLimit);


    /* First Match */
    LZ4HC_Insert(hc4, ip);
    matchIndex = HashTable[LZ4HC_hashPtr(ip)];

    while ((matchIndex>=lowLimit) && (nbAttempts)) {
        nbAttempts--;
        if (matchIndex >= dictLimit) {
            const BYTE* matchPtr = base + matchIndex;
            if (*(iLowLimit + longest) == *(matchPtr - delta + longest)) {
                if (LZ4_read32(matchPtr) == LZ4_read32(ip)) {
                    int mlt = MINMATCH + LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, iHighLimit);
                    int back = 0;

                    while ((ip+back > iLowLimit)
                           && (matchPtr+back > lowPrefixPtr)
                           && (ip[back-1] == matchPtr[back-1]))
                            back--;

                    mlt -= back;

                    if (mlt > longest) {
                        longest = (int)mlt;
                        *matchpos = matchPtr+back;
                        *startpos = ip+back;
                    }
                }
            }
        } else {
            const BYTE* const matchPtr = dictBase + matchIndex;
            if (LZ4_read32(matchPtr) == LZ4_read32(ip)) {
                size_t mlt;
                int back=0;
                const BYTE* vLimit = ip + (dictLimit - matchIndex);
                if (vLimit > iHighLimit) vLimit = iHighLimit;
                mlt = LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, vLimit) + MINMATCH;
                if ((ip+mlt == vLimit) && (vLimit < iHighLimit))
                    mlt += LZ4_count(ip+mlt, base+dictLimit, iHighLimit);
                while ((ip+back > iLowLimit) && (matchIndex+back > lowLimit) && (ip[back-1] == matchPtr[back-1])) back--;
                mlt -= back;
                if ((int)mlt > longest) { longest = (int)mlt; *matchpos = base + matchIndex + back; *startpos = ip+back; }
            }
        }
        matchIndex -= DELTANEXTU16(matchIndex);
    }

    return longest;
}


typedef enum { noLimit = 0, limitedOutput = 1 } limitedOutput_directive;

#define LZ4HC_DEBUG 0
#if LZ4HC_DEBUG
static unsigned debug = 0;
#endif

FORCE_INLINE int LZ4HC_encodeSequence (
    const BYTE** ip,
    BYTE** op,
    const BYTE** anchor,
    int matchLength,
    const BYTE* const match,
    limitedOutput_directive limitedOutputBuffer,
    BYTE* oend)
{
    int length;
    BYTE* token;

#if LZ4HC_DEBUG
    if (debug) printf("literal : %u  --  match : %u  --  offset : %u\n", (U32)(*ip - *anchor), (U32)matchLength, (U32)(*ip-match));
#endif

    /* Encode Literal length */
    length = (int)(*ip - *anchor);
    token = (*op)++;
    if ((limitedOutputBuffer) && ((*op + (length>>8) + length + (2 + 1 + LASTLITERALS)) > oend)) return 1;   /* Check output limit */
    if (length>=(int)RUN_MASK) { int len; *token=(RUN_MASK<<ML_BITS); len = length-RUN_MASK; for(; len > 254 ; len-=255) *(*op)++ = 255;  *(*op)++ = (BYTE)len; }
    else *token = (BYTE)(length<<ML_BITS);

    /* Copy Literals */
    LZ4_wildCopy(*op, *anchor, (*op) + length);
    *op += length;

    /* Encode Offset */
    LZ4_writeLE16(*op, (U16)(*ip-match)); *op += 2;

    /* Encode MatchLength */
    length = (int)(matchLength-MINMATCH);
    if ((limitedOutputBuffer) && (*op + (length>>8) + (1 + LASTLITERALS) > oend)) return 1;   /* Check output limit */
    if (length>=(int)ML_MASK) {
        *token += ML_MASK;
        length -= ML_MASK;
        for(; length > 509 ; length-=510) { *(*op)++ = 255; *(*op)++ = 255; }
        if (length > 254) { length-=255; *(*op)++ = 255; }
        *(*op)++ = (BYTE)length;
    } else {
        *token += (BYTE)(length);
    }

    /* Prepare next loop */
    *ip += matchLength;
    *anchor = *ip;

    return 0;
}


static int LZ4HC_compress_generic (
    LZ4HC_CCtx_internal* const ctx,
    const char* const source,
    char* const dest,
    int const inputSize,
    int const maxOutputSize,
    int compressionLevel,
    limitedOutput_directive limit
    )
{
    const BYTE* ip = (const BYTE*) source;
    const BYTE* anchor = ip;
    const BYTE* const iend = ip + inputSize;
    const BYTE* const mflimit = iend - MFLIMIT;
    const BYTE* const matchlimit = (iend - LASTLITERALS);

    BYTE* op = (BYTE*) dest;
    BYTE* const oend = op + maxOutputSize;

    unsigned maxNbAttempts;
    int   ml, ml2, ml3, ml0;
    const BYTE* ref = NULL;
    const BYTE* start2 = NULL;
    const BYTE* ref2 = NULL;
    const BYTE* start3 = NULL;
    const BYTE* ref3 = NULL;
    const BYTE* start0;
    const BYTE* ref0;

    /* init */
    if (compressionLevel > LZ4HC_MAX_CLEVEL) compressionLevel = LZ4HC_MAX_CLEVEL;
    if (compressionLevel < 1) compressionLevel = LZ4HC_DEFAULT_CLEVEL;
    maxNbAttempts = 1 << (compressionLevel-1);
    ctx->end += inputSize;

    ip++;

    /* Main Loop */
    while (ip < mflimit) {
        ml = LZ4HC_InsertAndFindBestMatch (ctx, ip, matchlimit, (&ref), maxNbAttempts);
        if (!ml) { ip++; continue; }

        /* saved, in case we would skip too much */
        start0 = ip;
        ref0 = ref;
        ml0 = ml;

_Search2:
        if (ip+ml < mflimit)
            ml2 = LZ4HC_InsertAndGetWiderMatch(ctx, ip + ml - 2, ip + 0, matchlimit, ml, &ref2, &start2, maxNbAttempts);
        else ml2 = ml;

        if (ml2 == ml) { /* No better match */
            if (LZ4HC_encodeSequence(&ip, &op, &anchor, ml, ref, limit, oend)) return 0;
            continue;
        }

        if (start0 < ip) {
            if (start2 < ip + ml0) {  /* empirical */
                ip = start0;
                ref = ref0;
                ml = ml0;
            }
        }

        /* Here, start0==ip */
        if ((start2 - ip) < 3) {  /* First Match too small : removed */
            ml = ml2;
            ip = start2;
            ref = ref2;
            goto _Search2;
        }

_Search3:
        /*
        * Currently we have :
        * ml2 > ml1, and
        * ip1+3 <= ip2 (usually < ip1+ml1)
        */
        if ((start2 - ip) < OPTIMAL_ML) {
            int correction;
            int new_ml = ml;
            if (new_ml > OPTIMAL_ML) new_ml = OPTIMAL_ML;
            if (ip+new_ml > start2 + ml2 - MINMATCH) new_ml = (int)(start2 - ip) + ml2 - MINMATCH;
            correction = new_ml - (int)(start2 - ip);
            if (correction > 0) {
                start2 += correction;
                ref2 += correction;
                ml2 -= correction;
            }
        }
        /* Now, we have start2 = ip+new_ml, with new_ml = min(ml, OPTIMAL_ML=18) */

        if (start2 + ml2 < mflimit)
            ml3 = LZ4HC_InsertAndGetWiderMatch(ctx, start2 + ml2 - 3, start2, matchlimit, ml2, &ref3, &start3, maxNbAttempts);
        else ml3 = ml2;

        if (ml3 == ml2) {  /* No better match : 2 sequences to encode */
            /* ip & ref are known; Now for ml */
            if (start2 < ip+ml)  ml = (int)(start2 - ip);
            /* Now, encode 2 sequences */
            if (LZ4HC_encodeSequence(&ip, &op, &anchor, ml, ref, limit, oend)) return 0;
            ip = start2;
            if (LZ4HC_encodeSequence(&ip, &op, &anchor, ml2, ref2, limit, oend)) return 0;
            continue;
        }

        if (start3 < ip+ml+3) {  /* Not enough space for match 2 : remove it */
            if (start3 >= (ip+ml)) {  /* can write Seq1 immediately ==> Seq2 is removed, so Seq3 becomes Seq1 */
                if (start2 < ip+ml) {
                    int correction = (int)(ip+ml - start2);
                    start2 += correction;
                    ref2 += correction;
                    ml2 -= correction;
                    if (ml2 < MINMATCH) {
                        start2 = start3;
                        ref2 = ref3;
                        ml2 = ml3;
                    }
                }

                if (LZ4HC_encodeSequence(&ip, &op, &anchor, ml, ref, limit, oend)) return 0;
                ip  = start3;
                ref = ref3;
                ml  = ml3;

                start0 = start2;
                ref0 = ref2;
                ml0 = ml2;
                goto _Search2;
            }

            start2 = start3;
            ref2 = ref3;
            ml2 = ml3;
            goto _Search3;
        }

        /*
        * OK, now we have 3 ascending matches; let's write at least the first one
        * ip & ref are known; Now for ml
        */
        if (start2 < ip+ml) {
            if ((start2 - ip) < (int)ML_MASK) {
                int correction;
                if (ml > OPTIMAL_ML) ml = OPTIMAL_ML;
                if (ip + ml > start2 + ml2 - MINMATCH) ml = (int)(start2 - ip) + ml2 - MINMATCH;
                correction = ml - (int)(start2 - ip);
                if (correction > 0) {
                    start2 += correction;
                    ref2 += correction;
                    ml2 -= correction;
                }
            } else {
                ml = (int)(start2 - ip);
            }
        }
        if (LZ4HC_encodeSequence(&ip, &op, &anchor, ml, ref, limit, oend)) return 0;

        ip = start2;
        ref = ref2;
        ml = ml2;

        start2 = start3;
        ref2 = ref3;
        ml2 = ml3;

        goto _Search3;
    }

    /* Encode Last Literals */
    {   int lastRun = (int)(iend - anchor);
        if ((limit) && (((char*)op - dest) + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > (U32)maxOutputSize)) return 0;  /* Check output limit */
        if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK<<ML_BITS); lastRun-=RUN_MASK; for(; lastRun > 254 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; }
        else *op++ = (BYTE)(lastRun<<ML_BITS);
        memcpy(op, anchor, iend - anchor);
        op += iend-anchor;
    }

    /* End */
    return (int) (((char*)op)-dest);
}


int LZ4_sizeofStateHC(void) { return sizeof(LZ4_streamHC_t); }

int LZ4_compress_HC_extStateHC (void* state, const char* src, char* dst, int srcSize, int maxDstSize, int compressionLevel)
{
    LZ4HC_CCtx_internal* ctx = &((LZ4_streamHC_t*)state)->internal_donotuse;
    if (((size_t)(state)&(sizeof(void*)-1)) != 0) return 0;   /* Error : state is not aligned for pointers (32 or 64 bits) */
    LZ4HC_init (ctx, (const BYTE*)src);
    if (maxDstSize < LZ4_compressBound(srcSize))
        return LZ4HC_compress_generic (ctx, src, dst, srcSize, maxDstSize, compressionLevel, limitedOutput);
    else
        return LZ4HC_compress_generic (ctx, src, dst, srcSize, maxDstSize, compressionLevel, noLimit);
}
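
/* Usage sketch for the caller-provided-state variant (illustrative only ; error handling trimmed).
 * The state must span at least LZ4_sizeofStateHC() bytes and be pointer-aligned, as checked above.
 *
 *     void* const state = malloc((size_t)LZ4_sizeofStateHC());
 *     int const cSize = LZ4_compress_HC_extStateHC(state, src, dst, srcSize, dstCapacity, 9);
 *     free(state);
 *
 * src, dst, srcSize and dstCapacity are caller-provided ; a dstCapacity of
 * LZ4_compressBound(srcSize) is enough to make compression always succeed.
 */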

int LZ4_compress_HC(const char* src, char* dst, int srcSize, int maxDstSize, int compressionLevel)
{
    int cSize;
#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1
    LZ4_streamHC_t* const statePtr = (LZ4_streamHC_t*)malloc(sizeof(LZ4_streamHC_t));
    if (statePtr==NULL) return 0;   /* allocation failure : nothing compressed */
#else
    LZ4_streamHC_t state;
    LZ4_streamHC_t* const statePtr = &state;
#endif
    cSize = LZ4_compress_HC_extStateHC(statePtr, src, dst, srcSize, maxDstSize, compressionLevel);
#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1
    free(statePtr);
#endif
    return cSize;
}
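
/* One-shot usage sketch (illustrative only ; src and srcSize are caller-provided) :
 *
 *     int const dstCapacity = LZ4_compressBound(srcSize);
 *     char* const dst = (char*)malloc((size_t)dstCapacity);
 *     int const cSize = LZ4_compress_HC(src, dst, srcSize, dstCapacity, LZ4HC_DEFAULT_CLEVEL);
 *     free(dst);
 *
 * cSize > 0 is the compressed size ; 0 means compression failed (e.g. dst too small).
 */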



/**************************************
*  Streaming Functions
**************************************/
/* allocation */
LZ4_streamHC_t* LZ4_createStreamHC(void) { return (LZ4_streamHC_t*)malloc(sizeof(LZ4_streamHC_t)); }
int             LZ4_freeStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr) { free(LZ4_streamHCPtr); return 0; }


/* initialization */
void LZ4_resetStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel)
{
    LZ4_STATIC_ASSERT(sizeof(LZ4HC_CCtx_internal) <= sizeof(size_t) * LZ4_STREAMHCSIZE_SIZET);   /* if compilation fails here, LZ4_STREAMHCSIZE must be increased */
    LZ4_streamHCPtr->internal_donotuse.base = NULL;
    LZ4_streamHCPtr->internal_donotuse.compressionLevel = (unsigned)compressionLevel;
}

int LZ4_loadDictHC (LZ4_streamHC_t* LZ4_streamHCPtr, const char* dictionary, int dictSize)
{
    LZ4HC_CCtx_internal* ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
    if (dictSize > 64 KB) {
        dictionary += dictSize - 64 KB;
        dictSize = 64 KB;
    }
    LZ4HC_init (ctxPtr, (const BYTE*)dictionary);
    if (dictSize >= 4) LZ4HC_Insert (ctxPtr, (const BYTE*)dictionary +(dictSize-3));
    ctxPtr->end = (const BYTE*)dictionary + dictSize;
    return dictSize;
}


/* compression */

static void LZ4HC_setExternalDict(LZ4HC_CCtx_internal* ctxPtr, const BYTE* newBlock)
{
    if (ctxPtr->end >= ctxPtr->base + 4) LZ4HC_Insert (ctxPtr, ctxPtr->end-3);   /* Referencing remaining dictionary content */
    /* Only one memory segment for extDict, so any previous extDict is lost at this stage */
    ctxPtr->lowLimit  = ctxPtr->dictLimit;
    ctxPtr->dictLimit = (U32)(ctxPtr->end - ctxPtr->base);
    ctxPtr->dictBase  = ctxPtr->base;
    ctxPtr->base = newBlock - ctxPtr->dictLimit;
    ctxPtr->end  = newBlock;
    ctxPtr->nextToUpdate = ctxPtr->dictLimit;   /* match referencing will resume from there */
}

static int LZ4_compressHC_continue_generic (LZ4_streamHC_t* LZ4_streamHCPtr,
                                            const char* source, char* dest,
                                            int inputSize, int maxOutputSize, limitedOutput_directive limit)
{
    LZ4HC_CCtx_internal* ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
    /* auto-init if forgotten */
    if (ctxPtr->base == NULL) LZ4HC_init (ctxPtr, (const BYTE*) source);

    /* Check overflow */
    if ((size_t)(ctxPtr->end - ctxPtr->base) > 2 GB) {
        size_t dictSize = (size_t)(ctxPtr->end - ctxPtr->base) - ctxPtr->dictLimit;
        if (dictSize > 64 KB) dictSize = 64 KB;
        LZ4_loadDictHC(LZ4_streamHCPtr, (const char*)(ctxPtr->end) - dictSize, (int)dictSize);
    }

    /* Check if blocks follow each other */
    if ((const BYTE*)source != ctxPtr->end) LZ4HC_setExternalDict(ctxPtr, (const BYTE*)source);

    /* Check overlapping input/dictionary space */
    {   const BYTE* sourceEnd = (const BYTE*) source + inputSize;
        const BYTE* const dictBegin = ctxPtr->dictBase + ctxPtr->lowLimit;
        const BYTE* const dictEnd   = ctxPtr->dictBase + ctxPtr->dictLimit;
        if ((sourceEnd > dictBegin) && ((const BYTE*)source < dictEnd)) {
            if (sourceEnd > dictEnd) sourceEnd = dictEnd;
            ctxPtr->lowLimit = (U32)(sourceEnd - ctxPtr->dictBase);
            if (ctxPtr->dictLimit - ctxPtr->lowLimit < 4) ctxPtr->lowLimit = ctxPtr->dictLimit;
        }
    }

    return LZ4HC_compress_generic (ctxPtr, source, dest, inputSize, maxOutputSize, ctxPtr->compressionLevel, limit);
}

int LZ4_compress_HC_continue (LZ4_streamHC_t* LZ4_streamHCPtr, const char* source, char* dest, int inputSize, int maxOutputSize)
{
    if (maxOutputSize < LZ4_compressBound(inputSize))
        return LZ4_compressHC_continue_generic (LZ4_streamHCPtr, source, dest, inputSize, maxOutputSize, limitedOutput);
    else
        return LZ4_compressHC_continue_generic (LZ4_streamHCPtr, source, dest, inputSize, maxOutputSize, noLimit);
}


/* dictionary saving */

int LZ4_saveDictHC (LZ4_streamHC_t* LZ4_streamHCPtr, char* safeBuffer, int dictSize)
{
    LZ4HC_CCtx_internal* const streamPtr = &LZ4_streamHCPtr->internal_donotuse;
    int const prefixSize = (int)(streamPtr->end - (streamPtr->base + streamPtr->dictLimit));
    if (dictSize > 64 KB) dictSize = 64 KB;
    if (dictSize < 4) dictSize = 0;
    if (dictSize > prefixSize) dictSize = prefixSize;
    memmove(safeBuffer, streamPtr->end - dictSize, dictSize);
    {   U32 const endIndex = (U32)(streamPtr->end - streamPtr->base);
        streamPtr->end = (const BYTE*)safeBuffer + dictSize;
        streamPtr->base = streamPtr->end - endIndex;
        streamPtr->dictLimit = endIndex - dictSize;
        streamPtr->lowLimit = endIndex - dictSize;
        if (streamPtr->nextToUpdate < streamPtr->dictLimit) streamPtr->nextToUpdate = streamPtr->dictLimit;
    }
    return dictSize;
}
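
/* Streaming usage sketch (illustrative only ; loop and buffer management omitted).
 * Blocks must be compressed in order ; each block may reference up to 64 KB of previous history.
 *
 *     LZ4_streamHC_t* const ctx = LZ4_createStreamHC();
 *     LZ4_resetStreamHC(ctx, 9);
 *     LZ4_loadDictHC(ctx, dictBuffer, dictSize);                                       (optional)
 *     cSize = LZ4_compress_HC_continue(ctx, inBlock, outBlock, inSize, outCapacity);   (repeat per block)
 *     LZ4_saveDictHC(ctx, dictBuffer, 64 KB);      (optional : preserve history before reusing inBlock)
 *     LZ4_freeStreamHC(ctx);
 */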


/***********************************
*  Deprecated Functions
***********************************/
/* These functions currently generate deprecation warnings */
/* Deprecated compression functions */
int LZ4_compressHC(const char* src, char* dst, int srcSize) { return LZ4_compress_HC (src, dst, srcSize, LZ4_compressBound(srcSize), 0); }
int LZ4_compressHC_limitedOutput(const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_HC(src, dst, srcSize, maxDstSize, 0); }
int LZ4_compressHC2(const char* src, char* dst, int srcSize, int cLevel) { return LZ4_compress_HC (src, dst, srcSize, LZ4_compressBound(srcSize), cLevel); }
int LZ4_compressHC2_limitedOutput(const char* src, char* dst, int srcSize, int maxDstSize, int cLevel) { return LZ4_compress_HC(src, dst, srcSize, maxDstSize, cLevel); }
int LZ4_compressHC_withStateHC (void* state, const char* src, char* dst, int srcSize) { return LZ4_compress_HC_extStateHC (state, src, dst, srcSize, LZ4_compressBound(srcSize), 0); }
int LZ4_compressHC_limitedOutput_withStateHC (void* state, const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_HC_extStateHC (state, src, dst, srcSize, maxDstSize, 0); }
int LZ4_compressHC2_withStateHC (void* state, const char* src, char* dst, int srcSize, int cLevel) { return LZ4_compress_HC_extStateHC(state, src, dst, srcSize, LZ4_compressBound(srcSize), cLevel); }
int LZ4_compressHC2_limitedOutput_withStateHC (void* state, const char* src, char* dst, int srcSize, int maxDstSize, int cLevel) { return LZ4_compress_HC_extStateHC(state, src, dst, srcSize, maxDstSize, cLevel); }
int LZ4_compressHC_continue (LZ4_streamHC_t* ctx, const char* src, char* dst, int srcSize) { return LZ4_compress_HC_continue (ctx, src, dst, srcSize, LZ4_compressBound(srcSize)); }
int LZ4_compressHC_limitedOutput_continue (LZ4_streamHC_t* ctx, const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_HC_continue (ctx, src, dst, srcSize, maxDstSize); }


/* Deprecated streaming functions */
int LZ4_sizeofStreamStateHC(void) { return LZ4_STREAMHCSIZE; }

int LZ4_resetStreamStateHC(void* state, char* inputBuffer)
{
    LZ4HC_CCtx_internal *ctx = &((LZ4_streamHC_t*)state)->internal_donotuse;
    if ((((size_t)state) & (sizeof(void*)-1)) != 0) return 1;   /* Error : state is not aligned for pointers (32 or 64 bits) */
    LZ4HC_init(ctx, (const BYTE*)inputBuffer);
    ctx->inputBuffer = (BYTE*)inputBuffer;
    return 0;
}

void* LZ4_createHC (char* inputBuffer)
{
    LZ4_streamHC_t* hc4 = (LZ4_streamHC_t*)ALLOCATOR(1, sizeof(LZ4_streamHC_t));
    if (hc4 == NULL) return NULL;   /* not enough memory */
    LZ4HC_init (&hc4->internal_donotuse, (const BYTE*)inputBuffer);
    hc4->internal_donotuse.inputBuffer = (BYTE*)inputBuffer;
    return hc4;
}

int LZ4_freeHC (void* LZ4HC_Data) { FREEMEM(LZ4HC_Data); return 0; }

int LZ4_compressHC2_continue (void* LZ4HC_Data, const char* source, char* dest, int inputSize, int compressionLevel)
{
    return LZ4HC_compress_generic (&((LZ4_streamHC_t*)LZ4HC_Data)->internal_donotuse, source, dest, inputSize, 0, compressionLevel, noLimit);
}

int LZ4_compressHC2_limitedOutput_continue (void* LZ4HC_Data, const char* source, char* dest, int inputSize, int maxOutputSize, int compressionLevel)
{
    return LZ4HC_compress_generic (&((LZ4_streamHC_t*)LZ4HC_Data)->internal_donotuse, source, dest, inputSize, maxOutputSize, compressionLevel, limitedOutput);
}

char* LZ4_slideInputBufferHC(void* LZ4HC_Data)
{
    LZ4HC_CCtx_internal* const hc4 = &((LZ4_streamHC_t*)LZ4HC_Data)->internal_donotuse;
    int const dictSize = LZ4_saveDictHC((LZ4_streamHC_t*)LZ4HC_Data, (char*)(hc4->inputBuffer), 64 KB);
    return (char*)(hc4->inputBuffer + dictSize);
}