/* This is a very slightly modified version of perf/bz2.c, with a
single change that causes it to overrun a global array by one byte.
The change in question reduces the size of myprintf_buf from
1000 to 70, at line 1278. ptrcheck should report exactly one
error, resulting from an out-of-range access to this array. */
// This benchmark is basically bzip2 (mashed to be a single file)
// compressing and decompressing some data. It tests Valgrind's handling of
// realistic and "difficult" (ie. lots of branches and memory accesses)
// integer code. Execution is spread out over quite a few basic blocks;
// --profile-flags indicates that covering the top 90% of
// dynamic BB counts requires considering the top 51 basic blocks.
// This program can be used both as part of the performance test
// suite, in which case we want it to run for quite a while,
// and as part of the regression (correctness) test suite, in
// which case we want it to run quickly and be verbose.
// So it does the latter iff given a command line arg.
// Licensing: the code within is mostly taken from bzip2, which has a BSD
// license. There is a little code from VEX, which is licensed under GPLv2
// And it's all written by Julian Seward.
#define BZ_NO_STDIO
/*-------------------------------------------------------------*/
/*--- Private header file for the library. ---*/
/*--- bzlib_private.h ---*/
/*-------------------------------------------------------------*/
/*--
This file is a part of bzip2 and/or libbzip2, a program and
library for lossless, block-sorting data compression.
Copyright (C) 1996-2004 Julian R Seward. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. The origin of this software must not be misrepresented; you must
not claim that you wrote the original software. If you use this
software in a product, an acknowledgment in the product
documentation would be appreciated but is not required.
3. Altered source versions must be plainly marked as such, and must
not be misrepresented as being the original software.
4. The name of the author may not be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Julian Seward, Cambridge, UK.
jseward@bzip.org
bzip2/libbzip2 version 1.0 of 21 March 2000
This program is based on (at least) the work of:
Mike Burrows
David Wheeler
Peter Fenwick
Alistair Moffat
Radford Neal
Ian H. Witten
Robert Sedgewick
Jon L. Bentley
For more information on these sources, see the manual.
--*/
#ifndef _BZLIB_PRIVATE_H
#define _BZLIB_PRIVATE_H
#include <stdlib.h>
#ifndef BZ_NO_STDIO
#include <stdio.h>
#include <ctype.h>
#include <string.h>
#endif
/*-------------------------------------------------------------*/
/*--- Public header file for the library. ---*/
/*--- bzlib.h ---*/
/*-------------------------------------------------------------*/
/*--
This file is a part of bzip2 and/or libbzip2, a program and
library for lossless, block-sorting data compression.
Copyright (C) 1996-2004 Julian R Seward. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. The origin of this software must not be misrepresented; you must
not claim that you wrote the original software. If you use this
software in a product, an acknowledgment in the product
documentation would be appreciated but is not required.
3. Altered source versions must be plainly marked as such, and must
not be misrepresented as being the original software.
4. The name of the author may not be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Julian Seward, Cambridge, UK.
jseward@bzip.org
bzip2/libbzip2 version 1.0 of 21 March 2000
This program is based on (at least) the work of:
Mike Burrows
David Wheeler
Peter Fenwick
Alistair Moffat
Radford Neal
Ian H. Witten
Robert Sedgewick
Jon L. Bentley
For more information on these sources, see the manual.
--*/
#ifndef _BZLIB_H
#define _BZLIB_H
#ifdef __cplusplus
extern "C" {
#endif
#define BZ_RUN 0
#define BZ_FLUSH 1
#define BZ_FINISH 2
#define BZ_OK 0
#define BZ_RUN_OK 1
#define BZ_FLUSH_OK 2
#define BZ_FINISH_OK 3
#define BZ_STREAM_END 4
#define BZ_SEQUENCE_ERROR (-1)
#define BZ_PARAM_ERROR (-2)
#define BZ_MEM_ERROR (-3)
#define BZ_DATA_ERROR (-4)
#define BZ_DATA_ERROR_MAGIC (-5)
#define BZ_IO_ERROR (-6)
#define BZ_UNEXPECTED_EOF (-7)
#define BZ_OUTBUFF_FULL (-8)
#define BZ_CONFIG_ERROR (-9)
typedef
struct {
char *next_in;
unsigned int avail_in;
unsigned int total_in_lo32;
unsigned int total_in_hi32;
char *next_out;
unsigned int avail_out;
unsigned int total_out_lo32;
unsigned int total_out_hi32;
void *state;
void *(*bzalloc)(void *,int,int);
void (*bzfree)(void *,void *);
void *opaque;
}
bz_stream;
#ifndef BZ_IMPORT
#define BZ_EXPORT
#endif
#ifndef BZ_NO_STDIO
/* Need a definition for FILE */
#include <stdio.h>
#endif
#ifdef _WIN32
# include <windows.h>
# ifdef small
/* windows.h defines small as char */
# undef small
# endif
# ifdef BZ_EXPORT
# define BZ_API(func) WINAPI func
# define BZ_EXTERN extern
# else
/* import windows dll dynamically */
# define BZ_API(func) (WINAPI * func)
# define BZ_EXTERN
# endif
#else
# define BZ_API(func) func
# define BZ_EXTERN extern
#endif
/*-- Core (low-level) library functions --*/
BZ_EXTERN int BZ_API(BZ2_bzCompressInit) (
bz_stream* strm,
int blockSize100k,
int verbosity,
int workFactor
);
BZ_EXTERN int BZ_API(BZ2_bzCompress) (
bz_stream* strm,
int action
);
BZ_EXTERN int BZ_API(BZ2_bzCompressEnd) (
bz_stream* strm
);
BZ_EXTERN int BZ_API(BZ2_bzDecompressInit) (
bz_stream *strm,
int verbosity,
int small
);
BZ_EXTERN int BZ_API(BZ2_bzDecompress) (
bz_stream* strm
);
BZ_EXTERN int BZ_API(BZ2_bzDecompressEnd) (
bz_stream *strm
);
/*-- High(er) level library functions --*/
#ifndef BZ_NO_STDIO
#define BZ_MAX_UNUSED 5000
typedef void BZFILE;
BZ_EXTERN BZFILE* BZ_API(BZ2_bzReadOpen) (
int* bzerror,
FILE* f,
int verbosity,
int small,
void* unused,
int nUnused
);
BZ_EXTERN void BZ_API(BZ2_bzReadClose) (
int* bzerror,
BZFILE* b
);
BZ_EXTERN void BZ_API(BZ2_bzReadGetUnused) (
int* bzerror,
BZFILE* b,
void** unused,
int* nUnused
);
BZ_EXTERN int BZ_API(BZ2_bzRead) (
int* bzerror,
BZFILE* b,
void* buf,
int len
);
BZ_EXTERN BZFILE* BZ_API(BZ2_bzWriteOpen) (
int* bzerror,
FILE* f,
int blockSize100k,
int verbosity,
int workFactor
);
BZ_EXTERN void BZ_API(BZ2_bzWrite) (
int* bzerror,
BZFILE* b,
void* buf,
int len
);
BZ_EXTERN void BZ_API(BZ2_bzWriteClose) (
int* bzerror,
BZFILE* b,
int abandon,
unsigned int* nbytes_in,
unsigned int* nbytes_out
);
BZ_EXTERN void BZ_API(BZ2_bzWriteClose64) (
int* bzerror,
BZFILE* b,
int abandon,
unsigned int* nbytes_in_lo32,
unsigned int* nbytes_in_hi32,
unsigned int* nbytes_out_lo32,
unsigned int* nbytes_out_hi32
);
#endif
/*-- Utility functions --*/
BZ_EXTERN int BZ_API(BZ2_bzBuffToBuffCompress) (
char* dest,
unsigned int* destLen,
char* source,
unsigned int sourceLen,
int blockSize100k,
int verbosity,
int workFactor
);
BZ_EXTERN int BZ_API(BZ2_bzBuffToBuffDecompress) (
char* dest,
unsigned int* destLen,
char* source,
unsigned int sourceLen,
int small,
int verbosity
);
/*--
Code contributed by Yoshioka Tsuneo
(QWF00133@niftyserve.or.jp/tsuneo-y@is.aist-nara.ac.jp),
to support better zlib compatibility.
This code is not _officially_ part of libbzip2 (yet);
I haven't tested it, documented it, or considered the
threading-safeness of it.
If this code breaks, please contact both Yoshioka and me.
--*/
BZ_EXTERN const char * BZ_API(BZ2_bzlibVersion) (
void
);
#ifndef BZ_NO_STDIO
BZ_EXTERN BZFILE * BZ_API(BZ2_bzopen) (
const char *path,
const char *mode
);
BZ_EXTERN BZFILE * BZ_API(BZ2_bzdopen) (
int fd,
const char *mode
);
BZ_EXTERN int BZ_API(BZ2_bzread) (
BZFILE* b,
void* buf,
int len
);
BZ_EXTERN int BZ_API(BZ2_bzwrite) (
BZFILE* b,
void* buf,
int len
);
BZ_EXTERN int BZ_API(BZ2_bzflush) (
BZFILE* b
);
BZ_EXTERN void BZ_API(BZ2_bzclose) (
BZFILE* b
);
BZ_EXTERN const char * BZ_API(BZ2_bzerror) (
BZFILE *b,
int *errnum
);
#endif
#ifdef __cplusplus
}
#endif
#endif
/*-------------------------------------------------------------*/
/*--- end bzlib.h ---*/
/*-------------------------------------------------------------*/
/*-- General stuff. --*/
#define BZ_VERSION "1.0.3, 17-Oct-2004"
typedef char Char;
typedef unsigned char Bool;
typedef unsigned char UChar;
typedef int Int32;
typedef unsigned int UInt32;
typedef short Int16;
typedef unsigned short UInt16;
#define True ((Bool)1)
#define False ((Bool)0)
#ifndef __GNUC__
#define __inline__ /* */
#endif
#ifndef BZ_NO_STDIO
extern void BZ2_bz__AssertH__fail ( int errcode );
#define AssertH(cond,errcode) \
{ if (!(cond)) BZ2_bz__AssertH__fail ( errcode ); }
#if BZ_DEBUG
#define AssertD(cond,msg) \
{ if (!(cond)) { \
fprintf ( stderr, \
"\n\nlibbzip2(debug build): internal error\n\t%s\n", msg );\
exit(1); \
}}
#else
#define AssertD(cond,msg) /* */
#endif
#define VPrintf0(zf) \
fprintf(stderr,zf)
#define VPrintf1(zf,za1) \
fprintf(stderr,zf,za1)
#define VPrintf2(zf,za1,za2) \
fprintf(stderr,zf,za1,za2)
#define VPrintf3(zf,za1,za2,za3) \
fprintf(stderr,zf,za1,za2,za3)
#define VPrintf4(zf,za1,za2,za3,za4) \
fprintf(stderr,zf,za1,za2,za3,za4)
#define VPrintf5(zf,za1,za2,za3,za4,za5) \
fprintf(stderr,zf,za1,za2,za3,za4,za5)
#else
extern void bz_internal_error ( int errcode );
#define AssertH(cond,errcode) \
{ if (!(cond)) bz_internal_error ( errcode ); }
#define AssertD(cond,msg) /* */
#define VPrintf0(zf) \
vex_printf(zf)
#define VPrintf1(zf,za1) \
vex_printf(zf,za1)
#define VPrintf2(zf,za1,za2) \
vex_printf(zf,za1,za2)
#define VPrintf3(zf,za1,za2,za3) \
vex_printf(zf,za1,za2,za3)
#define VPrintf4(zf,za1,za2,za3,za4) \
vex_printf(zf,za1,za2,za3,za4)
#define VPrintf5(zf,za1,za2,za3,za4,za5) \
vex_printf(zf,za1,za2,za3,za4,za5)
#endif
#define BZALLOC(nnn) (strm->bzalloc)(strm->opaque,(nnn),1)
#define BZFREE(ppp) (strm->bzfree)(strm->opaque,(ppp))
/*-- Header bytes. --*/
#define BZ_HDR_B 0x42 /* 'B' */
#define BZ_HDR_Z 0x5a /* 'Z' */
#define BZ_HDR_h 0x68 /* 'h' */
#define BZ_HDR_0 0x30 /* '0' */
/*-- Constants for the back end. --*/
#define BZ_MAX_ALPHA_SIZE 258
#define BZ_MAX_CODE_LEN 23
#define BZ_RUNA 0
#define BZ_RUNB 1
#define BZ_N_GROUPS 6
#define BZ_G_SIZE 50
#define BZ_N_ITERS 4
#define BZ_MAX_SELECTORS (2 + (900000 / BZ_G_SIZE))
/*-- Stuff for randomising repetitive blocks. --*/
extern Int32 BZ2_rNums[512];
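/* In the macros below, rNToGo counts down from successive entries of
   BZ2_rNums; BZ_RAND_MASK is 1 only on the last count of each run,
   giving the sparse pseudo-random bit stream that is XORed into the
   data when a block is randomised. */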
#define BZ_RAND_DECLS \
Int32 rNToGo; \
Int32 rTPos \
#define BZ_RAND_INIT_MASK \
s->rNToGo = 0; \
s->rTPos = 0 \
#define BZ_RAND_MASK ((s->rNToGo == 1) ? 1 : 0)
#define BZ_RAND_UPD_MASK \
if (s->rNToGo == 0) { \
s->rNToGo = BZ2_rNums[s->rTPos]; \
s->rTPos++; \
if (s->rTPos == 512) s->rTPos = 0; \
} \
s->rNToGo--;
/*-- Stuff for doing CRCs. --*/
extern UInt32 BZ2_crc32Table[256];
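/* Table-driven CRC-32, computed MSB-first: each update combines the top
   byte of the running CRC with the next data byte to index
   BZ2_crc32Table. */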
#define BZ_INITIALISE_CRC(crcVar) \
{ \
crcVar = 0xffffffffL; \
}
#define BZ_FINALISE_CRC(crcVar) \
{ \
crcVar = ~(crcVar); \
}
#define BZ_UPDATE_CRC(crcVar,cha) \
{ \
crcVar = (crcVar << 8) ^ \
BZ2_crc32Table[(crcVar >> 24) ^ \
((UChar)cha)]; \
}
/*-- States and modes for compression. --*/
#define BZ_M_IDLE 1
#define BZ_M_RUNNING 2
#define BZ_M_FLUSHING 3
#define BZ_M_FINISHING 4
#define BZ_S_OUTPUT 1
#define BZ_S_INPUT 2
#define BZ_N_RADIX 2
#define BZ_N_QSORT 12
#define BZ_N_SHELL 18
#define BZ_N_OVERSHOOT (BZ_N_RADIX + BZ_N_QSORT + BZ_N_SHELL + 2)
/*-- Structure holding all the compression-side stuff. --*/
typedef
struct {
/* pointer back to the struct bz_stream */
bz_stream* strm;
/* mode this stream is in, and whether inputting */
/* or outputting data */
Int32 mode;
Int32 state;
/* remembers avail_in when flush/finish requested */
UInt32 avail_in_expect;
/* for doing the block sorting */
UInt32* arr1;
UInt32* arr2;
UInt32* ftab;
Int32 origPtr;
/* aliases for arr1 and arr2 */
UInt32* ptr;
UChar* block;
UInt16* mtfv;
UChar* zbits;
/* for deciding when to use the fallback sorting algorithm */
Int32 workFactor;
/* run-length-encoding of the input */
UInt32 state_in_ch;
Int32 state_in_len;
BZ_RAND_DECLS;
/* input and output limits and current posns */
Int32 nblock;
Int32 nblockMAX;
Int32 numZ;
Int32 state_out_pos;
/* map of bytes used in block */
Int32 nInUse;
Bool inUse[256];
UChar unseqToSeq[256];
/* the buffer for bit stream creation */
UInt32 bsBuff;
Int32 bsLive;
/* block and combined CRCs */
UInt32 blockCRC;
UInt32 combinedCRC;
/* misc administratium */
Int32 verbosity;
Int32 blockNo;
Int32 blockSize100k;
/* stuff for coding the MTF values */
Int32 nMTF;
Int32 mtfFreq [BZ_MAX_ALPHA_SIZE];
UChar selector [BZ_MAX_SELECTORS];
UChar selectorMtf[BZ_MAX_SELECTORS];
UChar len [BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
Int32 code [BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
Int32 rfreq [BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
/* second dimension: only 3 needed; 4 makes index calculations faster */
UInt32 len_pack[BZ_MAX_ALPHA_SIZE][4];
}
EState;
/*-- externs for compression. --*/
extern void
BZ2_blockSort ( EState* );
extern void
BZ2_compressBlock ( EState*, Bool );
extern void
BZ2_bsInitWrite ( EState* );
extern void
BZ2_hbAssignCodes ( Int32*, UChar*, Int32, Int32, Int32 );
extern void
BZ2_hbMakeCodeLengths ( UChar*, Int32*, Int32, Int32 );
/*-- states for decompression. --*/
#define BZ_X_IDLE 1
#define BZ_X_OUTPUT 2
#define BZ_X_MAGIC_1 10
#define BZ_X_MAGIC_2 11
#define BZ_X_MAGIC_3 12
#define BZ_X_MAGIC_4 13
#define BZ_X_BLKHDR_1 14
#define BZ_X_BLKHDR_2 15
#define BZ_X_BLKHDR_3 16
#define BZ_X_BLKHDR_4 17
#define BZ_X_BLKHDR_5 18
#define BZ_X_BLKHDR_6 19
#define BZ_X_BCRC_1 20
#define BZ_X_BCRC_2 21
#define BZ_X_BCRC_3 22
#define BZ_X_BCRC_4 23
#define BZ_X_RANDBIT 24
#define BZ_X_ORIGPTR_1 25
#define BZ_X_ORIGPTR_2 26
#define BZ_X_ORIGPTR_3 27
#define BZ_X_MAPPING_1 28
#define BZ_X_MAPPING_2 29
#define BZ_X_SELECTOR_1 30
#define BZ_X_SELECTOR_2 31
#define BZ_X_SELECTOR_3 32
#define BZ_X_CODING_1 33
#define BZ_X_CODING_2 34
#define BZ_X_CODING_3 35
#define BZ_X_MTF_1 36
#define BZ_X_MTF_2 37
#define BZ_X_MTF_3 38
#define BZ_X_MTF_4 39
#define BZ_X_MTF_5 40
#define BZ_X_MTF_6 41
#define BZ_X_ENDHDR_2 42
#define BZ_X_ENDHDR_3 43
#define BZ_X_ENDHDR_4 44
#define BZ_X_ENDHDR_5 45
#define BZ_X_ENDHDR_6 46
#define BZ_X_CCRC_1 47
#define BZ_X_CCRC_2 48
#define BZ_X_CCRC_3 49
#define BZ_X_CCRC_4 50
/*-- Constants for the fast MTF decoder. --*/
#define MTFA_SIZE 4096
#define MTFL_SIZE 16
/*-- Structure holding all the decompression-side stuff. --*/
typedef
struct {
/* pointer back to the struct bz_stream */
bz_stream* strm;
/* state indicator for this stream */
Int32 state;
/* for doing the final run-length decoding */
UChar state_out_ch;
Int32 state_out_len;
Bool blockRandomised;
BZ_RAND_DECLS;
/* the buffer for bit stream reading */
UInt32 bsBuff;
Int32 bsLive;
/* misc administratium */
Int32 blockSize100k;
Bool smallDecompress;
Int32 currBlockNo;
Int32 verbosity;
/* for undoing the Burrows-Wheeler transform */
Int32 origPtr;
UInt32 tPos;
Int32 k0;
Int32 unzftab[256];
Int32 nblock_used;
Int32 cftab[257];
Int32 cftabCopy[257];
/* for undoing the Burrows-Wheeler transform (FAST) */
UInt32 *tt;
/* for undoing the Burrows-Wheeler transform (SMALL) */
UInt16 *ll16;
UChar *ll4;
/* stored and calculated CRCs */
UInt32 storedBlockCRC;
UInt32 storedCombinedCRC;
UInt32 calculatedBlockCRC;
UInt32 calculatedCombinedCRC;
/* map of bytes used in block */
Int32 nInUse;
Bool inUse[256];
Bool inUse16[16];
UChar seqToUnseq[256];
/* for decoding the MTF values */
UChar mtfa [MTFA_SIZE];
Int32 mtfbase[256 / MTFL_SIZE];
UChar selector [BZ_MAX_SELECTORS];
UChar selectorMtf[BZ_MAX_SELECTORS];
UChar len [BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
Int32 limit [BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
Int32 base [BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
Int32 perm [BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
Int32 minLens[BZ_N_GROUPS];
/* save area for scalars in the main decompress code */
Int32 save_i;
Int32 save_j;
Int32 save_t;
Int32 save_alphaSize;
Int32 save_nGroups;
Int32 save_nSelectors;
Int32 save_EOB;
Int32 save_groupNo;
Int32 save_groupPos;
Int32 save_nextSym;
Int32 save_nblockMAX;
Int32 save_nblock;
Int32 save_es;
Int32 save_N;
Int32 save_curr;
Int32 save_zt;
Int32 save_zn;
Int32 save_zvec;
Int32 save_zj;
Int32 save_gSel;
Int32 save_gMinlen;
Int32* save_gLimit;
Int32* save_gBase;
Int32* save_gPerm;
}
DState;
/*-- Macros for decompression. --*/
#define BZ_GET_FAST(cccc) \
s->tPos = s->tt[s->tPos]; \
cccc = (UChar)(s->tPos & 0xff); \
s->tPos >>= 8;
#define BZ_GET_FAST_C(cccc) \
c_tPos = c_tt[c_tPos]; \
cccc = (UChar)(c_tPos & 0xff); \
c_tPos >>= 8;
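/* Small-memory decompression stores each T/T^(-1) entry (up to about 20
   bits, since nblock <= 900000) as 16 bits in ll16[] plus a 4-bit nibble
   packed two-per-byte in ll4[]; the SET_LL4/GET_LL4 and SET_LL/GET_LL
   macros below maintain that split representation. */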
#define SET_LL4(i,n) \
{ if (((i) & 0x1) == 0) \
s->ll4[(i) >> 1] = (s->ll4[(i) >> 1] & 0xf0) | (n); else \
s->ll4[(i) >> 1] = (s->ll4[(i) >> 1] & 0x0f) | ((n) << 4); \
}
#define GET_LL4(i) \
((((UInt32)(s->ll4[(i) >> 1])) >> (((i) << 2) & 0x4)) & 0xF)
#define SET_LL(i,n) \
{ s->ll16[i] = (UInt16)(n & 0x0000ffff); \
SET_LL4(i, n >> 16); \
}
#define GET_LL(i) \
(((UInt32)s->ll16[i]) | (GET_LL4(i) << 16))
#define BZ_GET_SMALL(cccc) \
cccc = BZ2_indexIntoF ( s->tPos, s->cftab ); \
s->tPos = GET_LL(s->tPos);
/*-- externs for decompression. --*/
extern Int32
BZ2_indexIntoF ( Int32, Int32* );
extern Int32
BZ2_decompress ( DState* );
extern void
BZ2_hbCreateDecodeTables ( Int32*, Int32*, Int32*, UChar*,
Int32, Int32, Int32 );
#endif
/*-- BZ_NO_STDIO seems to make NULL disappear on some platforms. --*/
#ifdef BZ_NO_STDIO
#ifndef NULL
#define NULL 0
#endif
#endif
/*-------------------------------------------------------------*/
/*--- end bzlib_private.h ---*/
/*-------------------------------------------------------------*/
/* Something which has the same size as void* on the host. That is,
it is 32 bits on a 32-bit host and 64 bits on a 64-bit host, and so
it can safely be coerced to and from a pointer type on the host
machine. */
typedef unsigned long HWord;
typedef char HChar;
typedef signed int Int;
typedef unsigned int UInt;
typedef signed long long int Long;
typedef unsigned long long int ULong;
/////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////
static HWord (*serviceFn)(HWord,HWord) = 0;
#if 0
static char* my_strcpy ( char* dest, const char* src )
{
char* dest_orig = dest;
while (*src) *dest++ = *src++;
*dest = 0;
return dest_orig;
}
static void* my_memcpy ( void *dest, const void *src, int sz )
{
const char *s = (const char *)src;
char *d = (char *)dest;
while (sz--)
*d++ = *s++;
return dest;
}
static void* my_memmove( void *dst, const void *src, unsigned int len )
{
register char *d;
register char *s;
if ( dst > src ) {
d = (char *)dst + len - 1;
s = (char *)src + len - 1;
while ( len >= 4 ) {
*d-- = *s--;
*d-- = *s--;
*d-- = *s--;
*d-- = *s--;
len -= 4;
}
while ( len-- ) {
*d-- = *s--;
}
} else if ( dst < src ) {
d = (char *)dst;
s = (char *)src;
while ( len >= 4 ) {
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
*d++ = *s++;
len -= 4;
}
while ( len-- ) {
*d++ = *s++;
}
}
return dst;
}
#endif
char* my_strcat ( char* dest, const char* src )
{
char* dest_orig = dest;
while (*dest) dest++;
while (*src) *dest++ = *src++;
*dest = 0;
return dest_orig;
}
/////////////////////////////////////////////////////////////////////
static void vex_log_bytes ( char* p, int n )
{
int i;
for (i = 0; i < n; i++)
(*serviceFn)( 1, (int)p[i] );
}
/*---------------------------------------------------------*/
/*--- vex_printf ---*/
/*---------------------------------------------------------*/
/* This should be the only <...> include in the entire VEX library.
New code for vex_util.c should go above this point. */
#include <stdarg.h>
static HChar vex_toupper ( HChar c )
{
if (c >= 'a' && c <= 'z')
return c + ('A' - 'a');
else
return c;
}
/* Explicitly set noinline so the test can check it is in the backtrace. */
static __attribute__(( noinline)) Int vex_strlen ( const HChar* str )
{
Int i = 0;
while (str[i] != 0) i++;
return i;
}
Bool vex_streq ( const HChar* s1, const HChar* s2 )
{
while (True) {
if (*s1 == 0 && *s2 == 0)
return True;
if (*s1 != *s2)
return False;
s1++;
s2++;
}
}
/* Some flags. */
#define VG_MSG_SIGNED 1 /* The value is signed. */
#define VG_MSG_ZJUSTIFY 2 /* Must justify with '0'. */
#define VG_MSG_LJUSTIFY 4 /* Must justify on the left. */
#define VG_MSG_PAREN 8 /* Parenthesize if present (for %y) */
#define VG_MSG_COMMA 16 /* Add commas to numbers (for %d, %u) */
/* Copy a string into the buffer. */
static UInt
myvprintf_str ( void(*send)(HChar), Int flags, Int width, HChar* str,
Bool capitalise )
{
# define MAYBE_TOUPPER(ch) (capitalise ? vex_toupper(ch) : (ch))
UInt ret = 0;
Int i, extra;
Int len = vex_strlen(str);
if (width == 0) {
ret += len;
for (i = 0; i < len; i++)
send(MAYBE_TOUPPER(str[i]));
return ret;
}
if (len > width) {
ret += width;
for (i = 0; i < width; i++)
send(MAYBE_TOUPPER(str[i]));
return ret;
}
extra = width - len;
if (flags & VG_MSG_LJUSTIFY) {
ret += extra;
for (i = 0; i < extra; i++)
send(' ');
}
ret += len;
for (i = 0; i < len; i++)
send(MAYBE_TOUPPER(str[i]));
if (!(flags & VG_MSG_LJUSTIFY)) {
ret += extra;
for (i = 0; i < extra; i++)
send(' ');
}
# undef MAYBE_TOUPPER
return ret;
}
/* Write pL into the buffer according to these args:
 * FLAGS carries the VG_MSG_* flags: SIGNED means pL is signed,
 * ZJUSTIFY pads with '0', LJUSTIFY justifies on the left,
 * COMMA groups decimal digits in threes.
 * BASE is the base (2..16).
 * WIDTH is the width of the field.
 */
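/* Worked example (derived from the code below): with base 10, width 0
   and the VG_MSG_COMMA flag,
       myvprintf_int64(send, VG_MSG_COMMA, 10, 0, 1234567ULL)
   sends the nine characters "1,234,567" to the callback and returns 9. */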
static UInt
myvprintf_int64 ( void(*send)(HChar), Int flags, Int base, Int width, ULong pL)
{
HChar buf[40];
Int ind = 0;
Int i, nc = 0;
Bool neg = False;
HChar *digits = "0123456789ABCDEF";
UInt ret = 0;
UInt p = (UInt)pL;
if (base < 2 || base > 16)
return ret;
if ((flags & VG_MSG_SIGNED) && (Int)p < 0) {
p = - (Int)p;
neg = True;
}
if (p == 0)
buf[ind++] = '0';
else {
while (p > 0) {
if ((flags & VG_MSG_COMMA) && 10 == base &&
0 == (ind-nc) % 3 && 0 != ind)
{
buf[ind++] = ',';
nc++;
}
buf[ind++] = digits[p % base];
p /= base;
}
}
if (neg)
buf[ind++] = '-';
if (width > 0 && !(flags & VG_MSG_LJUSTIFY)) {
for(; ind < width; ind++) {
//vassert(ind < 39);
buf[ind] = ((flags & VG_MSG_ZJUSTIFY) ? '0': ' ');
}
}
/* Reverse copy to buffer. */
ret += ind;
for (i = ind -1; i >= 0; i--) {
send(buf[i]);
}
if (width > 0 && (flags & VG_MSG_LJUSTIFY)) {
for(; ind < width; ind++) {
ret++;
send(' '); // Never pad with zeroes on RHS -- changes the value!
}
}
return ret;
}
/* A simple vprintf(). */
static
UInt vprintf_wrk ( void(*send)(HChar), const HChar *format, va_list vargs )
{
UInt ret = 0;
int i;
int flags;
int width;
Bool is_long;
/* We assume that vargs has already been initialised by the
caller, using va_start, and that the caller will similarly
clean up with va_end.
*/
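/* Supported conversions: %d, %u, %x, %p, %c and %s/%S (%S upper-cases
   the string), with optional flags '(' ',' '-' '0', a decimal field
   width, and an 'l' length modifier for long arguments. */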
for (i = 0; format[i] != 0; i++) {
if (format[i] != '%') {
send(format[i]);
ret++;
continue;
}
i++;
/* A '%' has been found. Ignore a trailing %. */
if (format[i] == 0)
break;
if (format[i] == '%') {
/* `%%' is replaced by `%'. */
send('%');
ret++;
continue;
}
flags = 0;
is_long = False;
width = 0; /* length of the field. */
if (format[i] == '(') {
flags |= VG_MSG_PAREN;
i++;
}
/* If ',' follows '%', commas will be inserted. */
if (format[i] == ',') {
flags |= VG_MSG_COMMA;
i++;
}
/* If '-' follows '%', justify on the left. */
if (format[i] == '-') {
flags |= VG_MSG_LJUSTIFY;
i++;
}
/* If '0' follows '%', pads will be inserted. */
if (format[i] == '0') {
flags |= VG_MSG_ZJUSTIFY;
i++;
}
/* Compute the field length. */
while (format[i] >= '0' && format[i] <= '9') {
width *= 10;
width += format[i++] - '0';
}
while (format[i] == 'l') {
i++;
is_long = True;
}
switch (format[i]) {
case 'd': /* %d */
flags |= VG_MSG_SIGNED;
if (is_long)
ret += myvprintf_int64(send, flags, 10, width,
(ULong)(va_arg (vargs, Long)));
else
ret += myvprintf_int64(send, flags, 10, width,
(ULong)(va_arg (vargs, Int)));
break;
case 'u': /* %u */
if (is_long)
ret += myvprintf_int64(send, flags, 10, width,
(ULong)(va_arg (vargs, ULong)));
else
ret += myvprintf_int64(send, flags, 10, width,
(ULong)(va_arg (vargs, UInt)));
break;
case 'p': /* %p */
ret += 2;
send('0');
send('x');
ret += myvprintf_int64(send, flags, 16, width,
(ULong)((HWord)va_arg (vargs, void *)));
break;
case 'x': /* %x */
if (is_long)
ret += myvprintf_int64(send, flags, 16, width,
(ULong)(va_arg (vargs, ULong)));
else
ret += myvprintf_int64(send, flags, 16, width,
(ULong)(va_arg (vargs, UInt)));
break;
case 'c': /* %c */
ret++;
send((va_arg (vargs, int)));
break;
case 's': case 'S': { /* %s */
char *str = va_arg (vargs, char *);
if (str == (char*) 0) str = "(null)";
ret += myvprintf_str(send, flags, width, str,
(format[i]=='S'));
break;
}
# if 0
case 'y': { /* %y - print symbol */
Char buf[100];
Char *cp = buf;
Addr a = va_arg(vargs, Addr);
if (flags & VG_MSG_PAREN)
*cp++ = '(';
if (VG_(get_fnname_w_offset)(a, cp, sizeof(buf)-4)) {
if (flags & VG_MSG_PAREN) {
cp += VG_(strlen)(cp);
*cp++ = ')';
*cp = '\0';
}
ret += myvprintf_str(send, flags, width, buf, 0);
}
break;
}
# endif
default:
break;
}
}
return ret;
}
/* A general replacement for printf(). Note that only low-level
debugging info should be sent through here. The official route is
to use vg_message(). This interface is deprecated.
*/
/* XXX re 930: make the buffer just too small (by 1 byte) to be OK
for this particular run. */
static HChar myprintf_buf[1000 -930];
static Int n_myprintf_buf;
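/* Appends c to myprintf_buf and keeps it NUL-terminated, flushing via
   vex_log_bytes on '\n' or when the count reaches the flush threshold.
   Note that the threshold still assumes the original 1000-byte buffer,
   so with the deliberately shrunken 70-byte buffer above the stores
   below can run one byte past the end of the array -- this is the
   single out-of-range access the test expects ptrcheck to report. */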
static void add_to_myprintf_buf ( HChar c )
{
if (c == '\n' || n_myprintf_buf >= 1000-10 /*paranoia*/ ) {
(*vex_log_bytes)( myprintf_buf, vex_strlen(myprintf_buf) );
n_myprintf_buf = 0;
myprintf_buf[n_myprintf_buf] = 0;
}
myprintf_buf[n_myprintf_buf++] = c;
myprintf_buf[n_myprintf_buf] = 0;
}
static UInt vex_printf ( const char *format, ... )
{
UInt ret;
va_list vargs;
va_start(vargs,format);
n_myprintf_buf = 0;
myprintf_buf[n_myprintf_buf] = 0;
ret = vprintf_wrk ( add_to_myprintf_buf, format, vargs );
if (n_myprintf_buf > 0) {
(*vex_log_bytes)( myprintf_buf, n_myprintf_buf );
}
va_end(vargs);
return ret;
}
/*---------------------------------------------------------------*/
/*--- end vex_util.c ---*/
/*---------------------------------------------------------------*/
/////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////
/*-------------------------------------------------------------*/
/*--- Decompression machinery ---*/
/*--- decompress.c ---*/
/*-------------------------------------------------------------*/
/*--
This file is a part of bzip2 and/or libbzip2, a program and
library for lossless, block-sorting data compression.
Copyright (C) 1996-2004 Julian R Seward. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. The origin of this software must not be misrepresented; you must
not claim that you wrote the original software. If you use this
software in a product, an acknowledgment in the product
documentation would be appreciated but is not required.
3. Altered source versions must be plainly marked as such, and must
not be misrepresented as being the original software.
4. The name of the author may not be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Julian Seward, Cambridge, UK.
jseward@bzip.org
bzip2/libbzip2 version 1.0 of 21 March 2000
This program is based on (at least) the work of:
Mike Burrows
David Wheeler
Peter Fenwick
Alistair Moffat
Radford Neal
Ian H. Witten
Robert Sedgewick
Jon L. Bentley
For more information on these sources, see the manual.
--*/
/*---------------------------------------------------*/
static
void makeMaps_d ( DState* s )
{
Int32 i;
s->nInUse = 0;
for (i = 0; i < 256; i++)
if (s->inUse[i]) {
s->seqToUnseq[s->nInUse] = i;
s->nInUse++;
}
}
/*---------------------------------------------------*/
#define RETURN(rrr) \
{ retVal = rrr; goto save_state_and_return; };
#define GET_BITS(lll,vvv,nnn) \
case lll: s->state = lll; \
while (True) { \
if (s->bsLive >= nnn) { \
UInt32 v; \
v = (s->bsBuff >> \
(s->bsLive-nnn)) & ((1 << nnn)-1); \
s->bsLive -= nnn; \
vvv = v; \
break; \
} \
if (s->strm->avail_in == 0) RETURN(BZ_OK); \
s->bsBuff \
= (s->bsBuff << 8) | \
((UInt32) \
(*((UChar*)(s->strm->next_in)))); \
s->bsLive += 8; \
s->strm->next_in++; \
s->strm->avail_in--; \
s->strm->total_in_lo32++; \
if (s->strm->total_in_lo32 == 0) \
s->strm->total_in_hi32++; \
}
#define GET_UCHAR(lll,uuu) \
GET_BITS(lll,uuu,8)
#define GET_BIT(lll,uuu) \
GET_BITS(lll,uuu,1)
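/* GET_BITS doubles as a resumption point for the state machine in
   BZ2_decompress: it emits a 'case lll:' label inside the big switch and
   records lll in s->state, so that when the input runs dry the macro
   RETURNs BZ_OK and a later call (with more input) jumps straight back
   to the same point. GET_UCHAR and GET_BIT are 8-bit and 1-bit wrappers. */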
/*---------------------------------------------------*/
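/* Huffman-decodes the next MTF/RLE symbol. Every BZ_G_SIZE symbols the
   coding tables switch to the group named by the next selector; the code
   is then read bit by bit, starting at that group's minimum code length,
   until it falls at or below gLimit[] for the current length, and
   gPerm[]/gBase[] map the code back to a symbol value. */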
#define GET_MTF_VAL(label1,label2,lval) \
{ \
if (groupPos == 0) { \
groupNo++; \
if (groupNo >= nSelectors) \
RETURN(BZ_DATA_ERROR); \
groupPos = BZ_G_SIZE; \
gSel = s->selector[groupNo]; \
gMinlen = s->minLens[gSel]; \
gLimit = &(s->limit[gSel][0]); \
gPerm = &(s->perm[gSel][0]); \
gBase = &(s->base[gSel][0]); \
} \
groupPos--; \
zn = gMinlen; \
GET_BITS(label1, zvec, zn); \
while (1) { \
if (zn > 20 /* the longest code */) \
RETURN(BZ_DATA_ERROR); \
if (zvec <= gLimit[zn]) break; \
zn++; \
GET_BIT(label2, zj); \
zvec = (zvec << 1) | zj; \
}; \
if (zvec - gBase[zn] < 0 \
|| zvec - gBase[zn] >= BZ_MAX_ALPHA_SIZE) \
RETURN(BZ_DATA_ERROR); \
lval = gPerm[zvec - gBase[zn]]; \
}
/*---------------------------------------------------*/
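/* Binary search over the cumulative-frequency table: returns the byte
   value b such that cftab[b] <= indx < cftab[b+1]. */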
__inline__ Int32 BZ2_indexIntoF ( Int32 indx, Int32 *cftab )
{
Int32 nb, na, mid;
nb = 0;
na = 256;
do {
mid = (nb + na) >> 1;
if (indx >= cftab[mid]) nb = mid; else na = mid;
}
while (na - nb != 1);
return nb;
}
/*---------------------------------------------------*/
Int32 BZ2_decompress ( DState* s )
{
UChar uc;
Int32 retVal;
Int32 minLen, maxLen;
bz_stream* strm = s->strm;
/* stuff that needs to be saved/restored */
Int32 i;
Int32 j;
Int32 t;
Int32 alphaSize;
Int32 nGroups;
Int32 nSelectors;
Int32 EOB;
Int32 groupNo;
Int32 groupPos;
Int32 nextSym;
Int32 nblockMAX;
Int32 nblock;
Int32 es;
Int32 N;
Int32 curr;
Int32 zt;
Int32 zn;
Int32 zvec;
Int32 zj;
Int32 gSel;
Int32 gMinlen;
Int32* gLimit;
Int32* gBase;
Int32* gPerm;
if (s->state == BZ_X_MAGIC_1) {
/*initialise the save area*/
s->save_i = 0;
s->save_j = 0;
s->save_t = 0;
s->save_alphaSize = 0;
s->save_nGroups = 0;
s->save_nSelectors = 0;
s->save_EOB = 0;
s->save_groupNo = 0;
s->save_groupPos = 0;
s->save_nextSym = 0;
s->save_nblockMAX = 0;
s->save_nblock = 0;
s->save_es = 0;
s->save_N = 0;
s->save_curr = 0;
s->save_zt = 0;
s->save_zn = 0;
s->save_zvec = 0;
s->save_zj = 0;
s->save_gSel = 0;
s->save_gMinlen = 0;
s->save_gLimit = NULL;
s->save_gBase = NULL;
s->save_gPerm = NULL;
}
/*restore from the save area*/
i = s->save_i;
j = s->save_j;
t = s->save_t;
alphaSize = s->save_alphaSize;
nGroups = s->save_nGroups;
nSelectors = s->save_nSelectors;
EOB = s->save_EOB;
groupNo = s->save_groupNo;
groupPos = s->save_groupPos;
nextSym = s->save_nextSym;
nblockMAX = s->save_nblockMAX;
nblock = s->save_nblock;
es = s->save_es;
N = s->save_N;
curr = s->save_curr;
zt = s->save_zt;
zn = s->save_zn;
zvec = s->save_zvec;
zj = s->save_zj;
gSel = s->save_gSel;
gMinlen = s->save_gMinlen;
gLimit = s->save_gLimit;
gBase = s->save_gBase;
gPerm = s->save_gPerm;
retVal = BZ_OK;
switch (s->state) {
GET_UCHAR(BZ_X_MAGIC_1, uc);
if (uc != BZ_HDR_B) RETURN(BZ_DATA_ERROR_MAGIC);
GET_UCHAR(BZ_X_MAGIC_2, uc);
if (uc != BZ_HDR_Z) RETURN(BZ_DATA_ERROR_MAGIC);
GET_UCHAR(BZ_X_MAGIC_3, uc)
if (uc != BZ_HDR_h) RETURN(BZ_DATA_ERROR_MAGIC);
GET_BITS(BZ_X_MAGIC_4, s->blockSize100k, 8)
if (s->blockSize100k < (BZ_HDR_0 + 1) ||
s->blockSize100k > (BZ_HDR_0 + 9)) RETURN(BZ_DATA_ERROR_MAGIC);
s->blockSize100k -= BZ_HDR_0;
if (s->smallDecompress) {
s->ll16 = BZALLOC( s->blockSize100k * 100000 * sizeof(UInt16) );
s->ll4 = BZALLOC(
((1 + s->blockSize100k * 100000) >> 1) * sizeof(UChar)
);
if (s->ll16 == NULL || s->ll4 == NULL) RETURN(BZ_MEM_ERROR);
} else {
s->tt = BZALLOC( s->blockSize100k * 100000 * sizeof(Int32) );
if (s->tt == NULL) RETURN(BZ_MEM_ERROR);
}
GET_UCHAR(BZ_X_BLKHDR_1, uc);
if (uc == 0x17) goto endhdr_2;
if (uc != 0x31) RETURN(BZ_DATA_ERROR);
GET_UCHAR(BZ_X_BLKHDR_2, uc);
if (uc != 0x41) RETURN(BZ_DATA_ERROR);
GET_UCHAR(BZ_X_BLKHDR_3, uc);
if (uc != 0x59) RETURN(BZ_DATA_ERROR);
GET_UCHAR(BZ_X_BLKHDR_4, uc);
if (uc != 0x26) RETURN(BZ_DATA_ERROR);
GET_UCHAR(BZ_X_BLKHDR_5, uc);
if (uc != 0x53) RETURN(BZ_DATA_ERROR);
GET_UCHAR(BZ_X_BLKHDR_6, uc);
if (uc != 0x59) RETURN(BZ_DATA_ERROR);
s->currBlockNo++;
if (s->verbosity >= 2)
VPrintf1 ( "\n [%d: huff+mtf ", s->currBlockNo );
s->storedBlockCRC = 0;
GET_UCHAR(BZ_X_BCRC_1, uc);
s->storedBlockCRC = (s->storedBlockCRC << 8) | ((UInt32)uc);
GET_UCHAR(BZ_X_BCRC_2, uc);
s->storedBlockCRC = (s->storedBlockCRC << 8) | ((UInt32)uc);
GET_UCHAR(BZ_X_BCRC_3, uc);
s->storedBlockCRC = (s->storedBlockCRC << 8) | ((UInt32)uc);
GET_UCHAR(BZ_X_BCRC_4, uc);
s->storedBlockCRC = (s->storedBlockCRC << 8) | ((UInt32)uc);
GET_BITS(BZ_X_RANDBIT, s->blockRandomised, 1);
s->origPtr = 0;
GET_UCHAR(BZ_X_ORIGPTR_1, uc);
s->origPtr = (s->origPtr << 8) | ((Int32)uc);
GET_UCHAR(BZ_X_ORIGPTR_2, uc);
s->origPtr = (s->origPtr << 8) | ((Int32)uc);
GET_UCHAR(BZ_X_ORIGPTR_3, uc);
s->origPtr = (s->origPtr << 8) | ((Int32)uc);
if (s->origPtr < 0)
RETURN(BZ_DATA_ERROR);
if (s->origPtr > 10 + 100000*s->blockSize100k)
RETURN(BZ_DATA_ERROR);
/*--- Receive the mapping table ---*/
for (i = 0; i < 16; i++) {
GET_BIT(BZ_X_MAPPING_1, uc);
if (uc == 1)
s->inUse16[i] = True; else
s->inUse16[i] = False;
}
for (i = 0; i < 256; i++) s->inUse[i] = False;
for (i = 0; i < 16; i++)
if (s->inUse16[i])
for (j = 0; j < 16; j++) {
GET_BIT(BZ_X_MAPPING_2, uc);
if (uc == 1) s->inUse[i * 16 + j] = True;
}
makeMaps_d ( s );
if (s->nInUse == 0) RETURN(BZ_DATA_ERROR);
alphaSize = s->nInUse+2;
/*--- Now the selectors ---*/
GET_BITS(BZ_X_SELECTOR_1, nGroups, 3);
if (nGroups < 2 || nGroups > 6) RETURN(BZ_DATA_ERROR);
GET_BITS(BZ_X_SELECTOR_2, nSelectors, 15);
if (nSelectors < 1) RETURN(BZ_DATA_ERROR);
for (i = 0; i < nSelectors; i++) {
j = 0;
while (True) {
GET_BIT(BZ_X_SELECTOR_3, uc);
if (uc == 0) break;
j++;
if (j >= nGroups) RETURN(BZ_DATA_ERROR);
}
s->selectorMtf[i] = j;
}
/*--- Undo the MTF values for the selectors. ---*/
{
UChar pos[BZ_N_GROUPS], tmp, v;
for (v = 0; v < nGroups; v++) pos[v] = v;
for (i = 0; i < nSelectors; i++) {
v = s->selectorMtf[i];
tmp = pos[v];
while (v > 0) { pos[v] = pos[v-1]; v--; }
pos[0] = tmp;
s->selector[i] = tmp;
}
}
/*--- Now the coding tables ---*/
for (t = 0; t < nGroups; t++) {
GET_BITS(BZ_X_CODING_1, curr, 5);
for (i = 0; i < alphaSize; i++) {
while (True) {
if (curr < 1 || curr > 20) RETURN(BZ_DATA_ERROR);
GET_BIT(BZ_X_CODING_2, uc);
if (uc == 0) break;
GET_BIT(BZ_X_CODING_3, uc);
if (uc == 0) curr++; else curr--;
}
s->len[t][i] = curr;
}
}
/*--- Create the Huffman decoding tables ---*/
for (t = 0; t < nGroups; t++) {
minLen = 32;
maxLen = 0;
for (i = 0; i < alphaSize; i++) {
if (s->len[t][i] > maxLen) maxLen = s->len[t][i];
if (s->len[t][i] < minLen) minLen = s->len[t][i];
}
BZ2_hbCreateDecodeTables (
&(s->limit[t][0]),
&(s->base[t][0]),
&(s->perm[t][0]),
&(s->len[t][0]),
minLen, maxLen, alphaSize
);
s->minLens[t] = minLen;
}
/*--- Now the MTF values ---*/
EOB = s->nInUse+1;
nblockMAX = 100000 * s->blockSize100k;
groupNo = -1;
groupPos = 0;
for (i = 0; i <= 255; i++) s->unzftab[i] = 0;
/*-- MTF init --*/
{
Int32 ii, jj, kk;
kk = MTFA_SIZE-1;
for (ii = 256 / MTFL_SIZE - 1; ii >= 0; ii--) {
for (jj = MTFL_SIZE-1; jj >= 0; jj--) {
s->mtfa[kk] = (UChar)(ii * MTFL_SIZE + jj);
kk--;
}
s->mtfbase[ii] = kk + 1;
}
}
/*-- end MTF init --*/
nblock = 0;
GET_MTF_VAL(BZ_X_MTF_1, BZ_X_MTF_2, nextSym);
while (True) {
if (nextSym == EOB) break;
if (nextSym == BZ_RUNA || nextSym == BZ_RUNB) {
es = -1;
N = 1;
do {
if (nextSym == BZ_RUNA) es = es + (0+1) * N; else
if (nextSym == BZ_RUNB) es = es + (1+1) * N;
N = N * 2;
GET_MTF_VAL(BZ_X_MTF_3, BZ_X_MTF_4, nextSym);
}
while (nextSym == BZ_RUNA || nextSym == BZ_RUNB);
es++;
uc = s->seqToUnseq[ s->mtfa[s->mtfbase[0]] ];
s->unzftab[uc] += es;
if (s->smallDecompress)
while (es > 0) {
if (nblock >= nblockMAX) RETURN(BZ_DATA_ERROR);
s->ll16[nblock] = (UInt16)uc;
nblock++;
es--;
}
else
while (es > 0) {
if (nblock >= nblockMAX) RETURN(BZ_DATA_ERROR);
s->tt[nblock] = (UInt32)uc;
nblock++;
es--;
};
continue;
} else {
if (nblock >= nblockMAX) RETURN(BZ_DATA_ERROR);
/*-- uc = MTF ( nextSym-1 ) --*/
{
Int32 ii, jj, kk, pp, lno, off;
UInt32 nn;
nn = (UInt32)(nextSym - 1);
if (nn < MTFL_SIZE) {
/* avoid general-case expense */
pp = s->mtfbase[0];
uc = s->mtfa[pp+nn];
while (nn > 3) {
Int32 z = pp+nn;
s->mtfa[(z) ] = s->mtfa[(z)-1];
s->mtfa[(z)-1] = s->mtfa[(z)-2];
s->mtfa[(z)-2] = s->mtfa[(z)-3];
s->mtfa[(z)-3] = s->mtfa[(z)-4];
nn -= 4;
}
while (nn > 0) {
s->mtfa[(pp+nn)] = s->mtfa[(pp+nn)-1]; nn--;
};
s->mtfa[pp] = uc;
} else {
/* general case */
lno = nn / MTFL_SIZE;
off = nn % MTFL_SIZE;
pp = s->mtfbase[lno] + off;
uc = s->mtfa[pp];
while (pp > s->mtfbase[lno]) {
s->mtfa[pp] = s->mtfa[pp-1]; pp--;
};
s->mtfbase[lno]++;
while (lno > 0) {
s->mtfbase[lno]--;
s->mtfa[s->mtfbase[lno]]
= s->mtfa[s->mtfbase[lno-1] + MTFL_SIZE - 1];
lno--;
}
s->mtfbase[0]--;
s->mtfa[s->mtfbase[0]] = uc;
if (s->mtfbase[0] == 0) {
kk = MTFA_SIZE-1;
for (ii = 256 / MTFL_SIZE-1; ii >= 0; ii--) {
for (jj = MTFL_SIZE-1; jj >= 0; jj--) {
s->mtfa[kk] = s->mtfa[s->mtfbase[ii] + jj];
kk--;
}
s->mtfbase[ii] = kk + 1;
}
}
}
}
/*-- end uc = MTF ( nextSym-1 ) --*/
s->unzftab[s->seqToUnseq[uc]]++;
if (s->smallDecompress)
s->ll16[nblock] = (UInt16)(s->seqToUnseq[uc]); else
s->tt[nblock] = (UInt32)(s->seqToUnseq[uc]);
nblock++;
GET_MTF_VAL(BZ_X_MTF_5, BZ_X_MTF_6, nextSym);
continue;
}
}
/* Now we know what nblock is, we can do a better sanity
check on s->origPtr.
*/
if (s->origPtr < 0 || s->origPtr >= nblock)
RETURN(BZ_DATA_ERROR);
/*-- Set up cftab to facilitate generation of T^(-1) --*/
s->cftab[0] = 0;
for (i = 1; i <= 256; i++) s->cftab[i] = s->unzftab[i-1];
for (i = 1; i <= 256; i++) s->cftab[i] += s->cftab[i-1];
for (i = 0; i <= 256; i++) {
if (s->cftab[i] < 0 || s->cftab[i] > nblock) {
/* s->cftab[i] can legitimately be == nblock */
RETURN(BZ_DATA_ERROR);
}
}
s->state_out_len = 0;
s->state_out_ch = 0;
BZ_INITIALISE_CRC ( s->calculatedBlockCRC );
s->state = BZ_X_OUTPUT;
if (s->verbosity >= 2) VPrintf0 ( "rt+rld" );
if (s->smallDecompress) {
/*-- Make a copy of cftab, used in generation of T --*/
for (i = 0; i <= 256; i++) s->cftabCopy[i] = s->cftab[i];
/*-- compute the T vector --*/
for (i = 0; i < nblock; i++) {
uc = (UChar)(s->ll16[i]);
SET_LL(i, s->cftabCopy[uc]);
s->cftabCopy[uc]++;
}
/*-- Compute T^(-1) by pointer reversal on T --*/
i = s->origPtr;
j = GET_LL(i);
do {
Int32 tmp = GET_LL(j);
SET_LL(j, i);
i = j;
j = tmp;
}
while (i != s->origPtr);
s->tPos = s->origPtr;
s->nblock_used = 0;
if (s->blockRandomised) {
BZ_RAND_INIT_MASK;
BZ_GET_SMALL(s->k0); s->nblock_used++;
BZ_RAND_UPD_MASK; s->k0 ^= BZ_RAND_MASK;
} else {
BZ_GET_SMALL(s->k0); s->nblock_used++;
}
} else {
/*-- compute the T^(-1) vector --*/
for (i = 0; i < nblock; i++) {
uc = (UChar)(s->tt[i] & 0xff);
s->tt[s->cftab[uc]] |= (i << 8);
s->cftab[uc]++;
}
s->tPos = s->tt[s->origPtr] >> 8;
s->nblock_used = 0;
if (s->blockRandomised) {
BZ_RAND_INIT_MASK;
BZ_GET_FAST(s->k0); s->nblock_used++;
BZ_RAND_UPD_MASK; s->k0 ^= BZ_RAND_MASK;
} else {
BZ_GET_FAST(s->k0); s->nblock_used++;
}
}
RETURN(BZ_OK);
endhdr_2:
GET_UCHAR(BZ_X_ENDHDR_2, uc);
if (uc != 0x72) RETURN(BZ_DATA_ERROR);
GET_UCHAR(BZ_X_ENDHDR_3, uc);
if (uc != 0x45) RETURN(BZ_DATA_ERROR);
GET_UCHAR(BZ_X_ENDHDR_4, uc);
if (uc != 0x38) RETURN(BZ_DATA_ERROR);
GET_UCHAR(BZ_X_ENDHDR_5, uc);
if (uc != 0x50) RETURN(BZ_DATA_ERROR);
GET_UCHAR(BZ_X_ENDHDR_6, uc);
if (uc != 0x90) RETURN(BZ_DATA_ERROR);
s->storedCombinedCRC = 0;
GET_UCHAR(BZ_X_CCRC_1, uc);
s->storedCombinedCRC = (s->storedCombinedCRC << 8) | ((UInt32)uc);
GET_UCHAR(BZ_X_CCRC_2, uc);
s->storedCombinedCRC = (s->storedCombinedCRC << 8) | ((UInt32)uc);
GET_UCHAR(BZ_X_CCRC_3, uc);
s->storedCombinedCRC = (s->storedCombinedCRC << 8) | ((UInt32)uc);
GET_UCHAR(BZ_X_CCRC_4, uc);
s->storedCombinedCRC = (s->storedCombinedCRC << 8) | ((UInt32)uc);
s->state = BZ_X_IDLE;
RETURN(BZ_STREAM_END);
default: AssertH ( False, 4001 );
}
AssertH ( False, 4002 );
save_state_and_return:
s->save_i = i;
s->save_j = j;
s->save_t = t;
s->save_alphaSize = alphaSize;
s->save_nGroups = nGroups;
s->save_nSelectors = nSelectors;
s->save_EOB = EOB;
s->save_groupNo = groupNo;
s->save_groupPos = groupPos;
s->save_nextSym = nextSym;
s->save_nblockMAX = nblockMAX;
s->save_nblock = nblock;
s->save_es = es;
s->save_N = N;
s->save_curr = curr;
s->save_zt = zt;
s->save_zn = zn;
s->save_zvec = zvec;
s->save_zj = zj;
s->save_gSel = gSel;
s->save_gMinlen = gMinlen;
s->save_gLimit = gLimit;
s->save_gBase = gBase;
s->save_gPerm = gPerm;
return retVal;
}
/*-------------------------------------------------------------*/
/*--- end decompress.c ---*/
/*-------------------------------------------------------------*/
/*-------------------------------------------------------------*/
/*--- Block sorting machinery ---*/
/*--- blocksort.c ---*/
/*-------------------------------------------------------------*/
/*--
This file is a part of bzip2 and/or libbzip2, a program and
library for lossless, block-sorting data compression.
Copyright (C) 1996-2004 Julian R Seward. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. The origin of this software must not be misrepresented; you must
not claim that you wrote the original software. If you use this
software in a product, an acknowledgment in the product
documentation would be appreciated but is not required.
3. Altered source versions must be plainly marked as such, and must
not be misrepresented as being the original software.
4. The name of the author may not be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Julian Seward, Cambridge, UK.
jseward@bzip.org
bzip2/libbzip2 version 1.0 of 21 March 2000
This program is based on (at least) the work of:
Mike Burrows
David Wheeler
Peter Fenwick
Alistair Moffat
Radford Neal
Ian H. Witten
Robert Sedgewick
Jon L. Bentley
For more information on these sources, see the manual.
To get some idea how the block sorting algorithms in this file
work, read my paper
On the Performance of BWT Sorting Algorithms
in Proceedings of the IEEE Data Compression Conference 2000,
Snowbird, Utah, USA, 27-30 March 2000. The main sort in this
file implements the algorithm called cache in the paper.
--*/
/*---------------------------------------------*/
/*--- Fallback O(N log(N)^2) sorting ---*/
/*--- algorithm, for repetitive blocks ---*/
/*---------------------------------------------*/
/*---------------------------------------------*/
static
__inline__
void fallbackSimpleSort ( UInt32* fmap,
UInt32* eclass,
Int32 lo,
Int32 hi )
{
Int32 i, j, tmp;
UInt32 ec_tmp;
if (lo == hi) return;
if (hi - lo > 3) {
for ( i = hi-4; i >= lo; i-- ) {
tmp = fmap[i];
ec_tmp = eclass[tmp];
for ( j = i+4; j <= hi && ec_tmp > eclass[fmap[j]]; j += 4 )
fmap[j-4] = fmap[j];
fmap[j-4] = tmp;
}
}
for ( i = hi-1; i >= lo; i-- ) {
tmp = fmap[i];
ec_tmp = eclass[tmp];
for ( j = i+1; j <= hi && ec_tmp > eclass[fmap[j]]; j++ )
fmap[j-1] = fmap[j];
fmap[j-1] = tmp;
}
}
/*---------------------------------------------*/
#define fswap(zz1, zz2) \
{ Int32 zztmp = zz1; zz1 = zz2; zz2 = zztmp; }
#define fvswap(zzp1, zzp2, zzn) \
{ \
Int32 yyp1 = (zzp1); \
Int32 yyp2 = (zzp2); \
Int32 yyn = (zzn); \
while (yyn > 0) { \
fswap(fmap[yyp1], fmap[yyp2]); \
yyp1++; yyp2++; yyn--; \
} \
}
#define fmin(a,b) ((a) < (b)) ? (a) : (b)
#define fpush(lz,hz) { stackLo[sp] = lz; \
stackHi[sp] = hz; \
sp++; }
#define fpop(lz,hz) { sp--; \
lz = stackLo[sp]; \
hz = stackHi[sp]; }
#define FALLBACK_QSORT_SMALL_THRESH 10
#define FALLBACK_QSORT_STACK_SIZE 100
static
void fallbackQSort3 ( UInt32* fmap,
UInt32* eclass,
Int32 loSt,
Int32 hiSt )
{
Int32 unLo, unHi, ltLo, gtHi, n, m;
Int32 sp, lo, hi;
UInt32 med, r, r3;
Int32 stackLo[FALLBACK_QSORT_STACK_SIZE];
Int32 stackHi[FALLBACK_QSORT_STACK_SIZE];
r = 0;
sp = 0;
fpush ( loSt, hiSt );
while (sp > 0) {
AssertH ( sp < FALLBACK_QSORT_STACK_SIZE, 1004 );
fpop ( lo, hi );
if (hi - lo < FALLBACK_QSORT_SMALL_THRESH) {
fallbackSimpleSort ( fmap, eclass, lo, hi );
continue;
}
/* Random partitioning. Median of 3 sometimes fails to
avoid bad cases. Median of 9 seems to help but
looks rather expensive. This too seems to work but
is cheaper. Guidance for the magic constants
7621 and 32768 is taken from Sedgewick's algorithms
book, chapter 35.
*/
r = ((r * 7621) + 1) % 32768;
r3 = r % 3;
if (r3 == 0) med = eclass[fmap[lo]]; else
if (r3 == 1) med = eclass[fmap[(lo+hi)>>1]]; else
med = eclass[fmap[hi]];
unLo = ltLo = lo;
unHi = gtHi = hi;
while (1) {
while (1) {
if (unLo > unHi) break;
n = (Int32)eclass[fmap[unLo]] - (Int32)med;
if (n == 0) {
fswap(fmap[unLo], fmap[ltLo]);
ltLo++; unLo++;
continue;
};
if (n > 0) break;
unLo++;
}
while (1) {
if (unLo > unHi) break;
n = (Int32)eclass[fmap[unHi]] - (Int32)med;
if (n == 0) {
fswap(fmap[unHi], fmap[gtHi]);
gtHi--; unHi--;
continue;
};
if (n < 0) break;
unHi--;
}
if (unLo > unHi) break;
fswap(fmap[unLo], fmap[unHi]); unLo++; unHi--;
}
AssertD ( unHi == unLo-1, "fallbackQSort3(2)" );
if (gtHi < ltLo) continue;
n = fmin(ltLo-lo, unLo-ltLo); fvswap(lo, unLo-n, n);
m = fmin(hi-gtHi, gtHi-unHi); fvswap(unLo, hi-m+1, m);
n = lo + unLo - ltLo - 1;
m = hi - (gtHi - unHi) + 1;
if (n - lo > hi - m) {
fpush ( lo, n );
fpush ( m, hi );
} else {
fpush ( m, hi );
fpush ( lo, n );
}
}
}
#undef fmin
#undef fpush
#undef fpop
#undef fswap
#undef fvswap
#undef FALLBACK_QSORT_SMALL_THRESH
#undef FALLBACK_QSORT_STACK_SIZE
/*---------------------------------------------*/
/* Pre:
nblock > 0
eclass exists for [0 .. nblock-1]
((UChar*)eclass) [0 .. nblock-1] holds block
ptr exists for [0 .. nblock-1]
Post:
((UChar*)eclass) [0 .. nblock-1] holds block
All other areas of eclass destroyed
fmap [0 .. nblock-1] holds sorted order
bhtab [ 0 .. 2+(nblock/32) ] destroyed
*/
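/* bhtab[] is used as a bit vector of "bucket header" bits: bit zz lives
   in bit (zz & 31) of word bhtab[zz >> 5]. UNALIGNED_BH(zz) is true
   unless zz is a multiple of 32, letting the scan loops switch to
   whole-word tests at word boundaries. */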
#define SET_BH(zz) bhtab[(zz) >> 5] |= (1 << ((zz) & 31))
#define CLEAR_BH(zz) bhtab[(zz) >> 5] &= ~(1 << ((zz) & 31))
#define ISSET_BH(zz) (bhtab[(zz) >> 5] & (1 << ((zz) & 31)))
#define WORD_BH(zz) bhtab[(zz) >> 5]
#define UNALIGNED_BH(zz) ((zz) & 0x01f)
static
void fallbackSort ( UInt32* fmap,
UInt32* eclass,
UInt32* bhtab,
Int32 nblock,
Int32 verb )
{
Int32 ftab[257];
Int32 ftabCopy[256];
Int32 H, i, j, k, l, r, cc, cc1;
Int32 nNotDone;
Int32 nBhtab;
UChar* eclass8 = (UChar*)eclass;
/*--
Initial 1-char radix sort to generate
initial fmap and initial BH bits.
--*/
if (verb >= 4)
VPrintf0 ( " bucket sorting ...\n" );
for (i = 0; i < 257; i++) ftab[i] = 0;
for (i = 0; i < nblock; i++) ftab[eclass8[i]]++;
for (i = 0; i < 256; i++) ftabCopy[i] = ftab[i];
for (i = 1; i < 257; i++) ftab[i] += ftab[i-1];
for (i = 0; i < nblock; i++) {
j = eclass8[i];
k = ftab[j] - 1;
ftab[j] = k;
fmap[k] = i;
}
nBhtab = 2 + (nblock / 32);
for (i = 0; i < nBhtab; i++) bhtab[i] = 0;
for (i = 0; i < 256; i++) SET_BH(ftab[i]);
/*--
Inductively refine the buckets. Kind-of an
"exponential radix sort" (!), inspired by the
Manber-Myers suffix array construction algorithm.
--*/
/*-- set sentinel bits for block-end detection --*/
for (i = 0; i < 32; i++) {
SET_BH(nblock + 2*i);
CLEAR_BH(nblock + 2*i + 1);
}
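/*-- Each pass of the log(N) loop below orders the suffixes within a
     bucket by the bucket number of the position H places later (the
     eclass[] values computed at the top of the loop), so afterwards the
     buckets reflect the first 2*H characters; H doubles each pass. --*/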
/*-- the log(N) loop --*/
H = 1;
while (1) {
if (verb >= 4)
VPrintf1 ( " depth %6d has ", H );
j = 0;
for (i = 0; i < nblock; i++) {
if (ISSET_BH(i)) j = i;
k = fmap[i] - H; if (k < 0) k += nblock;
eclass[k] = j;
}
nNotDone = 0;
r = -1;
while (1) {
/*-- find the next non-singleton bucket --*/
k = r + 1;
while (ISSET_BH(k) && UNALIGNED_BH(k)) k++;
if (ISSET_BH(k)) {
while (WORD_BH(k) == 0xffffffff) k += 32;
while (ISSET_BH(k)) k++;
}
l = k - 1;
if (l >= nblock) break;
while (!ISSET_BH(k) && UNALIGNED_BH(k)) k++;
if (!ISSET_BH(k)) {
while (WORD_BH(k) == 0x00000000) k += 32;
while (!ISSET_BH(k)) k++;
}
r = k - 1;
if (r >= nblock) break;
/*-- now [l, r] bracket current bucket --*/
if (r > l) {
nNotDone += (r - l + 1);
fallbackQSort3 ( fmap, eclass, l, r );
/*-- scan bucket and generate header bits --*/
cc = -1;
for (i = l; i <= r; i++) {
cc1 = eclass[fmap[i]];
if (cc != cc1) { SET_BH(i); cc = cc1; };
}
}
}
if (verb >= 4)
VPrintf1 ( "%6d unresolved strings\n", nNotDone );
H *= 2;
if (H > nblock || nNotDone == 0) break;
}
/*--
Reconstruct the original block in
eclass8 [0 .. nblock-1], since the
previous phase destroyed it.
--*/
if (verb >= 4)
VPrintf0 ( " reconstructing block ...\n" );
j = 0;
for (i = 0; i < nblock; i++) {
while (ftabCopy[j] == 0) j++;
ftabCopy[j]--;
eclass8[fmap[i]] = (UChar)j;
}
AssertH ( j < 256, 1005 );
}
#undef SET_BH
#undef CLEAR_BH
#undef ISSET_BH
#undef WORD_BH
#undef UNALIGNED_BH
/*---------------------------------------------*/
/*--- The main, O(N^2 log(N)) sorting ---*/
/*--- algorithm. Faster for "normal" ---*/
/*--- non-repetitive blocks. ---*/
/*---------------------------------------------*/
/*---------------------------------------------*/
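/* Returns True iff the rotation of block[] starting at i1 sorts strictly
   greater than the one starting at i2. The first 12 bytes are compared
   directly (unrolled); after that the loop compares 8 positions per
   iteration, breaking byte ties with the quadrant[] values, wrapping
   indices modulo nblock, and decrementing *budget once per iteration so
   the callers can detect overly expensive comparisons and give up in
   favour of the fallback sort. */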
static
__inline__
Bool mainGtU ( UInt32 i1,
UInt32 i2,
UChar* block,
UInt16* quadrant,
UInt32 nblock,
Int32* budget )
{
Int32 k;
UChar c1, c2;
UInt16 s1, s2;
AssertD ( i1 != i2, "mainGtU" );
/* 1 */
c1 = block[i1]; c2 = block[i2];
if (c1 != c2) return (c1 > c2);
i1++; i2++;
/* 2 */
c1 = block[i1]; c2 = block[i2];
if (c1 != c2) return (c1 > c2);
i1++; i2++;
/* 3 */
c1 = block[i1]; c2 = block[i2];
if (c1 != c2) return (c1 > c2);
i1++; i2++;
/* 4 */
c1 = block[i1]; c2 = block[i2];
if (c1 != c2) return (c1 > c2);
i1++; i2++;
/* 5 */
c1 = block[i1]; c2 = block[i2];
if (c1 != c2) return (c1 > c2);
i1++; i2++;
/* 6 */
c1 = block[i1]; c2 = block[i2];
if (c1 != c2) return (c1 > c2);
i1++; i2++;
/* 7 */
c1 = block[i1]; c2 = block[i2];
if (c1 != c2) return (c1 > c2);
i1++; i2++;
/* 8 */
c1 = block[i1]; c2 = block[i2];
if (c1 != c2) return (c1 > c2);
i1++; i2++;
/* 9 */
c1 = block[i1]; c2 = block[i2];
if (c1 != c2) return (c1 > c2);
i1++; i2++;
/* 10 */
c1 = block[i1]; c2 = block[i2];
if (c1 != c2) return (c1 > c2);
i1++; i2++;
/* 11 */
c1 = block[i1]; c2 = block[i2];
if (c1 != c2) return (c1 > c2);
i1++; i2++;
/* 12 */
c1 = block[i1]; c2 = block[i2];
if (c1 != c2) return (c1 > c2);
i1++; i2++;
k = nblock + 8;
do {
/* 1 */
c1 = block[i1]; c2 = block[i2];
if (c1 != c2) return (c1 > c2);
s1 = quadrant[i1]; s2 = quadrant[i2];
if (s1 != s2) return (s1 > s2);
i1++; i2++;
/* 2 */
c1 = block[i1]; c2 = block[i2];
if (c1 != c2) return (c1 > c2);
s1 = quadrant[i1]; s2 = quadrant[i2];
if (s1 != s2) return (s1 > s2);
i1++; i2++;
/* 3 */
c1 = block[i1]; c2 = block[i2];
if (c1 != c2) return (c1 > c2);
s1 = quadrant[i1]; s2 = quadrant[i2];
if (s1 != s2) return (s1 > s2);
i1++; i2++;
/* 4 */
c1 = block[i1]; c2 = block[i2];
if (c1 != c2) return (c1 > c2);
s1 = quadrant[i1]; s2 = quadrant[i2];
if (s1 != s2) return (s1 > s2);
i1++; i2++;
/* 5 */
c1 = block[i1]; c2 = block[i2];
if (c1 != c2) return (c1 > c2);
s1 = quadrant[i1]; s2 = quadrant[i2];
if (s1 != s2) return (s1 > s2);
i1++; i2++;
/* 6 */
c1 = block[i1]; c2 = block[i2];
if (c1 != c2) return (c1 > c2);
s1 = quadrant[i1]; s2 = quadrant[i2];
if (s1 != s2) return (s1 > s2);
i1++; i2++;
/* 7 */
c1 = block[i1]; c2 = block[i2];
if (c1 != c2) return (c1 > c2);
s1 = quadrant[i1]; s2 = quadrant[i2];
if (s1 != s2) return (s1 > s2);
i1++; i2++;
/* 8 */
c1 = block[i1]; c2 = block[i2];
if (c1 != c2) return (c1 > c2);
s1 = quadrant[i1]; s2 = quadrant[i2];
if (s1 != s2) return (s1 > s2);
i1++; i2++;
if (i1 >= nblock) i1 -= nblock;
if (i2 >= nblock) i2 -= nblock;
k -= 8;
(*budget)--;
}
while (k >= 0);
return False;
}
/*---------------------------------------------*/
/*--
Knuth's increments seem to work better
than Incerpi-Sedgewick here. Possibly
because the number of elems to sort is
usually small, typically <= 20.
--*/
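/* (Each increment is 3*previous + 1 -- Knuth's h_{k+1} = 3*h_k + 1
   sequence.) */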
static
Int32 incs[14] = { 1, 4, 13, 40, 121, 364, 1093, 3280,
9841, 29524, 88573, 265720,
797161, 2391484 };
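/* Shell sort of ptr[lo..hi] using the increments above. Elements are
   compared via mainGtU at depth d; the inner insertion step is unrolled
   three ways per increment pass, and the routine returns early once
   *budget goes negative. */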
static
void mainSimpleSort ( UInt32* ptr,
UChar* block,
UInt16* quadrant,
Int32 nblock,
Int32 lo,
Int32 hi,
Int32 d,
Int32* budget )
{
Int32 i, j, h, bigN, hp;
UInt32 v;
bigN = hi - lo + 1;
if (bigN < 2) return;
hp = 0;
while (incs[hp] < bigN) hp++;
hp--;
for (; hp >= 0; hp--) {
h = incs[hp];
i = lo + h;
while (True) {
/*-- copy 1 --*/
if (i > hi) break;
v = ptr[i];
j = i;
while ( mainGtU (
ptr[j-h]+d, v+d, block, quadrant, nblock, budget
) ) {
ptr[j] = ptr[j-h];
j = j - h;
if (j <= (lo + h - 1)) break;
}
ptr[j] = v;
i++;
/*-- copy 2 --*/
if (i > hi) break;
v = ptr[i];
j = i;
while ( mainGtU (
ptr[j-h]+d, v+d, block, quadrant, nblock, budget
) ) {
ptr[j] = ptr[j-h];
j = j - h;
if (j <= (lo + h - 1)) break;
}
ptr[j] = v;
i++;
/*-- copy 3 --*/
if (i > hi) break;
v = ptr[i];
j = i;
while ( mainGtU (
ptr[j-h]+d, v+d, block, quadrant, nblock, budget
) ) {
ptr[j] = ptr[j-h];
j = j - h;
if (j <= (lo + h - 1)) break;
}
ptr[j] = v;
i++;
if (*budget < 0) return;
}
}
}
/*---------------------------------------------*/
/*--
The following is an implementation of
an elegant 3-way quicksort for strings,
described in a paper "Fast Algorithms for
Sorting and Searching Strings", by Robert
Sedgewick and Jon L. Bentley.
--*/
#define mswap(zz1, zz2) \
{ Int32 zztmp = zz1; zz1 = zz2; zz2 = zztmp; }
#define mvswap(zzp1, zzp2, zzn) \
{ \
Int32 yyp1 = (zzp1); \
Int32 yyp2 = (zzp2); \
Int32 yyn = (zzn); \
while (yyn > 0) { \
mswap(ptr[yyp1], ptr[yyp2]); \
yyp1++; yyp2++; yyn--; \
} \
}
static
__inline__
UChar mmed3 ( UChar a, UChar b, UChar c )
{
UChar t;
if (a > b) { t = a; a = b; b = t; };
if (b > c) {
b = c;
if (a > b) b = a;
}
return b;
}
#define mmin(a,b) ((a) < (b)) ? (a) : (b)
#define mpush(lz,hz,dz) { stackLo[sp] = lz; \
stackHi[sp] = hz; \
stackD [sp] = dz; \
sp++; }
#define mpop(lz,hz,dz) { sp--; \
lz = stackLo[sp]; \
hz = stackHi[sp]; \
dz = stackD [sp]; }
#define mnextsize(az) (nextHi[az]-nextLo[az])
#define mnextswap(az,bz) \
{ Int32 tz; \
tz = nextLo[az]; nextLo[az] = nextLo[bz]; nextLo[bz] = tz; \
tz = nextHi[az]; nextHi[az] = nextHi[bz]; nextHi[bz] = tz; \
tz = nextD [az]; nextD [az] = nextD [bz]; nextD [bz] = tz; }
#define MAIN_QSORT_SMALL_THRESH 20
#define MAIN_QSORT_DEPTH_THRESH (BZ_N_RADIX + BZ_N_QSORT)
#define MAIN_QSORT_STACK_SIZE 100
static
void mainQSort3 ( UInt32* ptr,
UChar* block,
UInt16* quadrant,
Int32 nblock,
Int32 loSt,
Int32 hiSt,
Int32 dSt,
Int32* budget )
{
Int32 unLo, unHi, ltLo, gtHi, n, m, med;
Int32 sp, lo, hi, d;
Int32 stackLo[MAIN_QSORT_STACK_SIZE];
Int32 stackHi[MAIN_QSORT_STACK_SIZE];
Int32 stackD [MAIN_QSORT_STACK_SIZE];
Int32 nextLo[3];
Int32 nextHi[3];
Int32 nextD [3];
sp = 0;
mpush ( loSt, hiSt, dSt );
while (sp > 0) {
AssertH ( sp < MAIN_QSORT_STACK_SIZE, 1001 );
mpop ( lo, hi, d );
if (hi - lo < MAIN_QSORT_SMALL_THRESH ||
d > MAIN_QSORT_DEPTH_THRESH) {
mainSimpleSort ( ptr, block, quadrant, nblock, lo, hi, d, budget );
if (*budget < 0) return;
continue;
}
med = (Int32)
mmed3 ( block[ptr[ lo ]+d],
block[ptr[ hi ]+d],
block[ptr[ (lo+hi)>>1 ]+d] );