author     Denys Vlasenko <vda.linux@googlemail.com>   2010-11-03 02:38:31 +0100
committer  Denys Vlasenko <vda.linux@googlemail.com>   2010-11-03 02:38:31 +0100
commit     833d4e7f84f59099ee66eabfa3457ebb7d37eaa8 (patch)
tree       3be84e1049707ce8077291065fe3689497c69b9c /archival/bz
parent     5e9934028aa030312a1a2e2e32d5ceade8672beb (diff)
download   busybox-w32-833d4e7f84f59099ee66eabfa3457ebb7d37eaa8.tar.gz
           busybox-w32-833d4e7f84f59099ee66eabfa3457ebb7d37eaa8.tar.bz2
           busybox-w32-833d4e7f84f59099ee66eabfa3457ebb7d37eaa8.zip
rename archival/libunarchive -> archival/libarchive; move bz/ into it
Signed-off-by: Denys Vlasenko <vda.linux@googlemail.com>
Diffstat (limited to 'archival/bz')
-rw-r--r--  archival/bz/LICENSE            44
-rw-r--r--  archival/bz/README             90
-rw-r--r--  archival/bz/blocksort.c      1072
-rw-r--r--  archival/bz/bzlib.c           431
-rw-r--r--  archival/bz/bzlib.h            65
-rw-r--r--  archival/bz/bzlib_private.h   219
-rw-r--r--  archival/bz/compress.c        685
-rw-r--r--  archival/bz/huffman.c         229
8 files changed, 0 insertions, 2835 deletions
diff --git a/archival/bz/LICENSE b/archival/bz/LICENSE
deleted file mode 100644
index da4346520..000000000
--- a/archival/bz/LICENSE
+++ /dev/null
@@ -1,44 +0,0 @@
1bzip2 applet in busybox is based on lightly-modified source
2of bzip2 version 1.0.4. bzip2 source is distributed
3under the following conditions (copied verbatim from LICENSE file)
4===========================================================
5
6
7This program, "bzip2", the associated library "libbzip2", and all
8documentation, are copyright (C) 1996-2006 Julian R Seward. All
9rights reserved.
10
11Redistribution and use in source and binary forms, with or without
12modification, are permitted provided that the following conditions
13are met:
14
151. Redistributions of source code must retain the above copyright
16 notice, this list of conditions and the following disclaimer.
17
182. The origin of this software must not be misrepresented; you must
19 not claim that you wrote the original software. If you use this
20 software in a product, an acknowledgment in the product
21 documentation would be appreciated but is not required.
22
233. Altered source versions must be plainly marked as such, and must
24 not be misrepresented as being the original software.
25
264. The name of the author may not be used to endorse or promote
27 products derived from this software without specific prior written
28 permission.
29
30THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
31OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
32WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
33ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
34DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
35DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
36GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
37INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
38WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
39NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
40SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41
42Julian Seward, Cambridge, UK.
43jseward@bzip.org
44bzip2/libbzip2 version 1.0.4 of 20 December 2006
diff --git a/archival/bz/README b/archival/bz/README
deleted file mode 100644
index fffd47b8a..000000000
--- a/archival/bz/README
+++ /dev/null
@@ -1,90 +0,0 @@
1This file is an abridged version of README from bzip2 1.0.4
2Build instructions (which are not relevant to busyboxed bzip2)
3are removed.
4===========================================================
5
6
7This is the README for bzip2/libzip2.
8This version is fully compatible with the previous public releases.
9
10------------------------------------------------------------------
11This file is part of bzip2/libbzip2, a program and library for
12lossless, block-sorting data compression.
13
14bzip2/libbzip2 version 1.0.4 of 20 December 2006
15Copyright (C) 1996-2006 Julian Seward <jseward@bzip.org>
16
17Please read the WARNING, DISCLAIMER and PATENTS sections in this file.
18
19This program is released under the terms of the license contained
20in the file LICENSE.
21------------------------------------------------------------------
22
23Please read and be aware of the following:
24
25
26WARNING:
27
28 This program and library (attempts to) compress data by
29 performing several non-trivial transformations on it.
30 Unless you are 100% familiar with *all* the algorithms
31 contained herein, and with the consequences of modifying them,
32 you should NOT meddle with the compression or decompression
33 machinery. Incorrect changes can and very likely *will*
34 lead to disastrous loss of data.
35
36
37DISCLAIMER:
38
39 I TAKE NO RESPONSIBILITY FOR ANY LOSS OF DATA ARISING FROM THE
40 USE OF THIS PROGRAM/LIBRARY, HOWSOEVER CAUSED.
41
42 Every compression of a file implies an assumption that the
43 compressed file can be decompressed to reproduce the original.
44 Great efforts in design, coding and testing have been made to
45 ensure that this program works correctly. However, the complexity
46 of the algorithms, and, in particular, the presence of various
47 special cases in the code which occur with very low but non-zero
48 probability make it impossible to rule out the possibility of bugs
49 remaining in the program. DO NOT COMPRESS ANY DATA WITH THIS
50 PROGRAM UNLESS YOU ARE PREPARED TO ACCEPT THE POSSIBILITY, HOWEVER
51 SMALL, THAT THE DATA WILL NOT BE RECOVERABLE.
52
53 That is not to say this program is inherently unreliable.
54 Indeed, I very much hope the opposite is true. bzip2/libbzip2
55 has been carefully constructed and extensively tested.
56
57
58PATENTS:
59
60 To the best of my knowledge, bzip2/libbzip2 does not use any
61 patented algorithms. However, I do not have the resources
62 to carry out a patent search. Therefore I cannot give any
63 guarantee of the above statement.
64
65
66I hope you find bzip2 useful. Feel free to contact me at
67 jseward@bzip.org
68if you have any suggestions or queries. Many people mailed me with
69comments, suggestions and patches after the releases of bzip-0.15,
70bzip-0.21, and bzip2 versions 0.1pl2, 0.9.0, 0.9.5, 1.0.0, 1.0.1,
711.0.2 and 1.0.3, and the changes in bzip2 are largely a result of this
72feedback. I thank you for your comments.
73
74bzip2's "home" is http://www.bzip.org/
75
76Julian Seward
77jseward@bzip.org
78Cambridge, UK.
79
8018 July 1996 (version 0.15)
8125 August 1996 (version 0.21)
82 7 August 1997 (bzip2, version 0.1)
8329 August 1997 (bzip2, version 0.1pl2)
8423 August 1998 (bzip2, version 0.9.0)
85 8 June 1999 (bzip2, version 0.9.5)
86 4 Sept 1999 (bzip2, version 0.9.5d)
87 5 May 2000 (bzip2, version 1.0pre8)
8830 December 2001 (bzip2, version 1.0.2pre1)
8915 February 2005 (bzip2, version 1.0.3)
9020 December 2006 (bzip2, version 1.0.4)
diff --git a/archival/bz/blocksort.c b/archival/bz/blocksort.c
deleted file mode 100644
index f70c3701d..000000000
--- a/archival/bz/blocksort.c
+++ /dev/null
@@ -1,1072 +0,0 @@
1/*
2 * bzip2 is written by Julian Seward <jseward@bzip.org>.
3 * Adapted for busybox by Denys Vlasenko <vda.linux@googlemail.com>.
4 * See README and LICENSE files in this directory for more information.
5 */
6
7/*-------------------------------------------------------------*/
8/*--- Block sorting machinery ---*/
9/*--- blocksort.c ---*/
10/*-------------------------------------------------------------*/
11
12/* ------------------------------------------------------------------
13This file is part of bzip2/libbzip2, a program and library for
14lossless, block-sorting data compression.
15
16bzip2/libbzip2 version 1.0.4 of 20 December 2006
17Copyright (C) 1996-2006 Julian Seward <jseward@bzip.org>
18
19Please read the WARNING, DISCLAIMER and PATENTS sections in the
20README file.
21
22This program is released under the terms of the license contained
23in the file LICENSE.
24------------------------------------------------------------------ */
25
26/* #include "bzlib_private.h" */
27
28#define mswap(zz1, zz2) \
29{ \
30 int32_t zztmp = zz1; \
31 zz1 = zz2; \
32 zz2 = zztmp; \
33}
34
35static
36/* No measurable speed gain with inlining */
37/* ALWAYS_INLINE */
38void mvswap(uint32_t* ptr, int32_t zzp1, int32_t zzp2, int32_t zzn)
39{
40 while (zzn > 0) {
41 mswap(ptr[zzp1], ptr[zzp2]);
42 zzp1++;
43 zzp2++;
44 zzn--;
45 }
46}
47
48static
49ALWAYS_INLINE
50int32_t mmin(int32_t a, int32_t b)
51{
52 return (a < b) ? a : b;
53}
54
55
56/*---------------------------------------------*/
57/*--- Fallback O(N log(N)^2) sorting ---*/
58/*--- algorithm, for repetitive blocks ---*/
59/*---------------------------------------------*/
60
61/*---------------------------------------------*/
62static
63inline
64void fallbackSimpleSort(uint32_t* fmap,
65 uint32_t* eclass,
66 int32_t lo,
67 int32_t hi)
68{
69 int32_t i, j, tmp;
70 uint32_t ec_tmp;
71
72 if (lo == hi) return;
73
74 if (hi - lo > 3) {
75 for (i = hi-4; i >= lo; i--) {
76 tmp = fmap[i];
77 ec_tmp = eclass[tmp];
78 for (j = i+4; j <= hi && ec_tmp > eclass[fmap[j]]; j += 4)
79 fmap[j-4] = fmap[j];
80 fmap[j-4] = tmp;
81 }
82 }
83
84 for (i = hi-1; i >= lo; i--) {
85 tmp = fmap[i];
86 ec_tmp = eclass[tmp];
87 for (j = i+1; j <= hi && ec_tmp > eclass[fmap[j]]; j++)
88 fmap[j-1] = fmap[j];
89 fmap[j-1] = tmp;
90 }
91}
92
93
94/*---------------------------------------------*/
95#define fpush(lz,hz) { \
96 stackLo[sp] = lz; \
97 stackHi[sp] = hz; \
98 sp++; \
99}
100
101#define fpop(lz,hz) { \
102 sp--; \
103 lz = stackLo[sp]; \
104 hz = stackHi[sp]; \
105}
106
107#define FALLBACK_QSORT_SMALL_THRESH 10
108#define FALLBACK_QSORT_STACK_SIZE 100
109
110static
111void fallbackQSort3(uint32_t* fmap,
112 uint32_t* eclass,
113 int32_t loSt,
114 int32_t hiSt)
115{
116 int32_t unLo, unHi, ltLo, gtHi, n, m;
117 int32_t sp, lo, hi;
118 uint32_t med, r, r3;
119 int32_t stackLo[FALLBACK_QSORT_STACK_SIZE];
120 int32_t stackHi[FALLBACK_QSORT_STACK_SIZE];
121
122 r = 0;
123
124 sp = 0;
125 fpush(loSt, hiSt);
126
127 while (sp > 0) {
128 AssertH(sp < FALLBACK_QSORT_STACK_SIZE - 1, 1004);
129
130 fpop(lo, hi);
131 if (hi - lo < FALLBACK_QSORT_SMALL_THRESH) {
132 fallbackSimpleSort(fmap, eclass, lo, hi);
133 continue;
134 }
135
136 /* Random partitioning. Median of 3 sometimes fails to
137 * avoid bad cases. Median of 9 seems to help but
138 * looks rather expensive. This too seems to work but
139 * is cheaper. Guidance for the magic constants
140 * 7621 and 32768 is taken from Sedgewick's algorithms
141 * book, chapter 35.
142 */
143 r = ((r * 7621) + 1) % 32768;
144 r3 = r % 3;
145 if (r3 == 0)
146 med = eclass[fmap[lo]];
147 else if (r3 == 1)
148 med = eclass[fmap[(lo+hi)>>1]];
149 else
150 med = eclass[fmap[hi]];
151
152 unLo = ltLo = lo;
153 unHi = gtHi = hi;
154
155 while (1) {
156 while (1) {
157 if (unLo > unHi) break;
158 n = (int32_t)eclass[fmap[unLo]] - (int32_t)med;
159 if (n == 0) {
160 mswap(fmap[unLo], fmap[ltLo]);
161 ltLo++;
162 unLo++;
163 continue;
164 };
165 if (n > 0) break;
166 unLo++;
167 }
168 while (1) {
169 if (unLo > unHi) break;
170 n = (int32_t)eclass[fmap[unHi]] - (int32_t)med;
171 if (n == 0) {
172 mswap(fmap[unHi], fmap[gtHi]);
173 gtHi--; unHi--;
174 continue;
175 };
176 if (n < 0) break;
177 unHi--;
178 }
179 if (unLo > unHi) break;
180 mswap(fmap[unLo], fmap[unHi]); unLo++; unHi--;
181 }
182
183 AssertD(unHi == unLo-1, "fallbackQSort3(2)");
184
185 if (gtHi < ltLo) continue;
186
187 n = mmin(ltLo-lo, unLo-ltLo); mvswap(fmap, lo, unLo-n, n);
188 m = mmin(hi-gtHi, gtHi-unHi); mvswap(fmap, unLo, hi-m+1, m);
189
190 n = lo + unLo - ltLo - 1;
191 m = hi - (gtHi - unHi) + 1;
192
193 if (n - lo > hi - m) {
194 fpush(lo, n);
195 fpush(m, hi);
196 } else {
197 fpush(m, hi);
198 fpush(lo, n);
199 }
200 }
201}
202
203#undef fpush
204#undef fpop
205#undef FALLBACK_QSORT_SMALL_THRESH
206#undef FALLBACK_QSORT_STACK_SIZE
207
208
209/*---------------------------------------------*/
210/* Pre:
211 * nblock > 0
212 * eclass exists for [0 .. nblock-1]
213 * ((uint8_t*)eclass) [0 .. nblock-1] holds block
214 * ptr exists for [0 .. nblock-1]
215 *
216 * Post:
217 * ((uint8_t*)eclass) [0 .. nblock-1] holds block
218 * All other areas of eclass destroyed
219 * fmap [0 .. nblock-1] holds sorted order
220 * bhtab[0 .. 2+(nblock/32)] destroyed
221*/
222
223#define SET_BH(zz) bhtab[(zz) >> 5] |= (1 << ((zz) & 31))
224#define CLEAR_BH(zz) bhtab[(zz) >> 5] &= ~(1 << ((zz) & 31))
225#define ISSET_BH(zz) (bhtab[(zz) >> 5] & (1 << ((zz) & 31)))
226#define WORD_BH(zz) bhtab[(zz) >> 5]
227#define UNALIGNED_BH(zz) ((zz) & 0x01f)
228
229static
230void fallbackSort(uint32_t* fmap,
231 uint32_t* eclass,
232 uint32_t* bhtab,
233 int32_t nblock)
234{
235 int32_t ftab[257];
236 int32_t ftabCopy[256];
237 int32_t H, i, j, k, l, r, cc, cc1;
238 int32_t nNotDone;
239 int32_t nBhtab;
240 uint8_t* eclass8 = (uint8_t*)eclass;
241
242 /*
243 * Initial 1-char radix sort to generate
244 * initial fmap and initial BH bits.
245 */
246 for (i = 0; i < 257; i++) ftab[i] = 0;
247 for (i = 0; i < nblock; i++) ftab[eclass8[i]]++;
248 for (i = 0; i < 256; i++) ftabCopy[i] = ftab[i];
249
250 j = ftab[0]; /* bbox: optimized */
251 for (i = 1; i < 257; i++) {
252 j += ftab[i];
253 ftab[i] = j;
254 }
255
256 for (i = 0; i < nblock; i++) {
257 j = eclass8[i];
258 k = ftab[j] - 1;
259 ftab[j] = k;
260 fmap[k] = i;
261 }
262
263 nBhtab = 2 + ((uint32_t)nblock / 32); /* bbox: unsigned div is easier */
264 for (i = 0; i < nBhtab; i++) bhtab[i] = 0;
265 for (i = 0; i < 256; i++) SET_BH(ftab[i]);
266
267 /*
268 * Inductively refine the buckets. Kind-of an
269 * "exponential radix sort" (!), inspired by the
270 * Manber-Myers suffix array construction algorithm.
271 */
272
273 /*-- set sentinel bits for block-end detection --*/
274 for (i = 0; i < 32; i++) {
275 SET_BH(nblock + 2*i);
276 CLEAR_BH(nblock + 2*i + 1);
277 }
278
279 /*-- the log(N) loop --*/
280 H = 1;
281 while (1) {
282 j = 0;
283 for (i = 0; i < nblock; i++) {
284 if (ISSET_BH(i))
285 j = i;
286 k = fmap[i] - H;
287 if (k < 0)
288 k += nblock;
289 eclass[k] = j;
290 }
291
292 nNotDone = 0;
293 r = -1;
294 while (1) {
295
296 /*-- find the next non-singleton bucket --*/
297 k = r + 1;
298 while (ISSET_BH(k) && UNALIGNED_BH(k))
299 k++;
300 if (ISSET_BH(k)) {
301 while (WORD_BH(k) == 0xffffffff) k += 32;
302 while (ISSET_BH(k)) k++;
303 }
304 l = k - 1;
305 if (l >= nblock)
306 break;
307 while (!ISSET_BH(k) && UNALIGNED_BH(k))
308 k++;
309 if (!ISSET_BH(k)) {
310 while (WORD_BH(k) == 0x00000000) k += 32;
311 while (!ISSET_BH(k)) k++;
312 }
313 r = k - 1;
314 if (r >= nblock)
315 break;
316
317 /*-- now [l, r] bracket current bucket --*/
318 if (r > l) {
319 nNotDone += (r - l + 1);
320 fallbackQSort3(fmap, eclass, l, r);
321
322 /*-- scan bucket and generate header bits-- */
323 cc = -1;
324 for (i = l; i <= r; i++) {
325 cc1 = eclass[fmap[i]];
326 if (cc != cc1) {
327 SET_BH(i);
328 cc = cc1;
329 };
330 }
331 }
332 }
333
334 H *= 2;
335 if (H > nblock || nNotDone == 0)
336 break;
337 }
338
339 /*
340 * Reconstruct the original block in
341 * eclass8 [0 .. nblock-1], since the
342 * previous phase destroyed it.
343 */
344 j = 0;
345 for (i = 0; i < nblock; i++) {
346 while (ftabCopy[j] == 0)
347 j++;
348 ftabCopy[j]--;
349 eclass8[fmap[i]] = (uint8_t)j;
350 }
351 AssertH(j < 256, 1005);
352}
353
354#undef SET_BH
355#undef CLEAR_BH
356#undef ISSET_BH
357#undef WORD_BH
358#undef UNALIGNED_BH
359
360
361/*---------------------------------------------*/
362/*--- The main, O(N^2 log(N)) sorting ---*/
363/*--- algorithm. Faster for "normal" ---*/
364/*--- non-repetitive blocks. ---*/
365/*---------------------------------------------*/
366
367/*---------------------------------------------*/
368static
369NOINLINE
370int mainGtU(
371 uint32_t i1,
372 uint32_t i2,
373 uint8_t* block,
374 uint16_t* quadrant,
375 uint32_t nblock,
376 int32_t* budget)
377{
378 int32_t k;
379 uint8_t c1, c2;
380 uint16_t s1, s2;
381
382/* Loop unrolling here is actually very useful
383 * (generated code is much simpler),
384 * code size increase is only 270 bytes (i386)
385 * but speeds up compression 10% overall
386 */
387
388#if CONFIG_BZIP2_FEATURE_SPEED >= 1
389
390#define TIMES_8(code) \
391 code; code; code; code; \
392 code; code; code; code;
393#define TIMES_12(code) \
394 code; code; code; code; \
395 code; code; code; code; \
396 code; code; code; code;
397
398#else
399
400#define TIMES_8(code) \
401{ \
402 int nn = 8; \
403 do { \
404 code; \
405 } while (--nn); \
406}
407#define TIMES_12(code) \
408{ \
409 int nn = 12; \
410 do { \
411 code; \
412 } while (--nn); \
413}
414
415#endif
416
417 AssertD(i1 != i2, "mainGtU");
418 TIMES_12(
419 c1 = block[i1]; c2 = block[i2];
420 if (c1 != c2) return (c1 > c2);
421 i1++; i2++;
422 )
423
424 k = nblock + 8;
425
426 do {
427 TIMES_8(
428 c1 = block[i1]; c2 = block[i2];
429 if (c1 != c2) return (c1 > c2);
430 s1 = quadrant[i1]; s2 = quadrant[i2];
431 if (s1 != s2) return (s1 > s2);
432 i1++; i2++;
433 )
434
435 if (i1 >= nblock) i1 -= nblock;
436 if (i2 >= nblock) i2 -= nblock;
437
438 (*budget)--;
439 k -= 8;
440 } while (k >= 0);
441
442 return False;
443}
444#undef TIMES_8
445#undef TIMES_12
446
447/*---------------------------------------------*/
448/*
449 * Knuth's increments seem to work better
450 * than Incerpi-Sedgewick here. Possibly
451 * because the number of elems to sort is
452 * usually small, typically <= 20.
453 */
454static
455const int32_t incs[14] = {
456 1, 4, 13, 40, 121, 364, 1093, 3280,
457 9841, 29524, 88573, 265720,
458 797161, 2391484
459};
460
461static
462void mainSimpleSort(uint32_t* ptr,
463 uint8_t* block,
464 uint16_t* quadrant,
465 int32_t nblock,
466 int32_t lo,
467 int32_t hi,
468 int32_t d,
469 int32_t* budget)
470{
471 int32_t i, j, h, bigN, hp;
472 uint32_t v;
473
474 bigN = hi - lo + 1;
475 if (bigN < 2) return;
476
477 hp = 0;
478 while (incs[hp] < bigN) hp++;
479 hp--;
480
481 for (; hp >= 0; hp--) {
482 h = incs[hp];
483
484 i = lo + h;
485 while (1) {
486 /*-- copy 1 --*/
487 if (i > hi) break;
488 v = ptr[i];
489 j = i;
490 while (mainGtU(ptr[j-h]+d, v+d, block, quadrant, nblock, budget)) {
491 ptr[j] = ptr[j-h];
492 j = j - h;
493 if (j <= (lo + h - 1)) break;
494 }
495 ptr[j] = v;
496 i++;
497
498/* 1.5% overall speedup, +290 bytes */
499#if CONFIG_BZIP2_FEATURE_SPEED >= 3
500 /*-- copy 2 --*/
501 if (i > hi) break;
502 v = ptr[i];
503 j = i;
504 while (mainGtU(ptr[j-h]+d, v+d, block, quadrant, nblock, budget)) {
505 ptr[j] = ptr[j-h];
506 j = j - h;
507 if (j <= (lo + h - 1)) break;
508 }
509 ptr[j] = v;
510 i++;
511
512 /*-- copy 3 --*/
513 if (i > hi) break;
514 v = ptr[i];
515 j = i;
516 while (mainGtU(ptr[j-h]+d, v+d, block, quadrant, nblock, budget)) {
517 ptr[j] = ptr[j-h];
518 j = j - h;
519 if (j <= (lo + h - 1)) break;
520 }
521 ptr[j] = v;
522 i++;
523#endif
524 if (*budget < 0) return;
525 }
526 }
527}
528
529
530/*---------------------------------------------*/
531/*
532 * The following is an implementation of
533 * an elegant 3-way quicksort for strings,
534 * described in a paper "Fast Algorithms for
535 * Sorting and Searching Strings", by Robert
536 * Sedgewick and Jon L. Bentley.
537 */
538
539static
540ALWAYS_INLINE
541uint8_t mmed3(uint8_t a, uint8_t b, uint8_t c)
542{
543 uint8_t t;
544 if (a > b) {
545 t = a;
546 a = b;
547 b = t;
548 };
549 /* here b >= a */
550 if (b > c) {
551 b = c;
552 if (a > b)
553 b = a;
554 }
555 return b;
556}
557
558#define mpush(lz,hz,dz) \
559{ \
560 stackLo[sp] = lz; \
561 stackHi[sp] = hz; \
562 stackD [sp] = dz; \
563 sp++; \
564}
565
566#define mpop(lz,hz,dz) \
567{ \
568 sp--; \
569 lz = stackLo[sp]; \
570 hz = stackHi[sp]; \
571 dz = stackD [sp]; \
572}
573
574#define mnextsize(az) (nextHi[az] - nextLo[az])
575
576#define mnextswap(az,bz) \
577{ \
578 int32_t tz; \
579 tz = nextLo[az]; nextLo[az] = nextLo[bz]; nextLo[bz] = tz; \
580 tz = nextHi[az]; nextHi[az] = nextHi[bz]; nextHi[bz] = tz; \
581 tz = nextD [az]; nextD [az] = nextD [bz]; nextD [bz] = tz; \
582}
583
584#define MAIN_QSORT_SMALL_THRESH 20
585#define MAIN_QSORT_DEPTH_THRESH (BZ_N_RADIX + BZ_N_QSORT)
586#define MAIN_QSORT_STACK_SIZE 100
587
588static NOINLINE
589void mainQSort3(uint32_t* ptr,
590 uint8_t* block,
591 uint16_t* quadrant,
592 int32_t nblock,
593 int32_t loSt,
594 int32_t hiSt,
595 int32_t dSt,
596 int32_t* budget)
597{
598 int32_t unLo, unHi, ltLo, gtHi, n, m, med;
599 int32_t sp, lo, hi, d;
600
601 int32_t stackLo[MAIN_QSORT_STACK_SIZE];
602 int32_t stackHi[MAIN_QSORT_STACK_SIZE];
603 int32_t stackD [MAIN_QSORT_STACK_SIZE];
604
605 int32_t nextLo[3];
606 int32_t nextHi[3];
607 int32_t nextD [3];
608
609 sp = 0;
610 mpush(loSt, hiSt, dSt);
611
612 while (sp > 0) {
613 AssertH(sp < MAIN_QSORT_STACK_SIZE - 2, 1001);
614
615 mpop(lo, hi, d);
616 if (hi - lo < MAIN_QSORT_SMALL_THRESH
617 || d > MAIN_QSORT_DEPTH_THRESH
618 ) {
619 mainSimpleSort(ptr, block, quadrant, nblock, lo, hi, d, budget);
620 if (*budget < 0)
621 return;
622 continue;
623 }
624 med = (int32_t) mmed3(block[ptr[lo ] + d],
625 block[ptr[hi ] + d],
626 block[ptr[(lo+hi) >> 1] + d]);
627
628 unLo = ltLo = lo;
629 unHi = gtHi = hi;
630
631 while (1) {
632 while (1) {
633 if (unLo > unHi)
634 break;
635 n = ((int32_t)block[ptr[unLo]+d]) - med;
636 if (n == 0) {
637 mswap(ptr[unLo], ptr[ltLo]);
638 ltLo++;
639 unLo++;
640 continue;
641 };
642 if (n > 0) break;
643 unLo++;
644 }
645 while (1) {
646 if (unLo > unHi)
647 break;
648 n = ((int32_t)block[ptr[unHi]+d]) - med;
649 if (n == 0) {
650 mswap(ptr[unHi], ptr[gtHi]);
651 gtHi--;
652 unHi--;
653 continue;
654 };
655 if (n < 0) break;
656 unHi--;
657 }
658 if (unLo > unHi)
659 break;
660 mswap(ptr[unLo], ptr[unHi]);
661 unLo++;
662 unHi--;
663 }
664
665 AssertD(unHi == unLo-1, "mainQSort3(2)");
666
667 if (gtHi < ltLo) {
668 mpush(lo, hi, d + 1);
669 continue;
670 }
671
672 n = mmin(ltLo-lo, unLo-ltLo); mvswap(ptr, lo, unLo-n, n);
673 m = mmin(hi-gtHi, gtHi-unHi); mvswap(ptr, unLo, hi-m+1, m);
674
675 n = lo + unLo - ltLo - 1;
676 m = hi - (gtHi - unHi) + 1;
677
678 nextLo[0] = lo; nextHi[0] = n; nextD[0] = d;
679 nextLo[1] = m; nextHi[1] = hi; nextD[1] = d;
680 nextLo[2] = n+1; nextHi[2] = m-1; nextD[2] = d+1;
681
682 if (mnextsize(0) < mnextsize(1)) mnextswap(0, 1);
683 if (mnextsize(1) < mnextsize(2)) mnextswap(1, 2);
684 if (mnextsize(0) < mnextsize(1)) mnextswap(0, 1);
685
686 AssertD (mnextsize(0) >= mnextsize(1), "mainQSort3(8)");
687 AssertD (mnextsize(1) >= mnextsize(2), "mainQSort3(9)");
688
689 mpush(nextLo[0], nextHi[0], nextD[0]);
690 mpush(nextLo[1], nextHi[1], nextD[1]);
691 mpush(nextLo[2], nextHi[2], nextD[2]);
692 }
693}
694
695#undef mpush
696#undef mpop
697#undef mnextsize
698#undef mnextswap
699#undef MAIN_QSORT_SMALL_THRESH
700#undef MAIN_QSORT_DEPTH_THRESH
701#undef MAIN_QSORT_STACK_SIZE
702
703
704/*---------------------------------------------*/
705/* Pre:
706 * nblock > N_OVERSHOOT
707 * block32 exists for [0 .. nblock-1 +N_OVERSHOOT]
708 * ((uint8_t*)block32) [0 .. nblock-1] holds block
709 * ptr exists for [0 .. nblock-1]
710 *
711 * Post:
712 * ((uint8_t*)block32) [0 .. nblock-1] holds block
713 * All other areas of block32 destroyed
714 * ftab[0 .. 65536] destroyed
715 * ptr [0 .. nblock-1] holds sorted order
716 * if (*budget < 0), sorting was abandoned
717 */
718
719#define BIGFREQ(b) (ftab[((b)+1) << 8] - ftab[(b) << 8])
720#define SETMASK (1 << 21)
721#define CLEARMASK (~(SETMASK))
722
723static NOINLINE
724void mainSort(EState* state,
725 uint32_t* ptr,
726 uint8_t* block,
727 uint16_t* quadrant,
728 uint32_t* ftab,
729 int32_t nblock,
730 int32_t* budget)
731{
732 int32_t i, j, k, ss, sb;
733 uint8_t c1;
734 int32_t numQSorted;
735 uint16_t s;
736 Bool bigDone[256];
737 /* bbox: moved to EState to save stack
738 int32_t runningOrder[256];
739 int32_t copyStart[256];
740 int32_t copyEnd [256];
741 */
742#define runningOrder (state->mainSort__runningOrder)
743#define copyStart (state->mainSort__copyStart)
744#define copyEnd (state->mainSort__copyEnd)
745
746 /*-- set up the 2-byte frequency table --*/
747 /* was: for (i = 65536; i >= 0; i--) ftab[i] = 0; */
748 memset(ftab, 0, 65537 * sizeof(ftab[0]));
749
750 j = block[0] << 8;
751 i = nblock - 1;
752/* 3%, +300 bytes */
753#if CONFIG_BZIP2_FEATURE_SPEED >= 2
754 for (; i >= 3; i -= 4) {
755 quadrant[i] = 0;
756 j = (j >> 8) | (((uint16_t)block[i]) << 8);
757 ftab[j]++;
758 quadrant[i-1] = 0;
759 j = (j >> 8) | (((uint16_t)block[i-1]) << 8);
760 ftab[j]++;
761 quadrant[i-2] = 0;
762 j = (j >> 8) | (((uint16_t)block[i-2]) << 8);
763 ftab[j]++;
764 quadrant[i-3] = 0;
765 j = (j >> 8) | (((uint16_t)block[i-3]) << 8);
766 ftab[j]++;
767 }
768#endif
769 for (; i >= 0; i--) {
770 quadrant[i] = 0;
771 j = (j >> 8) | (((uint16_t)block[i]) << 8);
772 ftab[j]++;
773 }
774
775 /*-- (emphasises close relationship of block & quadrant) --*/
776 for (i = 0; i < BZ_N_OVERSHOOT; i++) {
777 block [nblock+i] = block[i];
778 quadrant[nblock+i] = 0;
779 }
780
781 /*-- Complete the initial radix sort --*/
782 j = ftab[0]; /* bbox: optimized */
783 for (i = 1; i <= 65536; i++) {
784 j += ftab[i];
785 ftab[i] = j;
786 }
787
788 s = block[0] << 8;
789 i = nblock - 1;
790#if CONFIG_BZIP2_FEATURE_SPEED >= 2
791 for (; i >= 3; i -= 4) {
792 s = (s >> 8) | (block[i] << 8);
793 j = ftab[s] - 1;
794 ftab[s] = j;
795 ptr[j] = i;
796 s = (s >> 8) | (block[i-1] << 8);
797 j = ftab[s] - 1;
798 ftab[s] = j;
799 ptr[j] = i-1;
800 s = (s >> 8) | (block[i-2] << 8);
801 j = ftab[s] - 1;
802 ftab[s] = j;
803 ptr[j] = i-2;
804 s = (s >> 8) | (block[i-3] << 8);
805 j = ftab[s] - 1;
806 ftab[s] = j;
807 ptr[j] = i-3;
808 }
809#endif
810 for (; i >= 0; i--) {
811 s = (s >> 8) | (block[i] << 8);
812 j = ftab[s] - 1;
813 ftab[s] = j;
814 ptr[j] = i;
815 }
816
817 /*
818 * Now ftab contains the first loc of every small bucket.
819 * Calculate the running order, from smallest to largest
820 * big bucket.
821 */
822 for (i = 0; i <= 255; i++) {
823 bigDone [i] = False;
824 runningOrder[i] = i;
825 }
826
827 {
828 int32_t vv;
829 /* bbox: was: int32_t h = 1; */
830 /* do h = 3 * h + 1; while (h <= 256); */
831 uint32_t h = 364;
832
833 do {
834 /*h = h / 3;*/
835 h = (h * 171) >> 9; /* bbox: fast h/3 */
836 for (i = h; i <= 255; i++) {
837 vv = runningOrder[i];
838 j = i;
839 while (BIGFREQ(runningOrder[j-h]) > BIGFREQ(vv)) {
840 runningOrder[j] = runningOrder[j-h];
841 j = j - h;
842 if (j <= (h - 1))
843 goto zero;
844 }
845 zero:
846 runningOrder[j] = vv;
847 }
848 } while (h != 1);
849 }
850
851 /*
852 * The main sorting loop.
853 */
854
855 numQSorted = 0;
856
857 for (i = 0; i <= 255; i++) {
858
859 /*
860 * Process big buckets, starting with the least full.
861 * Basically this is a 3-step process in which we call
862 * mainQSort3 to sort the small buckets [ss, j], but
863 * also make a big effort to avoid the calls if we can.
864 */
865 ss = runningOrder[i];
866
867 /*
868 * Step 1:
869 * Complete the big bucket [ss] by quicksorting
870 * any unsorted small buckets [ss, j], for j != ss.
871 * Hopefully previous pointer-scanning phases have already
872 * completed many of the small buckets [ss, j], so
873 * we don't have to sort them at all.
874 */
875 for (j = 0; j <= 255; j++) {
876 if (j != ss) {
877 sb = (ss << 8) + j;
878 if (!(ftab[sb] & SETMASK)) {
879 int32_t lo = ftab[sb] & CLEARMASK;
880 int32_t hi = (ftab[sb+1] & CLEARMASK) - 1;
881 if (hi > lo) {
882 mainQSort3(
883 ptr, block, quadrant, nblock,
884 lo, hi, BZ_N_RADIX, budget
885 );
886 if (*budget < 0) return;
887 numQSorted += (hi - lo + 1);
888 }
889 }
890 ftab[sb] |= SETMASK;
891 }
892 }
893
894 AssertH(!bigDone[ss], 1006);
895
896 /*
897 * Step 2:
898 * Now scan this big bucket [ss] so as to synthesise the
899 * sorted order for small buckets [t, ss] for all t,
900 * including, magically, the bucket [ss,ss] too.
901 * This will avoid doing Real Work in subsequent Step 1's.
902 */
903 {
904 for (j = 0; j <= 255; j++) {
905 copyStart[j] = ftab[(j << 8) + ss] & CLEARMASK;
906 copyEnd [j] = (ftab[(j << 8) + ss + 1] & CLEARMASK) - 1;
907 }
908 for (j = ftab[ss << 8] & CLEARMASK; j < copyStart[ss]; j++) {
909 k = ptr[j] - 1;
910 if (k < 0)
911 k += nblock;
912 c1 = block[k];
913 if (!bigDone[c1])
914 ptr[copyStart[c1]++] = k;
915 }
916 for (j = (ftab[(ss+1) << 8] & CLEARMASK) - 1; j > copyEnd[ss]; j--) {
917 k = ptr[j]-1;
918 if (k < 0)
919 k += nblock;
920 c1 = block[k];
921 if (!bigDone[c1])
922 ptr[copyEnd[c1]--] = k;
923 }
924 }
925
926 /* Extremely rare case missing in bzip2-1.0.0 and 1.0.1.
927 * Necessity for this case is demonstrated by compressing
928 * a sequence of approximately 48.5 million of character
929 * 251; 1.0.0/1.0.1 will then die here. */
930 AssertH((copyStart[ss]-1 == copyEnd[ss]) \
931 || (copyStart[ss] == 0 && copyEnd[ss] == nblock-1), 1007);
932
933 for (j = 0; j <= 255; j++)
934 ftab[(j << 8) + ss] |= SETMASK;
935
936 /*
937 * Step 3:
938 * The [ss] big bucket is now done. Record this fact,
939 * and update the quadrant descriptors. Remember to
940 * update quadrants in the overshoot area too, if
941 * necessary. The "if (i < 255)" test merely skips
942 * this updating for the last bucket processed, since
943 * updating for the last bucket is pointless.
944 *
945 * The quadrant array provides a way to incrementally
946 * cache sort orderings, as they appear, so as to
947 * make subsequent comparisons in fullGtU() complete
948 * faster. For repetitive blocks this makes a big
949 * difference (but not big enough to be able to avoid
950 * the fallback sorting mechanism, exponential radix sort).
951 *
952 * The precise meaning is: at all times:
953 *
954 * for 0 <= i < nblock and 0 <= j <= nblock
955 *
956 * if block[i] != block[j],
957 *
958 * then the relative values of quadrant[i] and
959 * quadrant[j] are meaningless.
960 *
961 * else {
962 * if quadrant[i] < quadrant[j]
963 * then the string starting at i lexicographically
964 * precedes the string starting at j
965 *
966 * else if quadrant[i] > quadrant[j]
967 * then the string starting at j lexicographically
968 * precedes the string starting at i
969 *
970 * else
971 * the relative ordering of the strings starting
972 * at i and j has not yet been determined.
973 * }
974 */
975 bigDone[ss] = True;
976
977 if (i < 255) {
978 int32_t bbStart = ftab[ss << 8] & CLEARMASK;
979 int32_t bbSize = (ftab[(ss+1) << 8] & CLEARMASK) - bbStart;
980 int32_t shifts = 0;
981
982 while ((bbSize >> shifts) > 65534) shifts++;
983
984 for (j = bbSize-1; j >= 0; j--) {
985 int32_t a2update = ptr[bbStart + j];
986 uint16_t qVal = (uint16_t)(j >> shifts);
987 quadrant[a2update] = qVal;
988 if (a2update < BZ_N_OVERSHOOT)
989 quadrant[a2update + nblock] = qVal;
990 }
991 AssertH(((bbSize-1) >> shifts) <= 65535, 1002);
992 }
993 }
994#undef runningOrder
995#undef copyStart
996#undef copyEnd
997}
998
999#undef BIGFREQ
1000#undef SETMASK
1001#undef CLEARMASK
1002
1003
1004/*---------------------------------------------*/
1005/* Pre:
1006 * nblock > 0
1007 * arr2 exists for [0 .. nblock-1 +N_OVERSHOOT]
1008 * ((uint8_t*)arr2)[0 .. nblock-1] holds block
1009 * arr1 exists for [0 .. nblock-1]
1010 *
1011 * Post:
1012 * ((uint8_t*)arr2) [0 .. nblock-1] holds block
1013 * All other areas of block destroyed
1014 * ftab[0 .. 65536] destroyed
1015 * arr1[0 .. nblock-1] holds sorted order
1016 */
1017static NOINLINE
1018void BZ2_blockSort(EState* s)
1019{
1020 /* In original bzip2 1.0.4, it's a parameter, but 30
1021 * (which was the default) should work ok. */
1022 enum { wfact = 30 };
1023
1024 uint32_t* ptr = s->ptr;
1025 uint8_t* block = s->block;
1026 uint32_t* ftab = s->ftab;
1027 int32_t nblock = s->nblock;
1028 uint16_t* quadrant;
1029 int32_t budget;
1030 int32_t i;
1031
1032 if (nblock < 10000) {
1033 fallbackSort(s->arr1, s->arr2, ftab, nblock);
1034 } else {
1035 /* Calculate the location for quadrant, remembering to get
1036 * the alignment right. Assumes that &(block[0]) is at least
1037 * 2-byte aligned -- this should be ok since block is really
1038 * the first section of arr2.
1039 */
1040 i = nblock + BZ_N_OVERSHOOT;
1041 if (i & 1) i++;
1042 quadrant = (uint16_t*)(&(block[i]));
1043
1044 /* (wfact-1) / 3 puts the default-factor-30
1045 * transition point at very roughly the same place as
1046 * with v0.1 and v0.9.0.
1047 * Not that it particularly matters any more, since the
1048 * resulting compressed stream is now the same regardless
1049 * of whether or not we use the main sort or fallback sort.
1050 */
1051 budget = nblock * ((wfact-1) / 3);
1052
1053 mainSort(s, ptr, block, quadrant, ftab, nblock, &budget);
1054 if (budget < 0) {
1055 fallbackSort(s->arr1, s->arr2, ftab, nblock);
1056 }
1057 }
1058
1059 s->origPtr = -1;
1060 for (i = 0; i < s->nblock; i++)
1061 if (ptr[i] == 0) {
1062 s->origPtr = i;
1063 break;
1064 };
1065
1066 AssertH(s->origPtr != -1, 1003);
1067}
1068
1069
1070/*-------------------------------------------------------------*/
1071/*--- end blocksort.c ---*/
1072/*-------------------------------------------------------------*/
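
For reference, fallbackSimpleSort() in the file above is just a two-gap insertion sort (gap 4, then gap 1) over an index array keyed by eclass. Below is a minimal standalone sketch of the same pass structure with a small driver; demo_simple_sort and the test values are illustrative assumptions, not part of the deleted file.

#include <stdint.h>
#include <stdio.h>

static void demo_simple_sort(uint32_t *fmap, const uint32_t *eclass,
                             int32_t lo, int32_t hi)
{
	static const int32_t gaps[] = { 4, 1 };
	for (unsigned g = 0; g < 2; g++) {
		int32_t h = gaps[g];
		/* gap-h insertion pass, same shape as the loops in fallbackSimpleSort() */
		for (int32_t i = hi - h; i >= lo; i--) {
			uint32_t tmp = fmap[i];
			uint32_t key = eclass[tmp];
			int32_t j = i + h;
			while (j <= hi && key > eclass[fmap[j]]) {
				fmap[j - h] = fmap[j];
				j += h;
			}
			fmap[j - h] = tmp;
		}
	}
}

int main(void)
{
	uint32_t eclass[] = { 30, 10, 20, 50, 40, 0, 25, 5 };
	uint32_t fmap[]   = { 0, 1, 2, 3, 4, 5, 6, 7 };
	demo_simple_sort(fmap, eclass, 0, 7);
	for (int i = 0; i < 8; i++)
		printf("%u ", (unsigned)fmap[i]);  /* indices in increasing eclass order */
	printf("\n");
	return 0;
}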
diff --git a/archival/bz/bzlib.c b/archival/bz/bzlib.c
deleted file mode 100644
index b3beeabed..000000000
--- a/archival/bz/bzlib.c
+++ /dev/null
@@ -1,431 +0,0 @@
1/*
2 * bzip2 is written by Julian Seward <jseward@bzip.org>.
3 * Adapted for busybox by Denys Vlasenko <vda.linux@googlemail.com>.
4 * See README and LICENSE files in this directory for more information.
5 */
6
7/*-------------------------------------------------------------*/
8/*--- Library top-level functions. ---*/
9/*--- bzlib.c ---*/
10/*-------------------------------------------------------------*/
11
12/* ------------------------------------------------------------------
13This file is part of bzip2/libbzip2, a program and library for
14lossless, block-sorting data compression.
15
16bzip2/libbzip2 version 1.0.4 of 20 December 2006
17Copyright (C) 1996-2006 Julian Seward <jseward@bzip.org>
18
19Please read the WARNING, DISCLAIMER and PATENTS sections in the
20README file.
21
22This program is released under the terms of the license contained
23in the file LICENSE.
24------------------------------------------------------------------ */
25
26/* CHANGES
27 * 0.9.0 -- original version.
28 * 0.9.0a/b -- no changes in this file.
29 * 0.9.0c -- made zero-length BZ_FLUSH work correctly in bzCompress().
30 * fixed bzWrite/bzRead to ignore zero-length requests.
31 * fixed bzread to correctly handle read requests after EOF.
32 * wrong parameter order in call to bzDecompressInit in
33 * bzBuffToBuffDecompress. Fixed.
34 */
35
36/* #include "bzlib_private.h" */
37
38/*---------------------------------------------------*/
39/*--- Compression stuff ---*/
40/*---------------------------------------------------*/
41
42/*---------------------------------------------------*/
43#if BZ_LIGHT_DEBUG
44static
45void bz_assert_fail(int errcode)
46{
47 /* if (errcode == 1007) bb_error_msg_and_die("probably bad RAM"); */
48 bb_error_msg_and_die("internal error %d", errcode);
49}
50#endif
51
52/*---------------------------------------------------*/
53static
54void prepare_new_block(EState* s)
55{
56 int i;
57 s->nblock = 0;
58 s->numZ = 0;
59 s->state_out_pos = 0;
60 BZ_INITIALISE_CRC(s->blockCRC);
61 /* inlined memset would be nice to have here */
62 for (i = 0; i < 256; i++)
63 s->inUse[i] = 0;
64 s->blockNo++;
65}
66
67
68/*---------------------------------------------------*/
69static
70ALWAYS_INLINE
71void init_RL(EState* s)
72{
73 s->state_in_ch = 256;
74 s->state_in_len = 0;
75}
76
77
78static
79int isempty_RL(EState* s)
80{
81 return (s->state_in_ch >= 256 || s->state_in_len <= 0);
82}
83
84
85/*---------------------------------------------------*/
86static
87void BZ2_bzCompressInit(bz_stream *strm, int blockSize100k)
88{
89 int32_t n;
90 EState* s;
91
92 s = xzalloc(sizeof(EState));
93 s->strm = strm;
94
95 n = 100000 * blockSize100k;
96 s->arr1 = xmalloc(n * sizeof(uint32_t));
97 s->mtfv = (uint16_t*)s->arr1;
98 s->ptr = (uint32_t*)s->arr1;
99 s->arr2 = xmalloc((n + BZ_N_OVERSHOOT) * sizeof(uint32_t));
100 s->block = (uint8_t*)s->arr2;
101 s->ftab = xmalloc(65537 * sizeof(uint32_t));
102
103 s->crc32table = crc32_filltable(NULL, 1);
104
105 s->state = BZ_S_INPUT;
106 s->mode = BZ_M_RUNNING;
107 s->blockSize100k = blockSize100k;
108 s->nblockMAX = n - 19;
109
110 strm->state = s;
111 /*strm->total_in = 0;*/
112 strm->total_out = 0;
113 init_RL(s);
114 prepare_new_block(s);
115}
116
117
118/*---------------------------------------------------*/
119static
120void add_pair_to_block(EState* s)
121{
122 int32_t i;
123 uint8_t ch = (uint8_t)(s->state_in_ch);
124 for (i = 0; i < s->state_in_len; i++) {
125 BZ_UPDATE_CRC(s, s->blockCRC, ch);
126 }
127 s->inUse[s->state_in_ch] = 1;
128 switch (s->state_in_len) {
129 case 3:
130 s->block[s->nblock] = (uint8_t)ch; s->nblock++;
131 /* fall through */
132 case 2:
133 s->block[s->nblock] = (uint8_t)ch; s->nblock++;
134 /* fall through */
135 case 1:
136 s->block[s->nblock] = (uint8_t)ch; s->nblock++;
137 break;
138 default:
139 s->inUse[s->state_in_len - 4] = 1;
140 s->block[s->nblock] = (uint8_t)ch; s->nblock++;
141 s->block[s->nblock] = (uint8_t)ch; s->nblock++;
142 s->block[s->nblock] = (uint8_t)ch; s->nblock++;
143 s->block[s->nblock] = (uint8_t)ch; s->nblock++;
144 s->block[s->nblock] = (uint8_t)(s->state_in_len - 4);
145 s->nblock++;
146 break;
147 }
148}
149
150
151/*---------------------------------------------------*/
152static
153void flush_RL(EState* s)
154{
155 if (s->state_in_ch < 256) add_pair_to_block(s);
156 init_RL(s);
157}
158
159
160/*---------------------------------------------------*/
161#define ADD_CHAR_TO_BLOCK(zs, zchh0) \
162{ \
163 uint32_t zchh = (uint32_t)(zchh0); \
164 /*-- fast track the common case --*/ \
165 if (zchh != zs->state_in_ch && zs->state_in_len == 1) { \
166 uint8_t ch = (uint8_t)(zs->state_in_ch); \
167 BZ_UPDATE_CRC(zs, zs->blockCRC, ch); \
168 zs->inUse[zs->state_in_ch] = 1; \
169 zs->block[zs->nblock] = (uint8_t)ch; \
170 zs->nblock++; \
171 zs->state_in_ch = zchh; \
172 } \
173 else \
174 /*-- general, uncommon cases --*/ \
175 if (zchh != zs->state_in_ch || zs->state_in_len == 255) { \
176 if (zs->state_in_ch < 256) \
177 add_pair_to_block(zs); \
178 zs->state_in_ch = zchh; \
179 zs->state_in_len = 1; \
180 } else { \
181 zs->state_in_len++; \
182 } \
183}
184
185
186/*---------------------------------------------------*/
187static
188void /*Bool*/ copy_input_until_stop(EState* s)
189{
190 /*Bool progress_in = False;*/
191
192#ifdef SAME_CODE_AS_BELOW
193 if (s->mode == BZ_M_RUNNING) {
194 /*-- fast track the common case --*/
195 while (1) {
196 /*-- no input? --*/
197 if (s->strm->avail_in == 0) break;
198 /*-- block full? --*/
199 if (s->nblock >= s->nblockMAX) break;
200 /*progress_in = True;*/
201 ADD_CHAR_TO_BLOCK(s, (uint32_t)(*(uint8_t*)(s->strm->next_in)));
202 s->strm->next_in++;
203 s->strm->avail_in--;
204 /*s->strm->total_in++;*/
205 }
206 } else
207#endif
208 {
209 /*-- general, uncommon case --*/
210 while (1) {
211 /*-- no input? --*/
212 if (s->strm->avail_in == 0) break;
213 /*-- block full? --*/
214 if (s->nblock >= s->nblockMAX) break;
215 //# /*-- flush/finish end? --*/
216 //# if (s->avail_in_expect == 0) break;
217 /*progress_in = True;*/
218 ADD_CHAR_TO_BLOCK(s, *(uint8_t*)(s->strm->next_in));
219 s->strm->next_in++;
220 s->strm->avail_in--;
221 /*s->strm->total_in++;*/
222 //# s->avail_in_expect--;
223 }
224 }
225 /*return progress_in;*/
226}
227
228
229/*---------------------------------------------------*/
230static
231void /*Bool*/ copy_output_until_stop(EState* s)
232{
233 /*Bool progress_out = False;*/
234
235 while (1) {
236 /*-- no output space? --*/
237 if (s->strm->avail_out == 0) break;
238
239 /*-- block done? --*/
240 if (s->state_out_pos >= s->numZ) break;
241
242 /*progress_out = True;*/
243 *(s->strm->next_out) = s->zbits[s->state_out_pos];
244 s->state_out_pos++;
245 s->strm->avail_out--;
246 s->strm->next_out++;
247 s->strm->total_out++;
248 }
249 /*return progress_out;*/
250}
251
252
253/*---------------------------------------------------*/
254static
255void /*Bool*/ handle_compress(bz_stream *strm)
256{
257 /*Bool progress_in = False;*/
258 /*Bool progress_out = False;*/
259 EState* s = strm->state;
260
261 while (1) {
262 if (s->state == BZ_S_OUTPUT) {
263 /*progress_out |=*/ copy_output_until_stop(s);
264 if (s->state_out_pos < s->numZ) break;
265 if (s->mode == BZ_M_FINISHING
266 //# && s->avail_in_expect == 0
267 && s->strm->avail_in == 0
268 && isempty_RL(s))
269 break;
270 prepare_new_block(s);
271 s->state = BZ_S_INPUT;
272#ifdef FLUSH_IS_UNUSED
273 if (s->mode == BZ_M_FLUSHING
274 && s->avail_in_expect == 0
275 && isempty_RL(s))
276 break;
277#endif
278 }
279
280 if (s->state == BZ_S_INPUT) {
281 /*progress_in |=*/ copy_input_until_stop(s);
282 //#if (s->mode != BZ_M_RUNNING && s->avail_in_expect == 0) {
283 if (s->mode != BZ_M_RUNNING && s->strm->avail_in == 0) {
284 flush_RL(s);
285 BZ2_compressBlock(s, (s->mode == BZ_M_FINISHING));
286 s->state = BZ_S_OUTPUT;
287 } else
288 if (s->nblock >= s->nblockMAX) {
289 BZ2_compressBlock(s, 0);
290 s->state = BZ_S_OUTPUT;
291 } else
292 if (s->strm->avail_in == 0) {
293 break;
294 }
295 }
296 }
297
298 /*return progress_in || progress_out;*/
299}
300
301
302/*---------------------------------------------------*/
303static
304int BZ2_bzCompress(bz_stream *strm, int action)
305{
306 /*Bool progress;*/
307 EState* s;
308
309 s = strm->state;
310
311 switch (s->mode) {
312 case BZ_M_RUNNING:
313 if (action == BZ_RUN) {
314 /*progress =*/ handle_compress(strm);
315 /*return progress ? BZ_RUN_OK : BZ_PARAM_ERROR;*/
316 return BZ_RUN_OK;
317 }
318#ifdef FLUSH_IS_UNUSED
319 else
320 if (action == BZ_FLUSH) {
321 //#s->avail_in_expect = strm->avail_in;
322 s->mode = BZ_M_FLUSHING;
323 goto case_BZ_M_FLUSHING;
324 }
325#endif
326 else
327 /*if (action == BZ_FINISH)*/ {
328 //#s->avail_in_expect = strm->avail_in;
329 s->mode = BZ_M_FINISHING;
330 goto case_BZ_M_FINISHING;
331 }
332
333#ifdef FLUSH_IS_UNUSED
334 case_BZ_M_FLUSHING:
335 case BZ_M_FLUSHING:
336 /*if (s->avail_in_expect != s->strm->avail_in)
337 return BZ_SEQUENCE_ERROR;*/
338 /*progress =*/ handle_compress(strm);
339 if (s->avail_in_expect > 0 || !isempty_RL(s) || s->state_out_pos < s->numZ)
340 return BZ_FLUSH_OK;
341 s->mode = BZ_M_RUNNING;
342 return BZ_RUN_OK;
343#endif
344
345 case_BZ_M_FINISHING:
346 /*case BZ_M_FINISHING:*/
347 default:
348 /*if (s->avail_in_expect != s->strm->avail_in)
349 return BZ_SEQUENCE_ERROR;*/
350 /*progress =*/ handle_compress(strm);
351 /*if (!progress) return BZ_SEQUENCE_ERROR;*/
352 //#if (s->avail_in_expect > 0 || !isempty_RL(s) || s->state_out_pos < s->numZ)
353 //# return BZ_FINISH_OK;
354 if (s->strm->avail_in > 0 || !isempty_RL(s) || s->state_out_pos < s->numZ)
355 return BZ_FINISH_OK;
356 /*s->mode = BZ_M_IDLE;*/
357 return BZ_STREAM_END;
358 }
359 /* return BZ_OK; --not reached--*/
360}
361
362
363/*---------------------------------------------------*/
364#if ENABLE_FEATURE_CLEAN_UP
365static
366void BZ2_bzCompressEnd(bz_stream *strm)
367{
368 EState* s;
369
370 s = strm->state;
371 free(s->arr1);
372 free(s->arr2);
373 free(s->ftab);
374 free(s->crc32table);
375 free(strm->state);
376}
377#endif
378
379
380/*---------------------------------------------------*/
381/*--- Misc convenience stuff ---*/
382/*---------------------------------------------------*/
383
384/*---------------------------------------------------*/
385#ifdef EXAMPLE_CODE_FOR_MEM_TO_MEM_COMPRESSION
386static
387int BZ2_bzBuffToBuffCompress(char* dest,
388 unsigned int* destLen,
389 char* source,
390 unsigned int sourceLen,
391 int blockSize100k)
392{
393 bz_stream strm;
394 int ret;
395
396 if (dest == NULL || destLen == NULL
397 || source == NULL
398 || blockSize100k < 1 || blockSize100k > 9
399 ) {
400 return BZ_PARAM_ERROR;
401 }
402
403 BZ2_bzCompressInit(&strm, blockSize100k);
404
405 strm.next_in = source;
406 strm.next_out = dest;
407 strm.avail_in = sourceLen;
408 strm.avail_out = *destLen;
409
410 ret = BZ2_bzCompress(&strm, BZ_FINISH);
411 if (ret == BZ_FINISH_OK) goto output_overflow;
412 if (ret != BZ_STREAM_END) goto errhandler;
413
414 /* normal termination */
415 *destLen -= strm.avail_out;
416 BZ2_bzCompressEnd(&strm);
417 return BZ_OK;
418
419 output_overflow:
420 BZ2_bzCompressEnd(&strm);
421 return BZ_OUTBUFF_FULL;
422
423 errhandler:
424 BZ2_bzCompressEnd(&strm);
425 return ret;
426}
427#endif
428
429/*-------------------------------------------------------------*/
430/*--- end bzlib.c ---*/
431/*-------------------------------------------------------------*/
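
The ADD_CHAR_TO_BLOCK / add_pair_to_block pair above implements bzip2's initial run-length encoding: runs of 1..3 identical bytes are stored literally, and runs of 4..255 become four copies followed by a (length - 4) byte. The following self-contained sketch shows that rule in isolation; demo_rle_encode is an illustrative name, not part of the deleted file, and the caller must size out for the worst case (roughly n + n/4 + 1 bytes).

#include <stddef.h>
#include <stdint.h>

static size_t demo_rle_encode(const uint8_t *in, size_t n, uint8_t *out)
{
	size_t w = 0, i = 0;
	while (i < n) {
		uint8_t ch = in[i];
		size_t run = 1;
		while (i + run < n && in[i + run] == ch && run < 255)
			run++;                           /* runs cap at 255, as state_in_len does */
		if (run < 4) {
			for (size_t k = 0; k < run; k++)
				out[w++] = ch;               /* short run: copy literally */
		} else {
			for (size_t k = 0; k < 4; k++)
				out[w++] = ch;               /* long run: four copies... */
			out[w++] = (uint8_t)(run - 4);   /* ...plus a repeat count */
		}
		i += run;
	}
	return w;                                /* bytes written to out */
}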
diff --git a/archival/bz/bzlib.h b/archival/bz/bzlib.h
deleted file mode 100644
index 1bb811c4a..000000000
--- a/archival/bz/bzlib.h
+++ /dev/null
@@ -1,65 +0,0 @@
1/*
2 * bzip2 is written by Julian Seward <jseward@bzip.org>.
3 * Adapted for busybox by Denys Vlasenko <vda.linux@googlemail.com>.
4 * See README and LICENSE files in this directory for more information.
5 */
6
7/*-------------------------------------------------------------*/
8/*--- Public header file for the library. ---*/
9/*--- bzlib.h ---*/
10/*-------------------------------------------------------------*/
11
12/* ------------------------------------------------------------------
13This file is part of bzip2/libbzip2, a program and library for
14lossless, block-sorting data compression.
15
16bzip2/libbzip2 version 1.0.4 of 20 December 2006
17Copyright (C) 1996-2006 Julian Seward <jseward@bzip.org>
18
19Please read the WARNING, DISCLAIMER and PATENTS sections in the
20README file.
21
22This program is released under the terms of the license contained
23in the file LICENSE.
24------------------------------------------------------------------ */
25
26#define BZ_RUN 0
27#define BZ_FLUSH 1
28#define BZ_FINISH 2
29
30#define BZ_OK 0
31#define BZ_RUN_OK 1
32#define BZ_FLUSH_OK 2
33#define BZ_FINISH_OK 3
34#define BZ_STREAM_END 4
35#define BZ_SEQUENCE_ERROR (-1)
36#define BZ_PARAM_ERROR (-2)
37#define BZ_MEM_ERROR (-3)
38#define BZ_DATA_ERROR (-4)
39#define BZ_DATA_ERROR_MAGIC (-5)
40#define BZ_IO_ERROR (-6)
41#define BZ_UNEXPECTED_EOF (-7)
42#define BZ_OUTBUFF_FULL (-8)
43#define BZ_CONFIG_ERROR (-9)
44
45typedef struct bz_stream {
46 void *state;
47 char *next_in;
48 char *next_out;
49 unsigned avail_in;
50 unsigned avail_out;
51 /*unsigned long long total_in;*/
52 unsigned long long total_out;
53} bz_stream;
54
55/*-- Core (low-level) library functions --*/
56
57static void BZ2_bzCompressInit(bz_stream *strm, int blockSize100k);
58static int BZ2_bzCompress(bz_stream *strm, int action);
59#if ENABLE_FEATURE_CLEAN_UP
60static void BZ2_bzCompressEnd(bz_stream *strm);
61#endif
62
63/*-------------------------------------------------------------*/
64/*--- end bzlib.h ---*/
65/*-------------------------------------------------------------*/
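
The header above exposes only three compression entry points; the intended calling pattern is BZ_RUN while input remains, then BZ_FINISH until BZ_STREAM_END. A sketch of that protocol follows. In busybox these functions are static and compiled into a single translation unit, so this illustrates the calling sequence rather than code that links against a separate library; the buffer sizes and the stdin/stdout plumbing are assumptions.

#include <string.h>
#include <unistd.h>

static void demo_compress_stream(void)
{
	char inbuf[4096], outbuf[4096];
	bz_stream strm;
	ssize_t n;
	int ret;

	memset(&strm, 0, sizeof(strm));
	BZ2_bzCompressInit(&strm, 9);            /* 9 => 900k blocks */

	do {
		n = read(STDIN_FILENO, inbuf, sizeof(inbuf));
		strm.next_in = inbuf;
		strm.avail_in = (n > 0) ? n : 0;
		do {
			strm.next_out = outbuf;
			strm.avail_out = sizeof(outbuf);
			/* feed this chunk (or finish) and drain whatever output appears */
			ret = BZ2_bzCompress(&strm, (n > 0) ? BZ_RUN : BZ_FINISH);
			write(STDOUT_FILENO, outbuf, sizeof(outbuf) - strm.avail_out);
		} while (strm.avail_in != 0 || ret == BZ_FINISH_OK);
	} while (n > 0);                         /* finishing path ends at BZ_STREAM_END */

#if ENABLE_FEATURE_CLEAN_UP
	BZ2_bzCompressEnd(&strm);
#endif
}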
diff --git a/archival/bz/bzlib_private.h b/archival/bz/bzlib_private.h
deleted file mode 100644
index 6430ce407..000000000
--- a/archival/bz/bzlib_private.h
+++ /dev/null
@@ -1,219 +0,0 @@
1/*
2 * bzip2 is written by Julian Seward <jseward@bzip.org>.
3 * Adapted for busybox by Denys Vlasenko <vda.linux@googlemail.com>.
4 * See README and LICENSE files in this directory for more information.
5 */
6
7/*-------------------------------------------------------------*/
8/*--- Private header file for the library. ---*/
9/*--- bzlib_private.h ---*/
10/*-------------------------------------------------------------*/
11
12/* ------------------------------------------------------------------
13This file is part of bzip2/libbzip2, a program and library for
14lossless, block-sorting data compression.
15
16bzip2/libbzip2 version 1.0.4 of 20 December 2006
17Copyright (C) 1996-2006 Julian Seward <jseward@bzip.org>
18
19Please read the WARNING, DISCLAIMER and PATENTS sections in the
20README file.
21
22This program is released under the terms of the license contained
23in the file LICENSE.
24------------------------------------------------------------------ */
25
26/* #include "bzlib.h" */
27
28/*-- General stuff. --*/
29
30typedef unsigned char Bool;
31
32#define True ((Bool)1)
33#define False ((Bool)0)
34
35#if BZ_LIGHT_DEBUG
36static void bz_assert_fail(int errcode) NORETURN;
37#define AssertH(cond, errcode) \
38do { \
39 if (!(cond)) \
40 bz_assert_fail(errcode); \
41} while (0)
42#else
43#define AssertH(cond, msg) do { } while (0)
44#endif
45
46#if BZ_DEBUG
47#define AssertD(cond, msg) \
48do { \
49 if (!(cond)) \
50 bb_error_msg_and_die("(debug build): internal error %s", msg); \
51} while (0)
52#else
53#define AssertD(cond, msg) do { } while (0)
54#endif
55
56
57/*-- Header bytes. --*/
58
59#define BZ_HDR_B 0x42 /* 'B' */
60#define BZ_HDR_Z 0x5a /* 'Z' */
61#define BZ_HDR_h 0x68 /* 'h' */
62#define BZ_HDR_0 0x30 /* '0' */
63
64#define BZ_HDR_BZh0 0x425a6830
65
66/*-- Constants for the back end. --*/
67
68#define BZ_MAX_ALPHA_SIZE 258
69#define BZ_MAX_CODE_LEN 23
70
71#define BZ_RUNA 0
72#define BZ_RUNB 1
73
74#define BZ_N_GROUPS 6
75#define BZ_G_SIZE 50
76#define BZ_N_ITERS 4
77
78#define BZ_MAX_SELECTORS (2 + (900000 / BZ_G_SIZE))
79
80
81/*-- Stuff for doing CRCs. --*/
82
83#define BZ_INITIALISE_CRC(crcVar) \
84{ \
85 crcVar = 0xffffffffL; \
86}
87
88#define BZ_FINALISE_CRC(crcVar) \
89{ \
90 crcVar = ~(crcVar); \
91}
92
93#define BZ_UPDATE_CRC(s, crcVar, cha) \
94{ \
95 crcVar = (crcVar << 8) ^ s->crc32table[(crcVar >> 24) ^ ((uint8_t)cha)]; \
96}
97
98
99/*-- States and modes for compression. --*/
100
101#define BZ_M_IDLE 1
102#define BZ_M_RUNNING 2
103#define BZ_M_FLUSHING 3
104#define BZ_M_FINISHING 4
105
106#define BZ_S_OUTPUT 1
107#define BZ_S_INPUT 2
108
109#define BZ_N_RADIX 2
110#define BZ_N_QSORT 12
111#define BZ_N_SHELL 18
112#define BZ_N_OVERSHOOT (BZ_N_RADIX + BZ_N_QSORT + BZ_N_SHELL + 2)
113
114
115/*-- Structure holding all the compression-side stuff. --*/
116
117typedef struct EState {
118 /* pointer back to the struct bz_stream */
119 bz_stream *strm;
120
121 /* mode this stream is in, and whether inputting */
122 /* or outputting data */
123 int32_t mode;
124 int32_t state;
125
126 /* remembers avail_in when flush/finish requested */
127/* bbox: not needed, strm->avail_in always has the same value */
128/* commented out with '//#' throughout the code */
129 /* uint32_t avail_in_expect; */
130
131 /* for doing the block sorting */
132 int32_t origPtr;
133 uint32_t *arr1;
134 uint32_t *arr2;
135 uint32_t *ftab;
136
137 /* aliases for arr1 and arr2 */
138 uint32_t *ptr;
139 uint8_t *block;
140 uint16_t *mtfv;
141 uint8_t *zbits;
142
143 /* guess what */
144 uint32_t *crc32table;
145
146 /* run-length-encoding of the input */
147 uint32_t state_in_ch;
148 int32_t state_in_len;
149
150 /* input and output limits and current posns */
151 int32_t nblock;
152 int32_t nblockMAX;
153 int32_t numZ;
154 int32_t state_out_pos;
155
156 /* the buffer for bit stream creation */
157 uint32_t bsBuff;
158 int32_t bsLive;
159
160 /* block and combined CRCs */
161 uint32_t blockCRC;
162 uint32_t combinedCRC;
163
164 /* misc administratium */
165 int32_t blockNo;
166 int32_t blockSize100k;
167
168 /* stuff for coding the MTF values */
169 int32_t nMTF;
170
171 /* map of bytes used in block */
172 int32_t nInUse;
173 Bool inUse[256] ALIGNED(sizeof(long));
174 uint8_t unseqToSeq[256];
175
176 /* stuff for coding the MTF values */
177 int32_t mtfFreq [BZ_MAX_ALPHA_SIZE];
178 uint8_t selector [BZ_MAX_SELECTORS];
179 uint8_t selectorMtf[BZ_MAX_SELECTORS];
180
181 uint8_t len[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
182
183 /* stack-saving measures: these can be local, but they are too big */
184 int32_t sendMTFValues__code [BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
185 int32_t sendMTFValues__rfreq[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
186#if CONFIG_BZIP2_FEATURE_SPEED >= 5
187 /* second dimension: only 3 needed; 4 makes index calculations faster */
188 uint32_t sendMTFValues__len_pack[BZ_MAX_ALPHA_SIZE][4];
189#endif
190 int32_t BZ2_hbMakeCodeLengths__heap [BZ_MAX_ALPHA_SIZE + 2];
191 int32_t BZ2_hbMakeCodeLengths__weight[BZ_MAX_ALPHA_SIZE * 2];
192 int32_t BZ2_hbMakeCodeLengths__parent[BZ_MAX_ALPHA_SIZE * 2];
193
194 int32_t mainSort__runningOrder[256];
195 int32_t mainSort__copyStart[256];
196 int32_t mainSort__copyEnd[256];
197} EState;
198
199
200/*-- compression. --*/
201
202static void
203BZ2_blockSort(EState*);
204
205static void
206BZ2_compressBlock(EState*, int);
207
208static void
209BZ2_bsInitWrite(EState*);
210
211static void
212BZ2_hbAssignCodes(int32_t*, uint8_t*, int32_t, int32_t, int32_t);
213
214static void
215BZ2_hbMakeCodeLengths(EState*, uint8_t*, int32_t*, int32_t, int32_t);
216
217/*-------------------------------------------------------------*/
218/*--- end bzlib_private.h ---*/
219/*-------------------------------------------------------------*/
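
The CRC macros above amount to a plain table-driven CRC-32 over the block, initialised to all ones and bit-inverted at the end; BZ2_bzCompressInit() builds the table with busybox's crc32_filltable(). A minimal function form of the same computation is sketched below; demo_block_crc is an illustrative name, not part of the deleted file.

#include <stdint.h>

static uint32_t demo_block_crc(const uint32_t *crc32table,
                               const uint8_t *data, unsigned len)
{
	uint32_t crc = 0xffffffff;               /* BZ_INITIALISE_CRC */
	while (len--) {
		/* BZ_UPDATE_CRC: fold the next byte into the running CRC */
		crc = (crc << 8) ^ crc32table[(crc >> 24) ^ *data++];
	}
	return ~crc;                             /* BZ_FINALISE_CRC */
}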
diff --git a/archival/bz/compress.c b/archival/bz/compress.c
deleted file mode 100644
index 6f1c70a08..000000000
--- a/archival/bz/compress.c
+++ /dev/null
@@ -1,685 +0,0 @@
1/*
2 * bzip2 is written by Julian Seward <jseward@bzip.org>.
3 * Adapted for busybox by Denys Vlasenko <vda.linux@googlemail.com>.
4 * See README and LICENSE files in this directory for more information.
5 */
6
7/*-------------------------------------------------------------*/
8/*--- Compression machinery (not incl block sorting) ---*/
9/*--- compress.c ---*/
10/*-------------------------------------------------------------*/
11
12/* ------------------------------------------------------------------
13This file is part of bzip2/libbzip2, a program and library for
14lossless, block-sorting data compression.
15
16bzip2/libbzip2 version 1.0.4 of 20 December 2006
17Copyright (C) 1996-2006 Julian Seward <jseward@bzip.org>
18
19Please read the WARNING, DISCLAIMER and PATENTS sections in the
20README file.
21
22This program is released under the terms of the license contained
23in the file LICENSE.
24------------------------------------------------------------------ */
25
26/* CHANGES
27 * 0.9.0 -- original version.
28 * 0.9.0a/b -- no changes in this file.
29 * 0.9.0c -- changed setting of nGroups in sendMTFValues()
30 * so as to do a bit better on small files
31*/
32
33/* #include "bzlib_private.h" */
34
35/*---------------------------------------------------*/
36/*--- Bit stream I/O ---*/
37/*---------------------------------------------------*/
38
39/*---------------------------------------------------*/
40static
41void BZ2_bsInitWrite(EState* s)
42{
43 s->bsLive = 0;
44 s->bsBuff = 0;
45}
46
47
48/*---------------------------------------------------*/
49static NOINLINE
50void bsFinishWrite(EState* s)
51{
52 while (s->bsLive > 0) {
53 s->zbits[s->numZ] = (uint8_t)(s->bsBuff >> 24);
54 s->numZ++;
55 s->bsBuff <<= 8;
56 s->bsLive -= 8;
57 }
58}
59
60
61/*---------------------------------------------------*/
62static
63/* Helps only on level 5, on other levels hurts. ? */
64#if CONFIG_BZIP2_FEATURE_SPEED >= 5
65ALWAYS_INLINE
66#endif
67void bsW(EState* s, int32_t n, uint32_t v)
68{
69 while (s->bsLive >= 8) {
70 s->zbits[s->numZ] = (uint8_t)(s->bsBuff >> 24);
71 s->numZ++;
72 s->bsBuff <<= 8;
73 s->bsLive -= 8;
74 }
75 s->bsBuff |= (v << (32 - s->bsLive - n));
76 s->bsLive += n;
77}
78
79
80/*---------------------------------------------------*/
81static
82void bsPutU32(EState* s, unsigned u)
83{
84 bsW(s, 8, (u >> 24) & 0xff);
85 bsW(s, 8, (u >> 16) & 0xff);
86 bsW(s, 8, (u >> 8) & 0xff);
87 bsW(s, 8, u & 0xff);
88}
89
90
91/*---------------------------------------------------*/
92static
93void bsPutU16(EState* s, unsigned u)
94{
95 bsW(s, 8, (u >> 8) & 0xff);
96 bsW(s, 8, u & 0xff);
97}
98
99
100/*---------------------------------------------------*/
101/*--- The back end proper ---*/
102/*---------------------------------------------------*/
103
104/*---------------------------------------------------*/
105static
106void makeMaps_e(EState* s)
107{
108 int i;
109 s->nInUse = 0;
110 for (i = 0; i < 256; i++) {
111 if (s->inUse[i]) {
112 s->unseqToSeq[i] = s->nInUse;
113 s->nInUse++;
114 }
115 }
116}
117
118
119/*---------------------------------------------------*/
120static NOINLINE
121void generateMTFValues(EState* s)
122{
123 uint8_t yy[256];
124 int32_t i, j;
125 int32_t zPend;
126 int32_t wr;
127 int32_t EOB;
128
129 /*
130 * After sorting (eg, here),
131 * s->arr1[0 .. s->nblock-1] holds sorted order,
132 * and
133 * ((uint8_t*)s->arr2)[0 .. s->nblock-1]
134 * holds the original block data.
135 *
136 * The first thing to do is generate the MTF values,
137 * and put them in ((uint16_t*)s->arr1)[0 .. s->nblock-1].
138 *
139 * Because there are strictly fewer or equal MTF values
140 * than block values, ptr values in this area are overwritten
141 * with MTF values only when they are no longer needed.
142 *
143 * The final compressed bitstream is generated into the
144 * area starting at &((uint8_t*)s->arr2)[s->nblock]
145 *
146 * These storage aliases are set up in bzCompressInit(),
147 * except for the last one, which is arranged in
148 * compressBlock().
149 */
150 uint32_t* ptr = s->ptr;
151 uint8_t* block = s->block;
152 uint16_t* mtfv = s->mtfv;
153
154 makeMaps_e(s);
155 EOB = s->nInUse+1;
156
157 for (i = 0; i <= EOB; i++)
158 s->mtfFreq[i] = 0;
159
160 wr = 0;
161 zPend = 0;
162 for (i = 0; i < s->nInUse; i++)
163 yy[i] = (uint8_t) i;
164
165 for (i = 0; i < s->nblock; i++) {
166 uint8_t ll_i;
167 AssertD(wr <= i, "generateMTFValues(1)");
168 j = ptr[i] - 1;
169 if (j < 0)
170 j += s->nblock;
171 ll_i = s->unseqToSeq[block[j]];
172 AssertD(ll_i < s->nInUse, "generateMTFValues(2a)");
173
174 if (yy[0] == ll_i) {
175 zPend++;
176 } else {
177 if (zPend > 0) {
178 zPend--;
179 while (1) {
180 if (zPend & 1) {
181 mtfv[wr] = BZ_RUNB; wr++;
182 s->mtfFreq[BZ_RUNB]++;
183 } else {
184 mtfv[wr] = BZ_RUNA; wr++;
185 s->mtfFreq[BZ_RUNA]++;
186 }
187 if (zPend < 2) break;
188 zPend = (uint32_t)(zPend - 2) / 2;
189 /* bbox: unsigned div is easier */
190 };
191 zPend = 0;
192 }
193 {
194 register uint8_t rtmp;
195 register uint8_t* ryy_j;
196 register uint8_t rll_i;
197 rtmp = yy[1];
198 yy[1] = yy[0];
199 ryy_j = &(yy[1]);
200 rll_i = ll_i;
201 while (rll_i != rtmp) {
202 register uint8_t rtmp2;
203 ryy_j++;
204 rtmp2 = rtmp;
205 rtmp = *ryy_j;
206 *ryy_j = rtmp2;
207 };
208 yy[0] = rtmp;
209 j = ryy_j - &(yy[0]);
210 mtfv[wr] = j+1;
211 wr++;
212 s->mtfFreq[j+1]++;
213 }
214 }
215 }
216
217 if (zPend > 0) {
218 zPend--;
219 while (1) {
220 if (zPend & 1) {
221 mtfv[wr] = BZ_RUNB;
222 wr++;
223 s->mtfFreq[BZ_RUNB]++;
224 } else {
225 mtfv[wr] = BZ_RUNA;
226 wr++;
227 s->mtfFreq[BZ_RUNA]++;
228 }
229 if (zPend < 2)
230 break;
231 zPend = (uint32_t)(zPend - 2) / 2;
232 /* bbox: unsigned div is easier */
233 };
234 zPend = 0;
235 }
236
237 mtfv[wr] = EOB;
238 wr++;
239 s->mtfFreq[EOB]++;
240
241 s->nMTF = wr;
242}
243
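/*
 * The zPend loops above encode a run of N consecutive zero MTF values
 * as a string of RUNA/RUNB symbols: N written in bijective base 2
 * (RUNA = digit value 1, RUNB = digit value 2), least significant
 * digit first.  A standalone sketch of just that encoding; the name
 * emit_zero_run and the printing are illustrative, not bzip2 API.
 */
#include <stdio.h>

/* Print the RUNA/RUNB digits for a run of 'run_len' zeros (run_len >= 1),
 * using the same loop shape as the zPend flush above. */
static void emit_zero_run(unsigned run_len)
{
	unsigned n = run_len - 1;
	while (1) {
		fputs((n & 1) ? "RUNB " : "RUNA ", stdout);
		if (n < 2)
			break;
		n = (n - 2) / 2;
	}
}

int main(void)
{
	/* run lengths 1..4 come out as A, B, AA, BA */
	for (unsigned len = 1; len <= 4; len++) {
		printf("%u -> ", len);
		emit_zero_run(len);
		printf("\n");
	}
	return 0;
}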
244
245/*---------------------------------------------------*/
246#define BZ_LESSER_ICOST 0
247#define BZ_GREATER_ICOST 15
248
249static NOINLINE
250void sendMTFValues(EState* s)
251{
252 int32_t v, t, i, j, gs, ge, totc, bt, bc, iter;
253 int32_t nSelectors, alphaSize, minLen, maxLen, selCtr;
254 int32_t nGroups, nBytes;
255
256 /*
257 * uint8_t len[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
258 * is a global since the decoder also needs it.
259 *
260 * int32_t code[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
261 * int32_t rfreq[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
262 * are also globals only used in this proc.
263 * Made global to keep stack frame size small.
264 */
265#define code sendMTFValues__code
266#define rfreq sendMTFValues__rfreq
267#define len_pack sendMTFValues__len_pack
268
269 uint16_t cost[BZ_N_GROUPS];
270 int32_t fave[BZ_N_GROUPS];
271
272 uint16_t* mtfv = s->mtfv;
273
274 alphaSize = s->nInUse + 2;
275 for (t = 0; t < BZ_N_GROUPS; t++)
276 for (v = 0; v < alphaSize; v++)
277 s->len[t][v] = BZ_GREATER_ICOST;
278
279 /*--- Decide how many coding tables to use ---*/
280 AssertH(s->nMTF > 0, 3001);
281 if (s->nMTF < 200) nGroups = 2; else
282 if (s->nMTF < 600) nGroups = 3; else
283 if (s->nMTF < 1200) nGroups = 4; else
284 if (s->nMTF < 2400) nGroups = 5; else
285 nGroups = 6;
286
287 /*--- Generate an initial set of coding tables ---*/
288 {
289 int32_t nPart, remF, tFreq, aFreq;
290
291 nPart = nGroups;
292 remF = s->nMTF;
293 gs = 0;
294 while (nPart > 0) {
295 tFreq = remF / nPart;
296 ge = gs - 1;
297 aFreq = 0;
298 while (aFreq < tFreq && ge < alphaSize-1) {
299 ge++;
300 aFreq += s->mtfFreq[ge];
301 }
302
303 if (ge > gs
304 && nPart != nGroups && nPart != 1
305 && ((nGroups - nPart) % 2 == 1) /* bbox: can this be replaced by x & 1? */
306 ) {
307 aFreq -= s->mtfFreq[ge];
308 ge--;
309 }
310
311 for (v = 0; v < alphaSize; v++)
312 if (v >= gs && v <= ge)
313 s->len[nPart-1][v] = BZ_LESSER_ICOST;
314 else
315 s->len[nPart-1][v] = BZ_GREATER_ICOST;
316
317 nPart--;
318 gs = ge + 1;
319 remF -= aFreq;
320 }
321 }
322
323 /*
324 * Iterate up to BZ_N_ITERS times to improve the tables.
325 */
326 for (iter = 0; iter < BZ_N_ITERS; iter++) {
327 for (t = 0; t < nGroups; t++)
328 fave[t] = 0;
329
330 for (t = 0; t < nGroups; t++)
331 for (v = 0; v < alphaSize; v++)
332 s->rfreq[t][v] = 0;
333
334#if CONFIG_BZIP2_FEATURE_SPEED >= 5
335 /*
336 * Set up an auxiliary length table which is used to fast-track
337 * the common case (nGroups == 6).
338 */
339 if (nGroups == 6) {
340 for (v = 0; v < alphaSize; v++) {
341 s->len_pack[v][0] = (s->len[1][v] << 16) | s->len[0][v];
342 s->len_pack[v][1] = (s->len[3][v] << 16) | s->len[2][v];
343 s->len_pack[v][2] = (s->len[5][v] << 16) | s->len[4][v];
344 }
345 }
346#endif
347 nSelectors = 0;
348 totc = 0;
349 gs = 0;
350 while (1) {
351 /*--- Set group start & end marks. --*/
352 if (gs >= s->nMTF)
353 break;
354 ge = gs + BZ_G_SIZE - 1;
355 if (ge >= s->nMTF)
356 ge = s->nMTF-1;
357
358 /*
359 * Calculate the cost of this group as coded
360 * by each of the coding tables.
361 */
362 for (t = 0; t < nGroups; t++)
363 cost[t] = 0;
364#if CONFIG_BZIP2_FEATURE_SPEED >= 5
365 if (nGroups == 6 && 50 == ge-gs+1) {
366 /*--- fast track the common case ---*/
367 register uint32_t cost01, cost23, cost45;
368 register uint16_t icv;
369 cost01 = cost23 = cost45 = 0;
370#define BZ_ITER(nn) \
371 icv = mtfv[gs+(nn)]; \
372 cost01 += s->len_pack[icv][0]; \
373 cost23 += s->len_pack[icv][1]; \
374 cost45 += s->len_pack[icv][2];
375 BZ_ITER(0); BZ_ITER(1); BZ_ITER(2); BZ_ITER(3); BZ_ITER(4);
376 BZ_ITER(5); BZ_ITER(6); BZ_ITER(7); BZ_ITER(8); BZ_ITER(9);
377 BZ_ITER(10); BZ_ITER(11); BZ_ITER(12); BZ_ITER(13); BZ_ITER(14);
378 BZ_ITER(15); BZ_ITER(16); BZ_ITER(17); BZ_ITER(18); BZ_ITER(19);
379 BZ_ITER(20); BZ_ITER(21); BZ_ITER(22); BZ_ITER(23); BZ_ITER(24);
380 BZ_ITER(25); BZ_ITER(26); BZ_ITER(27); BZ_ITER(28); BZ_ITER(29);
381 BZ_ITER(30); BZ_ITER(31); BZ_ITER(32); BZ_ITER(33); BZ_ITER(34);
382 BZ_ITER(35); BZ_ITER(36); BZ_ITER(37); BZ_ITER(38); BZ_ITER(39);
383 BZ_ITER(40); BZ_ITER(41); BZ_ITER(42); BZ_ITER(43); BZ_ITER(44);
384 BZ_ITER(45); BZ_ITER(46); BZ_ITER(47); BZ_ITER(48); BZ_ITER(49);
385#undef BZ_ITER
386 cost[0] = cost01 & 0xffff; cost[1] = cost01 >> 16;
387 cost[2] = cost23 & 0xffff; cost[3] = cost23 >> 16;
388 cost[4] = cost45 & 0xffff; cost[5] = cost45 >> 16;
389
390 } else
391#endif
392 {
393 /*--- slow version which correctly handles all situations ---*/
394 for (i = gs; i <= ge; i++) {
395 uint16_t icv = mtfv[i];
396 for (t = 0; t < nGroups; t++)
397 cost[t] += s->len[t][icv];
398 }
399 }
400 /*
401 * Find the coding table which is best for this group,
402 * and record its identity in the selector table.
403 */
404 /*bc = 999999999;*/
405 /*bt = -1;*/
406 bc = cost[0];
407 bt = 0;
408 for (t = 1 /*0*/; t < nGroups; t++) {
409 if (cost[t] < bc) {
410 bc = cost[t];
411 bt = t;
412 }
413 }
414 totc += bc;
415 fave[bt]++;
416 s->selector[nSelectors] = bt;
417 nSelectors++;
418
419 /*
420 * Increment the symbol frequencies for the selected table.
421 */
422/* About 1% faster compression, +800 bytes of code */
423#if CONFIG_BZIP2_FEATURE_SPEED >= 4
424 if (nGroups == 6 && 50 == ge-gs+1) {
425 /*--- fast track the common case ---*/
426#define BZ_ITUR(nn) s->rfreq[bt][mtfv[gs + (nn)]]++
427 BZ_ITUR(0); BZ_ITUR(1); BZ_ITUR(2); BZ_ITUR(3); BZ_ITUR(4);
428 BZ_ITUR(5); BZ_ITUR(6); BZ_ITUR(7); BZ_ITUR(8); BZ_ITUR(9);
429 BZ_ITUR(10); BZ_ITUR(11); BZ_ITUR(12); BZ_ITUR(13); BZ_ITUR(14);
430 BZ_ITUR(15); BZ_ITUR(16); BZ_ITUR(17); BZ_ITUR(18); BZ_ITUR(19);
431 BZ_ITUR(20); BZ_ITUR(21); BZ_ITUR(22); BZ_ITUR(23); BZ_ITUR(24);
432 BZ_ITUR(25); BZ_ITUR(26); BZ_ITUR(27); BZ_ITUR(28); BZ_ITUR(29);
433 BZ_ITUR(30); BZ_ITUR(31); BZ_ITUR(32); BZ_ITUR(33); BZ_ITUR(34);
434 BZ_ITUR(35); BZ_ITUR(36); BZ_ITUR(37); BZ_ITUR(38); BZ_ITUR(39);
435 BZ_ITUR(40); BZ_ITUR(41); BZ_ITUR(42); BZ_ITUR(43); BZ_ITUR(44);
436 BZ_ITUR(45); BZ_ITUR(46); BZ_ITUR(47); BZ_ITUR(48); BZ_ITUR(49);
437#undef BZ_ITUR
438 gs = ge + 1;
439 } else
440#endif
441 {
442 /*--- slow version which correctly handles all situations ---*/
443 while (gs <= ge) {
444 s->rfreq[bt][mtfv[gs]]++;
445 gs++;
446 }
447 /* already is: gs = ge + 1; */
448 }
449 }
450
451 /*
452 * Recompute the tables based on the accumulated frequencies.
453 */
454 /* maxLen was changed from 20 to 17 in bzip2-1.0.3. See
455 * comment in huffman.c for details. */
456 for (t = 0; t < nGroups; t++)
457 BZ2_hbMakeCodeLengths(s, &(s->len[t][0]), &(s->rfreq[t][0]), alphaSize, 17 /*20*/);
458 }
459
460 AssertH(nGroups < 8, 3002);
461 AssertH(nSelectors < 32768 && nSelectors <= (2 + (900000 / BZ_G_SIZE)), 3003);
462
463 /*--- Compute MTF values for the selectors. ---*/
464 {
465 uint8_t pos[BZ_N_GROUPS], ll_i, tmp2, tmp;
466
467 for (i = 0; i < nGroups; i++)
468 pos[i] = i;
469 for (i = 0; i < nSelectors; i++) {
470 ll_i = s->selector[i];
471 j = 0;
472 tmp = pos[j];
473 while (ll_i != tmp) {
474 j++;
475 tmp2 = tmp;
476 tmp = pos[j];
477 pos[j] = tmp2;
478 };
479 pos[0] = tmp;
480 s->selectorMtf[i] = j;
481 }
482 };
483
484 /*--- Assign actual codes for the tables. --*/
485 for (t = 0; t < nGroups; t++) {
486 minLen = 32;
487 maxLen = 0;
488 for (i = 0; i < alphaSize; i++) {
489 if (s->len[t][i] > maxLen) maxLen = s->len[t][i];
490 if (s->len[t][i] < minLen) minLen = s->len[t][i];
491 }
492 AssertH(!(maxLen > 17 /*20*/), 3004);
493 AssertH(!(minLen < 1), 3005);
494 BZ2_hbAssignCodes(&(s->code[t][0]), &(s->len[t][0]), minLen, maxLen, alphaSize);
495 }
496
497 /*--- Transmit the mapping table. ---*/
498 {
499 /* bbox: optimized a bit more than in bzip2 */
500 int inUse16 = 0;
501 for (i = 0; i < 16; i++) {
502 if (sizeof(long) <= 4) {
503 inUse16 = inUse16*2 +
504 ((*(uint32_t*)&(s->inUse[i * 16 + 0])
505 | *(uint32_t*)&(s->inUse[i * 16 + 4])
506 | *(uint32_t*)&(s->inUse[i * 16 + 8])
507 | *(uint32_t*)&(s->inUse[i * 16 + 12])) != 0);
508 } else { /* Our CPU can do better */
509 inUse16 = inUse16*2 +
510 ((*(uint64_t*)&(s->inUse[i * 16 + 0])
511 | *(uint64_t*)&(s->inUse[i * 16 + 8])) != 0);
512 }
513 }
514
515 nBytes = s->numZ;
516 bsW(s, 16, inUse16);
517
518 inUse16 <<= (sizeof(int)*8 - 16); /* move 15th bit into sign bit */
519 for (i = 0; i < 16; i++) {
520 if (inUse16 < 0) {
521 unsigned v16 = 0;
522 for (j = 0; j < 16; j++)
523 v16 = v16*2 + s->inUse[i * 16 + j];
524 bsW(s, 16, v16);
525 }
526 inUse16 <<= 1;
527 }
528 }
529
530 /*--- Now the selectors. ---*/
531 nBytes = s->numZ;
532 bsW(s, 3, nGroups);
533 bsW(s, 15, nSelectors);
534 for (i = 0; i < nSelectors; i++) {
535 for (j = 0; j < s->selectorMtf[i]; j++)
536 bsW(s, 1, 1);
537 bsW(s, 1, 0);
538 }
539
540 /*--- Now the coding tables. ---*/
541 nBytes = s->numZ;
542
543 for (t = 0; t < nGroups; t++) {
544 int32_t curr = s->len[t][0];
545 bsW(s, 5, curr);
546 for (i = 0; i < alphaSize; i++) {
547 while (curr < s->len[t][i]) { bsW(s, 2, 2); curr++; /* 10 */ };
548 while (curr > s->len[t][i]) { bsW(s, 2, 3); curr--; /* 11 */ };
549 bsW(s, 1, 0);
550 }
551 }
552
553 /*--- And finally, the block data proper ---*/
554 nBytes = s->numZ;
555 selCtr = 0;
556 gs = 0;
557 while (1) {
558 if (gs >= s->nMTF)
559 break;
560 ge = gs + BZ_G_SIZE - 1;
561 if (ge >= s->nMTF)
562 ge = s->nMTF-1;
563 AssertH(s->selector[selCtr] < nGroups, 3006);
564
565/* Costs 1300 bytes and is _slower_ (on Intel Core 2) */
566#if 0
567 if (nGroups == 6 && 50 == ge-gs+1) {
568 /*--- fast track the common case ---*/
569 uint16_t mtfv_i;
570 uint8_t* s_len_sel_selCtr = &(s->len[s->selector[selCtr]][0]);
571 int32_t* s_code_sel_selCtr = &(s->code[s->selector[selCtr]][0]);
572#define BZ_ITAH(nn) \
573 mtfv_i = mtfv[gs+(nn)]; \
574 bsW(s, s_len_sel_selCtr[mtfv_i], s_code_sel_selCtr[mtfv_i])
575 BZ_ITAH(0); BZ_ITAH(1); BZ_ITAH(2); BZ_ITAH(3); BZ_ITAH(4);
576 BZ_ITAH(5); BZ_ITAH(6); BZ_ITAH(7); BZ_ITAH(8); BZ_ITAH(9);
577 BZ_ITAH(10); BZ_ITAH(11); BZ_ITAH(12); BZ_ITAH(13); BZ_ITAH(14);
578 BZ_ITAH(15); BZ_ITAH(16); BZ_ITAH(17); BZ_ITAH(18); BZ_ITAH(19);
579 BZ_ITAH(20); BZ_ITAH(21); BZ_ITAH(22); BZ_ITAH(23); BZ_ITAH(24);
580 BZ_ITAH(25); BZ_ITAH(26); BZ_ITAH(27); BZ_ITAH(28); BZ_ITAH(29);
581 BZ_ITAH(30); BZ_ITAH(31); BZ_ITAH(32); BZ_ITAH(33); BZ_ITAH(34);
582 BZ_ITAH(35); BZ_ITAH(36); BZ_ITAH(37); BZ_ITAH(38); BZ_ITAH(39);
583 BZ_ITAH(40); BZ_ITAH(41); BZ_ITAH(42); BZ_ITAH(43); BZ_ITAH(44);
584 BZ_ITAH(45); BZ_ITAH(46); BZ_ITAH(47); BZ_ITAH(48); BZ_ITAH(49);
585#undef BZ_ITAH
586 gs = ge+1;
587 } else
588#endif
589 {
590 /*--- slow version which correctly handles all situations ---*/
591 /* code is bit bigger, but moves multiply out of the loop */
592 uint8_t* s_len_sel_selCtr = &(s->len [s->selector[selCtr]][0]);
593 int32_t* s_code_sel_selCtr = &(s->code[s->selector[selCtr]][0]);
594 while (gs <= ge) {
595 bsW(s,
596 s_len_sel_selCtr[mtfv[gs]],
597 s_code_sel_selCtr[mtfv[gs]]
598 );
599 gs++;
600 }
601 /* already is: gs = ge+1; */
602 }
603 selCtr++;
604 }
605 AssertH(selCtr == nSelectors, 3007);
606#undef code
607#undef rfreq
608#undef len_pack
609}
610
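/*
 * The "coding tables" loop above serializes each table's code lengths
 * as deltas: 5 bits for the first length, then for every symbol a run
 * of 2-bit markers ("10" = increment, "11" = decrement) closed by a
 * single '0' bit once the current length matches.  A standalone sketch
 * that prints that bit sequence for a small length array; the name
 * encode_lengths and the sample lengths are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

static void encode_lengths(const uint8_t *len, int n)
{
	int curr = len[0];
	for (int b = 4; b >= 0; b--)            /* 5-bit initial length */
		putchar('0' + ((curr >> b) & 1));
	for (int i = 0; i < n; i++) {
		while (curr < len[i]) { fputs("10", stdout); curr++; }
		while (curr > len[i]) { fputs("11", stdout); curr--; }
		putchar('0');                   /* "length reached" marker */
	}
	putchar('\n');
}

int main(void)
{
	const uint8_t len[] = { 3, 3, 4, 2 };
	encode_lengths(len, 4);  /* prints 000110010011110  (00011|0|0|10 0|11 11 0) */
	return 0;
}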
611
612/*---------------------------------------------------*/
613static
614void BZ2_compressBlock(EState* s, int is_last_block)
615{
616 if (s->nblock > 0) {
617 BZ_FINALISE_CRC(s->blockCRC);
618 s->combinedCRC = (s->combinedCRC << 1) | (s->combinedCRC >> 31);
619 s->combinedCRC ^= s->blockCRC;
620 if (s->blockNo > 1)
621 s->numZ = 0;
622
623 BZ2_blockSort(s);
624 }
625
626 s->zbits = &((uint8_t*)s->arr2)[s->nblock];
627
628 /*-- If this is the first block, create the stream header. --*/
629 if (s->blockNo == 1) {
630 BZ2_bsInitWrite(s);
631 /*bsPutU8(s, BZ_HDR_B);*/
632 /*bsPutU8(s, BZ_HDR_Z);*/
633 /*bsPutU8(s, BZ_HDR_h);*/
634 /*bsPutU8(s, BZ_HDR_0 + s->blockSize100k);*/
635 bsPutU32(s, BZ_HDR_BZh0 + s->blockSize100k);
636 }
637
638 if (s->nblock > 0) {
639 /*bsPutU8(s, 0x31);*/
640 /*bsPutU8(s, 0x41);*/
641 /*bsPutU8(s, 0x59);*/
642 /*bsPutU8(s, 0x26);*/
643 bsPutU32(s, 0x31415926);
644 /*bsPutU8(s, 0x53);*/
645 /*bsPutU8(s, 0x59);*/
646 bsPutU16(s, 0x5359);
647
648 /*-- Now the block's CRC, so it is in a known place. --*/
649 bsPutU32(s, s->blockCRC);
650
651 /*
652 * Now a single bit indicating (non-)randomisation.
653 * As of version 0.9.5, we use a better sorting algorithm
654 * which makes randomisation unnecessary. So always set
655 * the randomised bit to 'no'. Of course, the decoder
656 * still needs to be able to handle randomised blocks
657 * so as to maintain backwards compatibility with
658 * older versions of bzip2.
659 */
660 bsW(s, 1, 0);
661
662 bsW(s, 24, s->origPtr);
663 generateMTFValues(s);
664 sendMTFValues(s);
665 }
666
667 /*-- If this is the last block, add the stream trailer. --*/
668 if (is_last_block) {
669 /*bsPutU8(s, 0x17);*/
670 /*bsPutU8(s, 0x72);*/
671 /*bsPutU8(s, 0x45);*/
672 /*bsPutU8(s, 0x38);*/
673 bsPutU32(s, 0x17724538);
674 /*bsPutU8(s, 0x50);*/
675 /*bsPutU8(s, 0x90);*/
676 bsPutU16(s, 0x5090);
677 bsPutU32(s, s->combinedCRC);
678 bsFinishWrite(s);
679 }
680}
681
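/*
 * BZ2_compressBlock() above folds each block CRC into the stream CRC by
 * rotating the running value left one bit and XOR-ing in the block CRC.
 * A minimal standalone sketch of that accumulation; combine_crc and the
 * sample CRC values are illustrative, not taken from a real stream.
 */
#include <stdint.h>
#include <stdio.h>

/* combinedCRC = rotate_left_1(combinedCRC) ^ blockCRC, as in the code above */
static uint32_t combine_crc(uint32_t combined, uint32_t block_crc)
{
	combined = (combined << 1) | (combined >> 31);
	return combined ^ block_crc;
}

int main(void)
{
	const uint32_t block_crcs[] = { 0x12345678, 0x9abcdef0, 0x0badc0de };
	uint32_t combined = 0;
	for (unsigned i = 0; i < 3; i++) {
		combined = combine_crc(combined, block_crcs[i]);
		printf("after block %u: %08X\n", i + 1, (unsigned)combined);
	}
	return 0;
}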
682
683/*-------------------------------------------------------------*/
684/*--- end compress.c ---*/
685/*-------------------------------------------------------------*/
diff --git a/archival/bz/huffman.c b/archival/bz/huffman.c
deleted file mode 100644
index 676b1af66..000000000
--- a/archival/bz/huffman.c
+++ /dev/null
@@ -1,229 +0,0 @@
1/*
2 * bzip2 is written by Julian Seward <jseward@bzip.org>.
3 * Adapted for busybox by Denys Vlasenko <vda.linux@googlemail.com>.
4 * See README and LICENSE files in this directory for more information.
5 */
6
7/*-------------------------------------------------------------*/
8/*--- Huffman coding low-level stuff ---*/
9/*--- huffman.c ---*/
10/*-------------------------------------------------------------*/
11
12/* ------------------------------------------------------------------
13This file is part of bzip2/libbzip2, a program and library for
14lossless, block-sorting data compression.
15
16bzip2/libbzip2 version 1.0.4 of 20 December 2006
17Copyright (C) 1996-2006 Julian Seward <jseward@bzip.org>
18
19Please read the WARNING, DISCLAIMER and PATENTS sections in the
20README file.
21
22This program is released under the terms of the license contained
23in the file LICENSE.
24------------------------------------------------------------------ */
25
26/* #include "bzlib_private.h" */
27
28/*---------------------------------------------------*/
29#define WEIGHTOF(zz0) ((zz0) & 0xffffff00)
30#define DEPTHOF(zz1) ((zz1) & 0x000000ff)
31#define MYMAX(zz2,zz3) ((zz2) > (zz3) ? (zz2) : (zz3))
32
33#define ADDWEIGHTS(zw1,zw2) \
34 (WEIGHTOF(zw1)+WEIGHTOF(zw2)) | \
35 (1 + MYMAX(DEPTHOF(zw1),DEPTHOF(zw2)))
36
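/*
 * The WEIGHTOF/DEPTHOF/ADDWEIGHTS macros above keep each tree node's
 * (weight, depth) pair packed in one int32_t: weight in the upper 24
 * bits, depth in the low 8 bits, so a plain integer comparison orders
 * nodes by weight first with depth as the tie-breaker.  A standalone
 * sketch of the same packing; pack_node/weight_of/depth_of/add_weights
 * are illustrative names, not bzip2 API.
 */
#include <stdint.h>
#include <stdio.h>

static int32_t pack_node(int32_t weight, int32_t depth)
{
	return (weight << 8) | depth;
}
static int32_t weight_of(int32_t zz) { return zz & 0xffffff00; }
static int32_t depth_of(int32_t zz)  { return zz & 0x000000ff; }

/* Merging two nodes adds their weights and takes 1 + max(depth),
 * exactly what ADDWEIGHTS computes. */
static int32_t add_weights(int32_t a, int32_t b)
{
	int32_t da = depth_of(a), db = depth_of(b);
	return (weight_of(a) + weight_of(b)) | (1 + (da > db ? da : db));
}

int main(void)
{
	int32_t n1 = pack_node(3, 0);  /* leaf with frequency 3        */
	int32_t n2 = pack_node(5, 2);  /* subtree of weight 5, depth 2 */
	int32_t p  = add_weights(n1, n2);
	printf("weight=%d depth=%d\n",
	       (int)(weight_of(p) >> 8), (int)depth_of(p));  /* weight=8 depth=3 */
	return 0;
}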
37#define UPHEAP(z) \
38{ \
39 int32_t zz, tmp; \
40 zz = z; \
41 tmp = heap[zz]; \
42 while (weight[tmp] < weight[heap[zz >> 1]]) { \
43 heap[zz] = heap[zz >> 1]; \
44 zz >>= 1; \
45 } \
46 heap[zz] = tmp; \
47}
48
49
50/* 90 bytes, 0.3% of overall compress speed */
51#if CONFIG_BZIP2_FEATURE_SPEED >= 1
52
53/* macro works better than inline (gcc 4.2.1) */
54#define DOWNHEAP1(heap, weight, Heap) \
55{ \
56 int32_t zz, yy, tmp; \
57 zz = 1; \
58 tmp = heap[zz]; \
59 while (1) { \
60 yy = zz << 1; \
61 if (yy > nHeap) \
62 break; \
63 if (yy < nHeap \
64 && weight[heap[yy+1]] < weight[heap[yy]]) \
65 yy++; \
66 if (weight[tmp] < weight[heap[yy]]) \
67 break; \
68 heap[zz] = heap[yy]; \
69 zz = yy; \
70 } \
71 heap[zz] = tmp; \
72}
73
74#else
75
76static
77void DOWNHEAP1(int32_t *heap, int32_t *weight, int32_t nHeap)
78{
79 int32_t zz, yy, tmp;
80 zz = 1;
81 tmp = heap[zz];
82 while (1) {
83 yy = zz << 1;
84 if (yy > nHeap)
85 break;
86 if (yy < nHeap
87 && weight[heap[yy + 1]] < weight[heap[yy]])
88 yy++;
89 if (weight[tmp] < weight[heap[yy]])
90 break;
91 heap[zz] = heap[yy];
92 zz = yy;
93 }
94 heap[zz] = tmp;
95}
96
97#endif
98
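/*
 * UPHEAP/DOWNHEAP1 above implement a 1-based binary min-heap with a
 * sentinel at index 0: BZ2_hbMakeCodeLengths() sets weight[0] = 0,
 * which is smaller than every real (packed) weight, so the sift-up
 * loop never needs an explicit "zz > 1" bound check.  A standalone
 * sketch of that sift-up with the same sentinel trick; the names and
 * sample weights are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

#define MAXH 16

static int32_t heap[MAXH + 2];
static int32_t weight[MAXH + 2];
static int nHeap;

static void upheap(int z)              /* same body as the UPHEAP macro */
{
	int zz = z;
	int32_t tmp = heap[zz];
	while (weight[tmp] < weight[heap[zz >> 1]]) {
		heap[zz] = heap[zz >> 1];
		zz >>= 1;
	}
	heap[zz] = tmp;
}

int main(void)
{
	const int32_t w[] = { 0 /* sentinel slot */, 7, 3, 9, 1, 5 };
	heap[0] = 0;
	weight[0] = 0;                 /* sentinel stops the sift-up at the root */
	nHeap = 0;
	for (int i = 1; i <= 5; i++) {
		weight[i] = w[i];
		heap[++nHeap] = i;
		upheap(nHeap);
	}
	printf("min: node %d, weight %d\n",
	       (int)heap[1], (int)weight[heap[1]]);   /* min: node 4, weight 1 */
	return 0;
}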
99/*---------------------------------------------------*/
100static
101void BZ2_hbMakeCodeLengths(EState *s,
102 uint8_t *len,
103 int32_t *freq,
104 int32_t alphaSize,
105 int32_t maxLen)
106{
107 /*
108 * Nodes and heap entries run from 1. Entry 0
109 * for both the heap and nodes is a sentinel.
110 */
111 int32_t nNodes, nHeap, n1, n2, i, j, k;
112 Bool tooLong;
113
114 /* bbox: moved to EState to save stack
115 int32_t heap [BZ_MAX_ALPHA_SIZE + 2];
116 int32_t weight[BZ_MAX_ALPHA_SIZE * 2];
117 int32_t parent[BZ_MAX_ALPHA_SIZE * 2];
118 */
119#define heap (s->BZ2_hbMakeCodeLengths__heap)
120#define weight (s->BZ2_hbMakeCodeLengths__weight)
121#define parent (s->BZ2_hbMakeCodeLengths__parent)
122
123 for (i = 0; i < alphaSize; i++)
124 weight[i+1] = (freq[i] == 0 ? 1 : freq[i]) << 8;
125
126 while (1) {
127 nNodes = alphaSize;
128 nHeap = 0;
129
130 heap[0] = 0;
131 weight[0] = 0;
132 parent[0] = -2;
133
134 for (i = 1; i <= alphaSize; i++) {
135 parent[i] = -1;
136 nHeap++;
137 heap[nHeap] = i;
138 UPHEAP(nHeap);
139 }
140
141 AssertH(nHeap < (BZ_MAX_ALPHA_SIZE+2), 2001);
142
143 while (nHeap > 1) {
144 n1 = heap[1]; heap[1] = heap[nHeap]; nHeap--; DOWNHEAP1(heap, weight, nHeap);
145 n2 = heap[1]; heap[1] = heap[nHeap]; nHeap--; DOWNHEAP1(heap, weight, nHeap);
146 nNodes++;
147 parent[n1] = parent[n2] = nNodes;
148 weight[nNodes] = ADDWEIGHTS(weight[n1], weight[n2]);
149 parent[nNodes] = -1;
150 nHeap++;
151 heap[nHeap] = nNodes;
152 UPHEAP(nHeap);
153 }
154
155 AssertH(nNodes < (BZ_MAX_ALPHA_SIZE * 2), 2002);
156
157 tooLong = False;
158 for (i = 1; i <= alphaSize; i++) {
159 j = 0;
160 k = i;
161 while (parent[k] >= 0) {
162 k = parent[k];
163 j++;
164 }
165 len[i-1] = j;
166 if (j > maxLen)
167 tooLong = True;
168 }
169
170 if (!tooLong)
171 break;
172
173 /* 17 Oct 04: keep-going condition for the following loop used
174 to be 'i < alphaSize', which missed the last element,
175 theoretically leading to the possibility of the compressor
176 looping. However, this count-scaling step is only needed if
177 one of the generated Huffman code words is longer than
178 maxLen, which up to and including version 1.0.2 was 20 bits,
179 which is extremely unlikely. In version 1.0.3 maxLen was
180 changed to 17 bits, which has minimal effect on compression
181 ratio, but does mean this scaling step is used from time to
182 time, enough to verify that it works.
183
184 This means that bzip2-1.0.3 and later will only produce
185 Huffman codes with a maximum length of 17 bits. However, in
186 order to preserve backwards compatibility with bitstreams
187 produced by versions pre-1.0.3, the decompressor must still
188 handle lengths of up to 20. */
189
190 for (i = 1; i <= alphaSize; i++) {
191 j = weight[i] >> 8;
192 /* bbox: yes, it is a signed division.
193 * don't replace with shift! */
194 j = 1 + (j / 2);
195 weight[i] = j << 8;
196 }
197 }
198#undef heap
199#undef weight
200#undef parent
201}
202
203
204/*---------------------------------------------------*/
205static
206void BZ2_hbAssignCodes(int32_t *code,
207 uint8_t *length,
208 int32_t minLen,
209 int32_t maxLen,
210 int32_t alphaSize)
211{
212 int32_t n, vec, i;
213
214 vec = 0;
215 for (n = minLen; n <= maxLen; n++) {
216 for (i = 0; i < alphaSize; i++) {
217 if (length[i] == n) {
218 code[i] = vec;
219 vec++;
220 };
221 }
222 vec <<= 1;
223 }
224}
225
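/*
 * BZ2_hbAssignCodes() above assigns canonical prefix codes: symbols are
 * numbered consecutively within each code length, shortest length
 * first, and the running counter is doubled when the length grows by
 * one bit.  A standalone sketch that prints the resulting codes for a
 * small, valid length table; assign_codes/print_bits and the sample
 * lengths are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

static void assign_codes(int32_t *code, const uint8_t *length,
                         int minLen, int maxLen, int alphaSize)
{
	int32_t vec = 0;
	for (int n = minLen; n <= maxLen; n++) {
		for (int i = 0; i < alphaSize; i++)
			if (length[i] == n)
				code[i] = vec++;
		vec <<= 1;
	}
}

static void print_bits(int32_t v, int nbits)
{
	for (int b = nbits - 1; b >= 0; b--)
		putchar('0' + (int)((v >> b) & 1));
}

int main(void)
{
	const uint8_t length[] = { 2, 3, 3, 2, 2 };  /* Kraft sum is exactly 1 */
	int32_t code[5];
	assign_codes(code, length, 2, 3, 5);
	for (int i = 0; i < 5; i++) {
		printf("symbol %d: len %d code ", i, length[i]);
		print_bits(code[i], length[i]);
		putchar('\n');
	}
	/* expected codes: 00, 110, 111, 01, 10 */
	return 0;
}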
226
227/*-------------------------------------------------------------*/
228/*--- end huffman.c ---*/
229/*-------------------------------------------------------------*/