aboutsummaryrefslogtreecommitdiff
path: root/archival/bz/compress.c
diff options
context:
space:
mode:
authorDenis Vlasenko <vda.linux@googlemail.com>2007-10-13 03:36:03 +0000
committerDenis Vlasenko <vda.linux@googlemail.com>2007-10-13 03:36:03 +0000
commit77f1ec1b9bf100e6c10aa0856c7156e321511785 (patch)
treef20e5a9062ecad82a43bde81e3041a19c4292733 /archival/bz/compress.c
parent11c23d7b990eae27357e5a41a97d62b9a214f7db (diff)
downloadbusybox-w32-77f1ec1b9bf100e6c10aa0856c7156e321511785.tar.gz
busybox-w32-77f1ec1b9bf100e6c10aa0856c7156e321511785.tar.bz2
busybox-w32-77f1ec1b9bf100e6c10aa0856c7156e321511785.zip
bzip2: port bzip2 1.0.4 to busybox. note: bzip2 code resides
in separate directory (archival/bz/*) and is covered by BSD-style license. code size: 13k
Diffstat (limited to 'archival/bz/compress.c')
-rw-r--r--archival/bz/compress.c671
1 files changed, 671 insertions, 0 deletions
diff --git a/archival/bz/compress.c b/archival/bz/compress.c
new file mode 100644
index 000000000..54426dcc7
--- /dev/null
+++ b/archival/bz/compress.c
@@ -0,0 +1,671 @@
1/*
2 * bzip2 is written by Julian Seward <jseward@bzip.org>.
3 * Adapted for busybox by Denys Vlasenko <vda.linux@googlemail.com>.
4 * See README and LICENSE files in this directory for more information.
5 */
6
7/*-------------------------------------------------------------*/
8/*--- Compression machinery (not incl block sorting) ---*/
9/*--- compress.c ---*/
10/*-------------------------------------------------------------*/
11
12/* ------------------------------------------------------------------
13This file is part of bzip2/libbzip2, a program and library for
14lossless, block-sorting data compression.
15
16bzip2/libbzip2 version 1.0.4 of 20 December 2006
17Copyright (C) 1996-2006 Julian Seward <jseward@bzip.org>
18
19Please read the WARNING, DISCLAIMER and PATENTS sections in the
20README file.
21
22This program is released under the terms of the license contained
23in the file LICENSE.
24------------------------------------------------------------------ */
25
26/* CHANGES
27 * 0.9.0 -- original version.
28 * 0.9.0a/b -- no changes in this file.
29 * 0.9.0c -- changed setting of nGroups in sendMTFValues()
30 * so as to do a bit better on small files
31*/
32
33/* #include "bzlib_private.h" */
34
35/*---------------------------------------------------*/
36/*--- Bit stream I/O ---*/
37/*---------------------------------------------------*/
38
39/*---------------------------------------------------*/
40static
41void BZ2_bsInitWrite(EState* s)
42{
43 s->bsLive = 0;
44 s->bsBuff = 0;
45}
46
47
48/*---------------------------------------------------*/
49static NOINLINE
50void bsFinishWrite(EState* s)
51{
52 while (s->bsLive > 0) {
53 s->zbits[s->numZ] = (UChar)(s->bsBuff >> 24);
54 s->numZ++;
55 s->bsBuff <<= 8;
56 s->bsLive -= 8;
57 }
58}
59
60
61/*---------------------------------------------------*/
62static
63/* Forced inlining results in +600 bytes code,
64 * 2% faster compression. Not worth it. */
65/*ALWAYS_INLINE*/
66void bsW(EState* s, int32_t n, uint32_t v)
67{
68 while (s->bsLive >= 8) {
69 s->zbits[s->numZ] = (UChar)(s->bsBuff >> 24);
70 s->numZ++;
71 s->bsBuff <<= 8;
72 s->bsLive -= 8;
73 }
74 s->bsBuff |= (v << (32 - s->bsLive - n));
75 s->bsLive += n;
76}
77
78
79/*---------------------------------------------------*/
80static
81void bsPutU32(EState* s, uint32_t u)
82{
83 bsW(s, 8, (u >> 24) & 0xff);
84 bsW(s, 8, (u >> 16) & 0xff);
85 bsW(s, 8, (u >> 8) & 0xff);
86 bsW(s, 8, u & 0xff);
87}
88
89
90/*---------------------------------------------------*/
91static
92void bsPutUChar(EState* s, UChar c)
93{
94 bsW(s, 8, (uint32_t)c);
95}
96
97
98/*---------------------------------------------------*/
99/*--- The back end proper ---*/
100/*---------------------------------------------------*/
101
102/*---------------------------------------------------*/
103static
104void makeMaps_e(EState* s)
105{
106 int32_t i;
107 s->nInUse = 0;
108 for (i = 0; i < 256; i++) {
109 if (s->inUse[i]) {
110 s->unseqToSeq[i] = s->nInUse;
111 s->nInUse++;
112 }
113 }
114}
115
116
117/*---------------------------------------------------*/
118static NOINLINE
119void generateMTFValues(EState* s)
120{
121 UChar yy[256];
122 int32_t i, j;
123 int32_t zPend;
124 int32_t wr;
125 int32_t EOB;
126
127 /*
128 * After sorting (eg, here),
129 * s->arr1[0 .. s->nblock-1] holds sorted order,
130 * and
131 * ((UChar*)s->arr2)[0 .. s->nblock-1]
132 * holds the original block data.
133 *
134 * The first thing to do is generate the MTF values,
135 * and put them in
136 * ((uint16_t*)s->arr1)[0 .. s->nblock-1].
137 * Because there are strictly fewer or equal MTF values
138 * than block values, ptr values in this area are overwritten
139 * with MTF values only when they are no longer needed.
140 *
141 * The final compressed bitstream is generated into the
142 * area starting at
143 * (UChar*) (&((UChar*)s->arr2)[s->nblock])
144 *
145 * These storage aliases are set up in bzCompressInit(),
146 * except for the last one, which is arranged in
147 * compressBlock().
148 */
149 uint32_t* ptr = s->ptr;
150 UChar* block = s->block;
151 uint16_t* mtfv = s->mtfv;
152
153 makeMaps_e(s);
154 EOB = s->nInUse+1;
155
156 for (i = 0; i <= EOB; i++)
157 s->mtfFreq[i] = 0;
158
159 wr = 0;
160 zPend = 0;
161 for (i = 0; i < s->nInUse; i++)
162 yy[i] = (UChar) i;
163
164 for (i = 0; i < s->nblock; i++) {
165 UChar ll_i;
166 AssertD(wr <= i, "generateMTFValues(1)");
167 j = ptr[i]-1;
168 if (j < 0)
169 j += s->nblock;
170 ll_i = s->unseqToSeq[block[j]];
171 AssertD(ll_i < s->nInUse, "generateMTFValues(2a)");
172
173 if (yy[0] == ll_i) {
174 zPend++;
175 } else {
176 if (zPend > 0) {
177 zPend--;
178 while (1) {
179 if (zPend & 1) {
180 mtfv[wr] = BZ_RUNB; wr++;
181 s->mtfFreq[BZ_RUNB]++;
182 } else {
183 mtfv[wr] = BZ_RUNA; wr++;
184 s->mtfFreq[BZ_RUNA]++;
185 }
186 if (zPend < 2) break;
187 zPend = (zPend - 2) / 2;
188 };
189 zPend = 0;
190 }
191 {
192 register UChar rtmp;
193 register UChar* ryy_j;
194 register UChar rll_i;
195 rtmp = yy[1];
196 yy[1] = yy[0];
197 ryy_j = &(yy[1]);
198 rll_i = ll_i;
199 while (rll_i != rtmp) {
200 register UChar rtmp2;
201 ryy_j++;
202 rtmp2 = rtmp;
203 rtmp = *ryy_j;
204 *ryy_j = rtmp2;
205 };
206 yy[0] = rtmp;
207 j = ryy_j - &(yy[0]);
208 mtfv[wr] = j+1;
209 wr++;
210 s->mtfFreq[j+1]++;
211 }
212
213 }
214 }
215
216 if (zPend > 0) {
217 zPend--;
218 while (1) {
219 if (zPend & 1) {
220 mtfv[wr] = BZ_RUNB; wr++;
221 s->mtfFreq[BZ_RUNB]++;
222 } else {
223 mtfv[wr] = BZ_RUNA; wr++;
224 s->mtfFreq[BZ_RUNA]++;
225 }
226 if (zPend < 2)
227 break;
228 zPend = (zPend - 2) / 2;
229 };
230 zPend = 0;
231 }
232
233 mtfv[wr] = EOB;
234 wr++;
235 s->mtfFreq[EOB]++;
236
237 s->nMTF = wr;
238}
239
240
241/*---------------------------------------------------*/
242#define BZ_LESSER_ICOST 0
243#define BZ_GREATER_ICOST 15
244
static NOINLINE
/* Huffman-code the MTF symbol stream and emit the coded block:
 * choose 2..6 coding tables, refine them over BZ_N_ITERS passes,
 * then transmit the symbol map, the table selectors, the code
 * lengths, and finally the coded data itself. */
void sendMTFValues(EState* s)
{
	int32_t v, t, i, j, gs, ge, totc, bt, bc, iter;
	int32_t nSelectors, alphaSize, minLen, maxLen, selCtr;
	int32_t nGroups, nBytes;

	/*
	 * UChar len [BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
	 * is a global since the decoder also needs it.
	 *
	 * int32_t code[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
	 * int32_t rfreq[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
	 * are also globals only used in this proc.
	 * Made global to keep stack frame size small.
	 */

	uint16_t cost[BZ_N_GROUPS];
	int32_t fave[BZ_N_GROUPS];

	uint16_t* mtfv = s->mtfv;

	/* alphabet = nInUse MTF codes + RUNA/RUNB + EOB, minus the
	 * symbol 0 slot they displace => nInUse+2 */
	alphaSize = s->nInUse+2;
	for (t = 0; t < BZ_N_GROUPS; t++)
		for (v = 0; v < alphaSize; v++)
			s->len[t][v] = BZ_GREATER_ICOST;

	/*--- Decide how many coding tables to use ---*/
	AssertH(s->nMTF > 0, 3001);
	if (s->nMTF < 200) nGroups = 2; else
	if (s->nMTF < 600) nGroups = 3; else
	if (s->nMTF < 1200) nGroups = 4; else
	if (s->nMTF < 2400) nGroups = 5; else
	nGroups = 6;

	/*--- Generate an initial set of coding tables ---*/
	{
		int32_t nPart, remF, tFreq, aFreq;

		nPart = nGroups;
		remF = s->nMTF;
		gs = 0;
		while (nPart > 0) {
			/* give each remaining table roughly an equal share
			 * of the remaining symbol frequency */
			tFreq = remF / nPart;
			ge = gs-1;
			aFreq = 0;
			while (aFreq < tFreq && ge < alphaSize-1) {
				ge++;
				aFreq += s->mtfFreq[ge];
			}

			if (ge > gs
			 && nPart != nGroups && nPart != 1
			 && ((nGroups-nPart) % 2 == 1)
			) {
				aFreq -= s->mtfFreq[ge];
				ge--;
			}

			/* symbols in [gs..ge] are "cheap" for table nPart-1,
			 * everything else "expensive" */
			for (v = 0; v < alphaSize; v++)
				if (v >= gs && v <= ge)
					s->len[nPart-1][v] = BZ_LESSER_ICOST;
				else
					s->len[nPart-1][v] = BZ_GREATER_ICOST;

			nPart--;
			gs = ge+1;
			remF -= aFreq;
		}
	}

	/*
	 * Iterate up to BZ_N_ITERS times to improve the tables.
	 */
	for (iter = 0; iter < BZ_N_ITERS; iter++) {
		for (t = 0; t < nGroups; t++)
			fave[t] = 0;

		for (t = 0; t < nGroups; t++)
			for (v = 0; v < alphaSize; v++)
				s->rfreq[t][v] = 0;

#ifdef FAST_GROUP6
		/*
		 * Set up an auxiliary length table which is used to fast-track
		 * the common case (nGroups == 6).
		 */
		if (nGroups == 6) {
			for (v = 0; v < alphaSize; v++) {
				s->len_pack[v][0] = (s->len[1][v] << 16) | s->len[0][v];
				s->len_pack[v][1] = (s->len[3][v] << 16) | s->len[2][v];
				s->len_pack[v][2] = (s->len[5][v] << 16) | s->len[4][v];
			}
		}
#endif

		nSelectors = 0;
		totc = 0;
		gs = 0;
		while (1) {
			/*--- Set group start & end marks. --*/
			if (gs >= s->nMTF)
				break;
			ge = gs + BZ_G_SIZE - 1;
			if (ge >= s->nMTF)
				ge = s->nMTF-1;

			/*
			 * Calculate the cost of this group as coded
			 * by each of the coding tables.
			 */
			for (t = 0; t < nGroups; t++)
				cost[t] = 0;
#ifdef FAST_GROUP6
			if (nGroups == 6 && 50 == ge-gs+1) {
				/*--- fast track the common case ---*/
				register uint32_t cost01, cost23, cost45;
				register uint16_t icv;
				cost01 = cost23 = cost45 = 0;
#define BZ_ITER(nn) \
	icv = mtfv[gs+(nn)]; \
	cost01 += s->len_pack[icv][0]; \
	cost23 += s->len_pack[icv][1]; \
	cost45 += s->len_pack[icv][2];
				BZ_ITER(0); BZ_ITER(1); BZ_ITER(2); BZ_ITER(3); BZ_ITER(4);
				BZ_ITER(5); BZ_ITER(6); BZ_ITER(7); BZ_ITER(8); BZ_ITER(9);
				BZ_ITER(10); BZ_ITER(11); BZ_ITER(12); BZ_ITER(13); BZ_ITER(14);
				BZ_ITER(15); BZ_ITER(16); BZ_ITER(17); BZ_ITER(18); BZ_ITER(19);
				BZ_ITER(20); BZ_ITER(21); BZ_ITER(22); BZ_ITER(23); BZ_ITER(24);
				BZ_ITER(25); BZ_ITER(26); BZ_ITER(27); BZ_ITER(28); BZ_ITER(29);
				BZ_ITER(30); BZ_ITER(31); BZ_ITER(32); BZ_ITER(33); BZ_ITER(34);
				BZ_ITER(35); BZ_ITER(36); BZ_ITER(37); BZ_ITER(38); BZ_ITER(39);
				BZ_ITER(40); BZ_ITER(41); BZ_ITER(42); BZ_ITER(43); BZ_ITER(44);
				BZ_ITER(45); BZ_ITER(46); BZ_ITER(47); BZ_ITER(48); BZ_ITER(49);
#undef BZ_ITER
				cost[0] = cost01 & 0xffff; cost[1] = cost01 >> 16;
				cost[2] = cost23 & 0xffff; cost[3] = cost23 >> 16;
				cost[4] = cost45 & 0xffff; cost[5] = cost45 >> 16;

			} else
#endif
			{
				/*--- slow version which correctly handles all situations ---*/
				for (i = gs; i <= ge; i++) {
					uint16_t icv = mtfv[i];
					for (t = 0; t < nGroups; t++)
						cost[t] += s->len[t][icv];
				}
			}
			/*
			 * Find the coding table which is best for this group,
			 * and record its identity in the selector table.
			 */
			bc = 999999999;
			bt = -1;
			//bc = cost[0];
			//bt = 0;
			for (t = 0; t < nGroups; t++) {
				if (cost[t] < bc) {
					bc = cost[t];
					bt = t;
				}
			}
			totc += bc;
			fave[bt]++;
			s->selector[nSelectors] = bt;
			nSelectors++;

			/*
			 * Increment the symbol frequencies for the selected table.
			 */
/* ~0.5% faster compress. +800 bytes */
#if 0
			if (nGroups == 6 && 50 == ge-gs+1) {
				/*--- fast track the common case ---*/
#define BZ_ITUR(nn) s->rfreq[bt][mtfv[gs + (nn)]]++
				BZ_ITUR(0); BZ_ITUR(1); BZ_ITUR(2); BZ_ITUR(3); BZ_ITUR(4);
				BZ_ITUR(5); BZ_ITUR(6); BZ_ITUR(7); BZ_ITUR(8); BZ_ITUR(9);
				BZ_ITUR(10); BZ_ITUR(11); BZ_ITUR(12); BZ_ITUR(13); BZ_ITUR(14);
				BZ_ITUR(15); BZ_ITUR(16); BZ_ITUR(17); BZ_ITUR(18); BZ_ITUR(19);
				BZ_ITUR(20); BZ_ITUR(21); BZ_ITUR(22); BZ_ITUR(23); BZ_ITUR(24);
				BZ_ITUR(25); BZ_ITUR(26); BZ_ITUR(27); BZ_ITUR(28); BZ_ITUR(29);
				BZ_ITUR(30); BZ_ITUR(31); BZ_ITUR(32); BZ_ITUR(33); BZ_ITUR(34);
				BZ_ITUR(35); BZ_ITUR(36); BZ_ITUR(37); BZ_ITUR(38); BZ_ITUR(39);
				BZ_ITUR(40); BZ_ITUR(41); BZ_ITUR(42); BZ_ITUR(43); BZ_ITUR(44);
				BZ_ITUR(45); BZ_ITUR(46); BZ_ITUR(47); BZ_ITUR(48); BZ_ITUR(49);
#undef BZ_ITUR
				gs = ge+1;
			} else
#endif
			{
				/*--- slow version which correctly handles all situations ---*/
				while (gs <= ge) {
					s->rfreq[bt][mtfv[gs]]++;
					gs++;
				}
				/* already is: gs = ge+1; */
			}
		}

		/*
		 * Recompute the tables based on the accumulated frequencies.
		 */
		/* maxLen was changed from 20 to 17 in bzip2-1.0.3. See
		 * comment in huffman.c for details. */
		for (t = 0; t < nGroups; t++)
			BZ2_hbMakeCodeLengths(&(s->len[t][0]), &(s->rfreq[t][0]), alphaSize, 17 /*20*/);
	}

	AssertH(nGroups < 8, 3002);
	AssertH(nSelectors < 32768 && nSelectors <= (2 + (900000 / BZ_G_SIZE)), 3003);

	/*--- Compute MTF values for the selectors. ---*/
	/* (selectors are themselves move-to-front coded before
	 * transmission, since nearby groups tend to pick the same table) */
	{
		UChar pos[BZ_N_GROUPS], ll_i, tmp2, tmp;

		for (i = 0; i < nGroups; i++)
			pos[i] = i;
		for (i = 0; i < nSelectors; i++) {
			ll_i = s->selector[i];
			j = 0;
			tmp = pos[j];
			while (ll_i != tmp) {
				j++;
				tmp2 = tmp;
				tmp = pos[j];
				pos[j] = tmp2;
			};
			pos[0] = tmp;
			s->selectorMtf[i] = j;
		}
	};

	/*--- Assign actual codes for the tables. --*/
	for (t = 0; t < nGroups; t++) {
		minLen = 32;
		maxLen = 0;
		for (i = 0; i < alphaSize; i++) {
			if (s->len[t][i] > maxLen) maxLen = s->len[t][i];
			if (s->len[t][i] < minLen) minLen = s->len[t][i];
		}
		AssertH(!(maxLen > 17 /*20*/), 3004);
		AssertH(!(minLen < 1), 3005);
		BZ2_hbAssignCodes(&(s->code[t][0]), &(s->len[t][0]), minLen, maxLen, alphaSize);
	}

	/*--- Transmit the mapping table. ---*/
	/* (two-level bitmap: 16 bits say which 16-wide slices of the
	 * 256 byte values are used, then one bitmap per used slice) */
	{
		Bool inUse16[16];
		for (i = 0; i < 16; i++) {
			inUse16[i] = False;
			for (j = 0; j < 16; j++)
				if (s->inUse[i * 16 + j])
					inUse16[i] = True;
		}

		nBytes = s->numZ;
		for (i = 0; i < 16; i++) {
			if (inUse16[i])
				bsW(s, 1, 1);
			else
				bsW(s, 1, 0);
		}

		for (i = 0; i < 16; i++) {
			if (inUse16[i]) {
				for (j = 0; j < 16; j++) {
					if (s->inUse[i * 16 + j])
						bsW(s, 1, 1);
					else
						bsW(s, 1, 0);
				}
			}
		}
	}

	/*--- Now the selectors. ---*/
	/* (each MTF-coded selector value j is sent as j 1-bits and a 0) */
	nBytes = s->numZ;
	bsW(s, 3, nGroups);
	bsW(s, 15, nSelectors);
	for (i = 0; i < nSelectors; i++) {
		for (j = 0; j < s->selectorMtf[i]; j++)
			bsW(s, 1, 1);
		bsW(s, 1, 0);
	}

	/*--- Now the coding tables. ---*/
	/* (code lengths are delta-coded: 5-bit start value, then
	 * "10" = +1, "11" = -1, "0" = next symbol) */
	nBytes = s->numZ;

	for (t = 0; t < nGroups; t++) {
		int32_t curr = s->len[t][0];
		bsW(s, 5, curr);
		for (i = 0; i < alphaSize; i++) {
			while (curr < s->len[t][i]) { bsW(s, 2, 2); curr++; /* 10 */ };
			while (curr > s->len[t][i]) { bsW(s, 2, 3); curr--; /* 11 */ };
			bsW(s, 1, 0);
		}
	}

	/*--- And finally, the block data proper ---*/
	nBytes = s->numZ;
	selCtr = 0;
	gs = 0;
	while (1) {
		if (gs >= s->nMTF)
			break;
		ge = gs + BZ_G_SIZE - 1;
		if (ge >= s->nMTF)
			ge = s->nMTF-1;
		AssertH(s->selector[selCtr] < nGroups, 3006);

/* Costs 1300 bytes and is _slower_ (on Intel Core 2) */
#if 0
		if (nGroups == 6 && 50 == ge-gs+1) {
			/*--- fast track the common case ---*/
			uint16_t mtfv_i;
			UChar* s_len_sel_selCtr = &(s->len[s->selector[selCtr]][0]);
			int32_t* s_code_sel_selCtr = &(s->code[s->selector[selCtr]][0]);
#define BZ_ITAH(nn) \
	mtfv_i = mtfv[gs+(nn)]; \
	bsW(s, s_len_sel_selCtr[mtfv_i], s_code_sel_selCtr[mtfv_i])
			BZ_ITAH(0); BZ_ITAH(1); BZ_ITAH(2); BZ_ITAH(3); BZ_ITAH(4);
			BZ_ITAH(5); BZ_ITAH(6); BZ_ITAH(7); BZ_ITAH(8); BZ_ITAH(9);
			BZ_ITAH(10); BZ_ITAH(11); BZ_ITAH(12); BZ_ITAH(13); BZ_ITAH(14);
			BZ_ITAH(15); BZ_ITAH(16); BZ_ITAH(17); BZ_ITAH(18); BZ_ITAH(19);
			BZ_ITAH(20); BZ_ITAH(21); BZ_ITAH(22); BZ_ITAH(23); BZ_ITAH(24);
			BZ_ITAH(25); BZ_ITAH(26); BZ_ITAH(27); BZ_ITAH(28); BZ_ITAH(29);
			BZ_ITAH(30); BZ_ITAH(31); BZ_ITAH(32); BZ_ITAH(33); BZ_ITAH(34);
			BZ_ITAH(35); BZ_ITAH(36); BZ_ITAH(37); BZ_ITAH(38); BZ_ITAH(39);
			BZ_ITAH(40); BZ_ITAH(41); BZ_ITAH(42); BZ_ITAH(43); BZ_ITAH(44);
			BZ_ITAH(45); BZ_ITAH(46); BZ_ITAH(47); BZ_ITAH(48); BZ_ITAH(49);
#undef BZ_ITAH
			gs = ge+1;
		} else
#endif
		{
			/*--- slow version which correctly handles all situations ---*/
			/* code is bit bigger, but moves multiply out of the loop */
			UChar* s_len_sel_selCtr = &(s->len [s->selector[selCtr]][0]);
			int32_t* s_code_sel_selCtr = &(s->code[s->selector[selCtr]][0]);
			while (gs <= ge) {
				bsW(s,
					s_len_sel_selCtr[mtfv[gs]],
					s_code_sel_selCtr[mtfv[gs]]
				);
				gs++;
			}
			/* already is: gs = ge+1; */
		}
		selCtr++;
	}
	AssertH(selCtr == nSelectors, 3007);
}
598
599
600/*---------------------------------------------------*/
static
/* Compress one block: finalise its CRC, sort it, and emit the stream
 * header (first block only), the block header + coded data (non-empty
 * blocks only), and the stream trailer (last block only). */
void BZ2_compressBlock(EState* s, Bool is_last_block)
{
	if (s->nblock > 0) {
		BZ_FINALISE_CRC(s->blockCRC);
		/* combined CRC: rotate left one bit, then fold in this
		 * block's CRC */
		s->combinedCRC = (s->combinedCRC << 1) | (s->combinedCRC >> 31);
		s->combinedCRC ^= s->blockCRC;
		/* zbits output is restarted per block after the first */
		if (s->blockNo > 1)
			s->numZ = 0;

		BZ2_blockSort(s);
	}

	/* compressed bits go just past the block data inside arr2
	 * (see the aliasing comment in generateMTFValues) */
	s->zbits = (UChar*) (&((UChar*)s->arr2)[s->nblock]);

	/*-- If this is the first block, create the stream header. --*/
	if (s->blockNo == 1) {
		BZ2_bsInitWrite(s);
		/*bsPutUChar(s, BZ_HDR_B);*/
		/*bsPutUChar(s, BZ_HDR_Z);*/
		/*bsPutUChar(s, BZ_HDR_h);*/
		/*bsPutUChar(s, (UChar)(BZ_HDR_0 + s->blockSize100k));*/
		bsPutU32(s, BZ_HDR_BZh0 + s->blockSize100k);
	}

	if (s->nblock > 0) {
		/* 48-bit block header magic 0x314159265359 */
		/*bsPutUChar(s, 0x31);*/
		/*bsPutUChar(s, 0x41);*/
		/*bsPutUChar(s, 0x59);*/
		/*bsPutUChar(s, 0x26);*/
		bsPutU32(s, 0x31415926);
		bsPutUChar(s, 0x53);
		bsPutUChar(s, 0x59);

		/*-- Now the block's CRC, so it is in a known place. --*/
		bsPutU32(s, s->blockCRC);

		/*
		 * Now a single bit indicating (non-)randomisation.
		 * As of version 0.9.5, we use a better sorting algorithm
		 * which makes randomisation unnecessary. So always set
		 * the randomised bit to 'no'. Of course, the decoder
		 * still needs to be able to handle randomised blocks
		 * so as to maintain backwards compatibility with
		 * older versions of bzip2.
		 */
		bsW(s, 1, 0);

		/* BWT origin pointer, then the MTF+Huffman payload */
		bsW(s, 24, s->origPtr);
		generateMTFValues(s);
		sendMTFValues(s);
	}

	/*-- If this is the last block, add the stream trailer. --*/
	if (is_last_block) {
		/* 48-bit end-of-stream magic 0x177245385090,
		 * followed by the combined CRC of all blocks */
		/*bsPutUChar(s, 0x17);*/
		/*bsPutUChar(s, 0x72);*/
		/*bsPutUChar(s, 0x45);*/
		/*bsPutUChar(s, 0x38);*/
		bsPutU32(s, 0x17724538);
		bsPutUChar(s, 0x50);
		bsPutUChar(s, 0x90);
		bsPutU32(s, s->combinedCRC);
		/* flush the final partial byte */
		bsFinishWrite(s);
	}
}
667
668
669/*-------------------------------------------------------------*/
670/*--- end compress.c ---*/
671/*-------------------------------------------------------------*/