author     Rob Landley <rob@landley.net>  2006-03-09 17:51:25 +0000
committer  Rob Landley <rob@landley.net>  2006-03-09 17:51:25 +0000
commit     49a5599ae95580b05535cb16467ffb0c2a20b66e (patch)
tree       0bbbab7a9b19f6b5c57a4045ed8f79e0c7822d47
parent     478a188e851ed6fbcd58189929ba4cfc30170aed (diff)
download   busybox-w32-49a5599ae95580b05535cb16467ffb0c2a20b66e.tar.gz
           busybox-w32-49a5599ae95580b05535cb16467ffb0c2a20b66e.tar.bz2
           busybox-w32-49a5599ae95580b05535cb16467ffb0c2a20b66e.zip
Patch from Denis Vlasenko to make unlzma smaller.
-rw-r--r--  archival/libunarchive/rangecoder.h  38
1 file changed, 29 insertions(+), 9 deletions(-)
diff --git a/archival/libunarchive/rangecoder.h b/archival/libunarchive/rangecoder.h
index bb4159112..43f752257 100644
--- a/archival/libunarchive/rangecoder.h
+++ b/archival/libunarchive/rangecoder.h
@@ -56,7 +56,8 @@ typedef struct {
 #define RC_MODEL_TOTAL_BITS 11
 
 
-static speed_inline void rc_read(rc_t * rc)
+/* Called twice: once at startup and once in rc_normalize() */
+static void rc_read(rc_t * rc)
 {
 	rc->buffer_size = read(rc->fd, rc->buffer, rc->buffer_size);
 	if (rc->buffer_size <= 0)
@@ -65,6 +66,7 @@ static speed_inline void rc_read(rc_t * rc)
 	rc->buffer_end = rc->buffer + rc->buffer_size;
 }
 
+/* Called once */
 static always_inline void rc_init(rc_t * rc, int fd, int buffer_size)
 {
 	int i;
@@ -84,35 +86,50 @@ static always_inline void rc_init(rc_t * rc, int fd, int buffer_size)
 	}
 }
 
+/* Called once. TODO: bb_maybe_free() */
 static always_inline void rc_free(rc_t * rc)
 {
 	if (ENABLE_FEATURE_CLEAN_UP)
 		free(rc->buffer);
 }
 
+/* Called twice, but one callsite is in speed_inline'd rc_is_bit_0_helper() */
+static void rc_do_normalize(rc_t * rc)
+{
+	if (rc->ptr >= rc->buffer_end)
+		rc_read(rc);
+	rc->range <<= 8;
+	rc->code = (rc->code << 8) | *rc->ptr++;
+}
 static always_inline void rc_normalize(rc_t * rc)
 {
 	if (rc->range < (1 << RC_TOP_BITS)) {
-		if (rc->ptr >= rc->buffer_end)
-			rc_read(rc);
-		rc->range <<= 8;
-		rc->code = (rc->code << 8) | *rc->ptr++;
+		rc_do_normalize(rc);
 	}
 }
 
-static speed_inline int rc_is_bit_0(rc_t * rc, uint16_t * p)
+/* Called 9 times */
+/* Why rc_is_bit_0_helper exists?
+ * Because we want to always expose (rc->code < rc->bound) to optimizer
+ */
+static speed_inline uint32_t rc_is_bit_0_helper(rc_t * rc, uint16_t * p)
 {
 	rc_normalize(rc);
 	rc->bound = *p * (rc->range >> RC_MODEL_TOTAL_BITS);
-	return rc->code < rc->bound;
+	return rc->bound;
+}
+static always_inline int rc_is_bit_0(rc_t * rc, uint16_t * p)
+{
+	uint32_t t = rc_is_bit_0_helper(rc, p);
+	return rc->code < t;
 }
 
+/* Called ~10 times, but very small, thus inlined */
 static speed_inline void rc_update_bit_0(rc_t * rc, uint16_t * p)
 {
 	rc->range = rc->bound;
 	*p += ((1 << RC_MODEL_TOTAL_BITS) - *p) >> RC_MOVE_BITS;
 }
-
 static speed_inline void rc_update_bit_1(rc_t * rc, uint16_t * p)
 {
 	rc->range -= rc->bound;
@@ -120,7 +137,8 @@ static speed_inline void rc_update_bit_1(rc_t * rc, uint16_t * p)
 	*p -= *p >> RC_MOVE_BITS;
 }
 
-static speed_inline int rc_get_bit(rc_t * rc, uint16_t * p, int *symbol)
+/* Called 4 times in unlzma loop */
+static int rc_get_bit(rc_t * rc, uint16_t * p, int *symbol)
 {
 	if (rc_is_bit_0(rc, p)) {
 		rc_update_bit_0(rc, p);
@@ -133,6 +151,7 @@ static speed_inline int rc_get_bit(rc_t * rc, uint16_t * p, int *symbol)
 	}
 }
 
+/* Called once */
 static always_inline int rc_direct_bit(rc_t * rc)
 {
 	rc_normalize(rc);
@@ -144,6 +163,7 @@ static always_inline int rc_direct_bit(rc_t * rc)
 	return 0;
 }
 
+/* Called twice */
 static speed_inline void
 rc_bit_tree_decode(rc_t * rc, uint16_t * p, int num_levels, int *symbol)
 {
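The size win in this patch comes from one pattern applied twice: keep only the cheap, frequently-taken test in the always-inlined wrapper, and move the bulkier, rarely-taken body out of line so it is emitted once instead of at every callsite (rc_normalize vs. rc_do_normalize), while rc_is_bit_0 is split the other way so the `rc->code < rc->bound` compare stays visible to the optimizer at each inlined callsite. Below is a minimal standalone sketch of that pattern, not from the patch itself: the names (counter_t, bump, bump_slow) are hypothetical, and always_inline is assumed to be the usual GCC attribute macro, as busybox defines it.

/* Sketch of the out-of-line-the-cold-path pattern; hypothetical names. */
#include <stdio.h>

#define always_inline inline __attribute__((always_inline))

typedef struct {
	unsigned count;
	unsigned flushes;
} counter_t;

/* Rarely executed: left out of line, so it is emitted once in the binary. */
static void bump_slow(counter_t *c)
{
	c->flushes++;
	c->count = 0;
}

/* Hot path: only the compare-and-branch is inlined at every callsite. */
static always_inline void bump(counter_t *c)
{
	if (++c->count >= 256)
		bump_slow(c);
}

int main(void)
{
	counter_t c = { 0, 0 };
	for (int i = 0; i < 1000; i++)
		bump(&c);
	printf("flushes=%u count=%u\n", c.flushes, c.count);
	return 0;
}

Each inlined callsite of bump() then costs a compare plus a call in the cold case, rather than the whole slow-path body, which is the same trade rc_normalize() makes by delegating to rc_do_normalize().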