diff options
author | Denys Vlasenko <vda.linux@googlemail.com> | 2016-02-10 06:55:07 +0100 |
---|---|---|
committer | Denys Vlasenko <vda.linux@googlemail.com> | 2016-02-10 06:55:07 +0100 |
commit | fc47fcefb6401605b142c30025c597dc4d110087 (patch) | |
tree | eb3d686493f2c07e22260b0f539992e7f17bb7fc | |
parent | f2c043acfcf9dad9fd3d65821b81f89986bbe54e (diff) | |
download | busybox-w32-fc47fcefb6401605b142c30025c597dc4d110087.tar.gz busybox-w32-fc47fcefb6401605b142c30025c597dc4d110087.tar.bz2 busybox-w32-fc47fcefb6401605b142c30025c597dc4d110087.zip |
ntpd: step when |offset| > 1 sec, not 0.125 sec
update_local_clock 769 820 +51
recv_and_process_peer_pkt 838 862 +24
reset_peer_stats 137 133 -4
------------------------------------------------------------------------------
(add/remove: 0/0 grow/shrink: 2/1 up/down: 75/-4) Total: 71 bytes
Signed-off-by: Denys Vlasenko <vda.linux@googlemail.com>
-rw-r--r-- | networking/ntpd.c | 54 |
1 file changed, 36 insertions, 18 deletions
diff --git a/networking/ntpd.c b/networking/ntpd.c index 9732c9b1a..32590a185 100644 --- a/networking/ntpd.c +++ b/networking/ntpd.c | |||
@@ -112,7 +112,7 @@ | |||
112 | * | 112 | * |
113 | * Made some changes to speed up re-syncing after our clock goes bad | 113 | * Made some changes to speed up re-syncing after our clock goes bad |
114 | * (tested with suspending my laptop): | 114 | * (tested with suspending my laptop): |
115 | * - if largish offset (>= STEP_THRESHOLD * 8 == 1 sec) is seen | 115 | * - if largish offset (>= STEP_THRESHOLD == 1 sec) is seen |
116 | * from a peer, schedule next query for this peer soon | 116 | * from a peer, schedule next query for this peer soon |
117 | * without drastically lowering poll interval for everybody. | 117 | * without drastically lowering poll interval for everybody. |
118 | * This makes us collect enough data for step much faster: | 118 | * This makes us collect enough data for step much faster: |
@@ -131,11 +131,14 @@ | |||
131 | #define RESPONSE_INTERVAL 16 /* wait for reply up to N secs */ | 131 | #define RESPONSE_INTERVAL 16 /* wait for reply up to N secs */ |
132 | 132 | ||
133 | /* Step threshold (sec). std ntpd uses 0.128. | 133 | /* Step threshold (sec). std ntpd uses 0.128. |
134 | */ | ||
135 | #define STEP_THRESHOLD 1 | ||
136 | /* Slew threshold (sec): adjtimex() won't accept offsets larger than this. | ||
134 | * Using exact power of 2 (1/8) results in smaller code | 137 | * Using exact power of 2 (1/8) results in smaller code |
135 | */ | 138 | */ |
136 | #define STEP_THRESHOLD 0.125 | 139 | #define SLEW_THRESHOLD 0.125 |
137 | /* Stepout threshold (sec). std ntpd uses 900 (11 mins (!)) */ | 140 | /* Stepout threshold (sec). std ntpd uses 900 (11 mins (!)) */ |
138 | #define WATCH_THRESHOLD 128 | 141 | #define WATCH_THRESHOLD 128 |
139 | /* NB: set WATCH_THRESHOLD to ~60 when debugging to save time) */ | 142 | /* NB: set WATCH_THRESHOLD to ~60 when debugging to save time) */ |
140 | //UNUSED: #define PANIC_THRESHOLD 1000 /* panic threshold (sec) */ | 143 | //UNUSED: #define PANIC_THRESHOLD 1000 /* panic threshold (sec) */ |
141 | 144 | ||
@@ -143,7 +146,7 @@ | |||
143 | * If we got |offset| > BIGOFF from a peer, cap next query interval | 146 | * If we got |offset| > BIGOFF from a peer, cap next query interval |
144 | * for this peer by this many seconds: | 147 | * for this peer by this many seconds: |
145 | */ | 148 | */ |
146 | #define BIGOFF (STEP_THRESHOLD * 8) | 149 | #define BIGOFF STEP_THRESHOLD |
147 | #define BIGOFF_INTERVAL (1 << 7) /* 128 s */ | 150 | #define BIGOFF_INTERVAL (1 << 7) /* 128 s */ |
148 | 151 | ||
149 | #define FREQ_TOLERANCE 0.000015 /* frequency tolerance (15 PPM) */ | 152 | #define FREQ_TOLERANCE 0.000015 /* frequency tolerance (15 PPM) */ |
@@ -157,10 +160,10 @@ | |||
157 | #define MAXPOLL 12 /* maximum poll interval (12: 1.1h, 17: 36.4h). std ntpd uses 17 */ | 160 | #define MAXPOLL 12 /* maximum poll interval (12: 1.1h, 17: 36.4h). std ntpd uses 17 */ |
158 | /* | 161 | /* |
159 | * Actively lower poll when we see such big offsets. | 162 | * Actively lower poll when we see such big offsets. |
160 | * With STEP_THRESHOLD = 0.125, it means we try to sync more aggressively | 163 | * With SLEW_THRESHOLD = 0.125, it means we try to sync more aggressively |
161 | * if offset increases over ~0.04 sec | 164 | * if offset increases over ~0.04 sec |
162 | */ | 165 | */ |
163 | //#define POLLDOWN_OFFSET (STEP_THRESHOLD / 3) | 166 | //#define POLLDOWN_OFFSET (SLEW_THRESHOLD / 3) |
164 | #define MINDISP 0.01 /* minimum dispersion (sec) */ | 167 | #define MINDISP 0.01 /* minimum dispersion (sec) */ |
165 | #define MAXDISP 16 /* maximum dispersion (sec) */ | 168 | #define MAXDISP 16 /* maximum dispersion (sec) */ |
166 | #define MAXSTRAT 16 /* maximum stratum (infinity metric) */ | 169 | #define MAXSTRAT 16 /* maximum stratum (infinity metric) */ |
@@ -720,7 +723,7 @@ static void | |||
720 | reset_peer_stats(peer_t *p, double offset) | 723 | reset_peer_stats(peer_t *p, double offset) |
721 | { | 724 | { |
722 | int i; | 725 | int i; |
723 | bool small_ofs = fabs(offset) < 16 * STEP_THRESHOLD; | 726 | bool small_ofs = fabs(offset) < STEP_THRESHOLD; |
724 | 727 | ||
725 | /* Used to set p->filter_datapoint[i].d_dispersion = MAXDISP | 728 | /* Used to set p->filter_datapoint[i].d_dispersion = MAXDISP |
726 | * and clear reachable bits, but this proved to be too agressive: | 729 | * and clear reachable bits, but this proved to be too agressive: |
@@ -771,7 +774,7 @@ add_peers(const char *s) | |||
771 | p->p_fd = -1; | 774 | p->p_fd = -1; |
772 | p->p_xmt_msg.m_status = MODE_CLIENT | (NTP_VERSION << 3); | 775 | p->p_xmt_msg.m_status = MODE_CLIENT | (NTP_VERSION << 3); |
773 | p->next_action_time = G.cur_time; /* = set_next(p, 0); */ | 776 | p->next_action_time = G.cur_time; /* = set_next(p, 0); */ |
774 | reset_peer_stats(p, 16 * STEP_THRESHOLD); | 777 | reset_peer_stats(p, STEP_THRESHOLD); |
775 | 778 | ||
776 | llist_add_to(&G.ntp_peers, p); | 779 | llist_add_to(&G.ntp_peers, p); |
777 | G.peer_cnt++; | 780 | G.peer_cnt++; |
@@ -1638,14 +1641,7 @@ update_local_clock(peer_t *p) | |||
1638 | tmx.freq = G.discipline_freq_drift * 65536e6; | 1641 | tmx.freq = G.discipline_freq_drift * 65536e6; |
1639 | #endif | 1642 | #endif |
1640 | tmx.modes = ADJ_OFFSET | ADJ_STATUS | ADJ_TIMECONST;// | ADJ_MAXERROR | ADJ_ESTERROR; | 1643 | tmx.modes = ADJ_OFFSET | ADJ_STATUS | ADJ_TIMECONST;// | ADJ_MAXERROR | ADJ_ESTERROR; |
1641 | tmx.offset = (offset * 1000000); /* usec */ | 1644 | tmx.constant = (int)G.poll_exp - 4; |
1642 | tmx.status = STA_PLL; | ||
1643 | if (G.ntp_status & LI_PLUSSEC) | ||
1644 | tmx.status |= STA_INS; | ||
1645 | if (G.ntp_status & LI_MINUSSEC) | ||
1646 | tmx.status |= STA_DEL; | ||
1647 | |||
1648 | tmx.constant = (int)G.poll_exp - 4 > 0 ? (int)G.poll_exp - 4 : 0; | ||
1649 | /* EXPERIMENTAL. | 1645 | /* EXPERIMENTAL. |
1650 | * The below if statement should be unnecessary, but... | 1646 | * The below if statement should be unnecessary, but... |
1651 | * It looks like Linux kernel's PLL is far too gentle in changing | 1647 | * It looks like Linux kernel's PLL is far too gentle in changing |
@@ -1656,8 +1652,27 @@ update_local_clock(peer_t *p) | |||
1656 | * To be on a safe side, let's do it only if offset is significantly | 1652 | * To be on a safe side, let's do it only if offset is significantly |
1657 | * larger than jitter. | 1653 | * larger than jitter. |
1658 | */ | 1654 | */ |
1659 | if (tmx.constant > 0 && G.offset_to_jitter_ratio >= TIMECONST_HACK_GATE) | 1655 | if (G.offset_to_jitter_ratio >= TIMECONST_HACK_GATE) |
1660 | tmx.constant--; | 1656 | tmx.constant--; |
1657 | tmx.offset = (long)(offset * 1000000); /* usec */ | ||
1658 | if (SLEW_THRESHOLD < STEP_THRESHOLD) { | ||
1659 | if (tmx.offset > (long)(SLEW_THRESHOLD * 1000000)) { | ||
1660 | tmx.offset = (long)(SLEW_THRESHOLD * 1000000); | ||
1661 | tmx.constant--; | ||
1662 | } | ||
1663 | if (tmx.offset < -(long)(SLEW_THRESHOLD * 1000000)) { | ||
1664 | tmx.offset = -(long)(SLEW_THRESHOLD * 1000000); | ||
1665 | tmx.constant--; | ||
1666 | } | ||
1667 | } | ||
1668 | if (tmx.constant < 0) | ||
1669 | tmx.constant = 0; | ||
1670 | |||
1671 | tmx.status = STA_PLL; | ||
1672 | if (G.ntp_status & LI_PLUSSEC) | ||
1673 | tmx.status |= STA_INS; | ||
1674 | if (G.ntp_status & LI_MINUSSEC) | ||
1675 | tmx.status |= STA_DEL; | ||
1661 | 1676 | ||
1662 | //tmx.esterror = (uint32_t)(clock_jitter * 1e6); | 1677 | //tmx.esterror = (uint32_t)(clock_jitter * 1e6); |
1663 | //tmx.maxerror = (uint32_t)((sys_rootdelay / 2 + sys_rootdisp) * 1e6); | 1678 | //tmx.maxerror = (uint32_t)((sys_rootdelay / 2 + sys_rootdisp) * 1e6); |
@@ -1931,6 +1946,9 @@ recv_and_process_peer_pkt(peer_t *p) | |||
1931 | increase_interval: | 1946 | increase_interval: |
1932 | adjust_poll(MINPOLL); | 1947 | adjust_poll(MINPOLL); |
1933 | } else { | 1948 | } else { |
1949 | VERB3 if (rc > 0) | ||
1950 | bb_error_msg("want smaller poll interval: offset/jitter ratio > %u", | ||
1951 | POLLADJ_GATE); | ||
1934 | adjust_poll(-G.poll_exp * 2); | 1952 | adjust_poll(-G.poll_exp * 2); |
1935 | } | 1953 | } |
1936 | } | 1954 | } |
@@ -1939,7 +1957,7 @@ recv_and_process_peer_pkt(peer_t *p) | |||
1939 | pick_normal_interval: | 1957 | pick_normal_interval: |
1940 | interval = poll_interval(INT_MAX); | 1958 | interval = poll_interval(INT_MAX); |
1941 | if (fabs(offset) >= BIGOFF && interval > BIGOFF_INTERVAL) { | 1959 | if (fabs(offset) >= BIGOFF && interval > BIGOFF_INTERVAL) { |
1942 | /* If we are synced, offsets are less than STEP_THRESHOLD, | 1960 | /* If we are synced, offsets are less than SLEW_THRESHOLD, |
1943 | * or at the very least not much larger than it. | 1961 | * or at the very least not much larger than it. |
1944 | * Now we see a largish one. | 1962 | * Now we see a largish one. |
1945 | * Either this peer is feeling bad, or packet got corrupted, | 1963 | * Either this peer is feeling bad, or packet got corrupted, |