author | Denys Vlasenko <vda.linux@googlemail.com> | 2009-12-02 01:51:24 +0100 |
---|---|---|
committer | Denys Vlasenko <vda.linux@googlemail.com> | 2009-12-02 01:51:24 +0100 |
commit | 386960a38e5aee4f56a04214f86709c71dff120d (patch) | |
tree | 416cf9f19ee69749fbeb33931bc464edafe74321 /networking/ntpd.c | |
parent | fae9f499b2ab3becb8a672d982c50f318114dee9 (diff) | |
download | busybox-w32-386960a38e5aee4f56a04214f86709c71dff120d.tar.gz busybox-w32-386960a38e5aee4f56a04214f86709c71dff120d.tar.bz2 busybox-w32-386960a38e5aee4f56a04214f86709c71dff120d.zip |
ntpd: better logging; removed some unused code and data. -212 bytes
Signed-off-by: Denys Vlasenko <vda.linux@googlemail.com>
Diffstat (limited to 'networking/ntpd.c')
-rw-r--r-- | networking/ntpd.c | 443 |
1 file changed, 222 insertions, 221 deletions
diff --git a/networking/ntpd.c b/networking/ntpd.c
index f10a81cee..3b9e7b7ff 100644
--- a/networking/ntpd.c
+++ b/networking/ntpd.c
@@ -67,27 +67,21 @@ enum { | |||
67 | }; | 67 | }; |
68 | 68 | ||
69 | typedef struct { | 69 | typedef struct { |
70 | uint8_t status; /* status of local clock and leap info */ | 70 | uint8_t m_status; /* status of local clock and leap info */ |
71 | uint8_t stratum; /* stratum level */ | 71 | uint8_t m_stratum; /* stratum level */ |
72 | uint8_t ppoll; /* poll value */ | 72 | uint8_t m_ppoll; /* poll value */ |
73 | int8_t precision; | 73 | int8_t m_precision; |
74 | s_fixedpt_t rootdelay; | 74 | s_fixedpt_t m_rootdelay; |
75 | s_fixedpt_t dispersion; | 75 | s_fixedpt_t m_dispersion; |
76 | uint32_t refid; | 76 | uint32_t m_refid; |
77 | l_fixedpt_t reftime; | 77 | l_fixedpt_t m_reftime; |
78 | l_fixedpt_t orgtime; | 78 | l_fixedpt_t m_orgtime; |
79 | l_fixedpt_t rectime; | 79 | l_fixedpt_t m_rectime; |
80 | l_fixedpt_t xmttime; | 80 | l_fixedpt_t m_xmttime; |
81 | uint32_t keyid; | 81 | uint32_t m_keyid; |
82 | uint8_t digest[NTP_DIGESTSIZE]; | 82 | uint8_t m_digest[NTP_DIGESTSIZE]; |
83 | } ntp_msg_t; | 83 | } ntp_msg_t; |
84 | 84 | ||
85 | typedef struct { | ||
86 | int fd; | ||
87 | ntp_msg_t msg; | ||
88 | double xmttime; | ||
89 | } ntp_query_t; | ||
90 | |||
91 | enum { | 85 | enum { |
92 | NTP_VERSION = 4, | 86 | NTP_VERSION = 4, |
93 | NTP_MAXSTRATUM = 15, | 87 | NTP_MAXSTRATUM = 15, |
@@ -116,48 +110,38 @@ enum { | |||
116 | 110 | ||
117 | #define OFFSET_1900_1970 2208988800UL /* 1970 - 1900 in seconds */ | 111 | #define OFFSET_1900_1970 2208988800UL /* 1970 - 1900 in seconds */ |
118 | 112 | ||
119 | enum client_state { | ||
120 | STATE_NONE, | ||
121 | STATE_QUERY_SENT, | ||
122 | STATE_REPLY_RECEIVED, | ||
123 | }; | ||
124 | |||
125 | typedef struct { | 113 | typedef struct { |
126 | double rootdelay; | 114 | double o_offset; |
127 | double rootdispersion; | 115 | double o_delay; |
128 | double reftime; | 116 | //UNUSED: double o_error; |
129 | uint32_t refid; | 117 | time_t o_rcvd; |
130 | uint32_t refid4; | 118 | uint32_t o_refid4; |
131 | uint8_t synced; | 119 | uint8_t o_leap; |
132 | uint8_t leap; | 120 | uint8_t o_stratum; |
133 | int8_t precision; | 121 | uint8_t o_good; |
134 | uint8_t poll; | ||
135 | uint8_t stratum; | ||
136 | } ntp_status_t; | ||
137 | |||
138 | typedef struct { | ||
139 | ntp_status_t status; | ||
140 | double offset; | ||
141 | double delay; | ||
142 | double error; | ||
143 | time_t rcvd; | ||
144 | uint8_t good; | ||
145 | } ntp_offset_t; | 122 | } ntp_offset_t; |
146 | 123 | ||
147 | typedef struct { | 124 | typedef struct { |
148 | //TODO: | 125 | //TODO: periodically re-resolve DNS names? |
149 | // (1) store dotted addr str, to avoid constant translations | ||
150 | // (2) periodically re-resolve DNS names | ||
151 | len_and_sockaddr *lsa; | 126 | len_and_sockaddr *lsa; |
152 | ntp_query_t query; | 127 | char *dotted; |
153 | ntp_offset_t reply[OFFSET_ARRAY_SIZE]; | 128 | double xmttime; |
154 | ntp_offset_t update; | ||
155 | enum client_state state; | ||
156 | time_t next; | 129 | time_t next; |
157 | time_t deadline; | 130 | time_t deadline; |
131 | int fd; | ||
132 | uint8_t state; | ||
158 | uint8_t shift; | 133 | uint8_t shift; |
159 | uint8_t trustlevel; | 134 | uint8_t trustlevel; |
135 | ntp_msg_t msg; | ||
136 | ntp_offset_t update; | ||
137 | ntp_offset_t reply[OFFSET_ARRAY_SIZE]; | ||
160 | } ntp_peer_t; | 138 | } ntp_peer_t; |
139 | /* for ntp_peer_t::state */ | ||
140 | enum { | ||
141 | STATE_NONE, | ||
142 | STATE_QUERY_SENT, | ||
143 | STATE_REPLY_RECEIVED, | ||
144 | }; | ||
161 | 145 | ||
162 | enum { | 146 | enum { |
163 | OPT_n = (1 << 0), | 147 | OPT_n = (1 << 0), |
@@ -172,14 +156,21 @@ enum { | |||
172 | 156 | ||
173 | 157 | ||
174 | struct globals { | 158 | struct globals { |
175 | unsigned verbose; | 159 | double rootdelay; |
160 | double reftime; | ||
161 | llist_t *ntp_peers; | ||
176 | #if ENABLE_FEATURE_NTPD_SERVER | 162 | #if ENABLE_FEATURE_NTPD_SERVER |
177 | int listen_fd; | 163 | int listen_fd; |
178 | #endif | 164 | #endif |
165 | unsigned verbose; | ||
179 | unsigned peer_cnt; | 166 | unsigned peer_cnt; |
180 | llist_t *ntp_peers; | 167 | uint32_t refid; |
181 | ntp_status_t status; | 168 | uint32_t refid4; |
182 | uint32_t scale; | 169 | uint32_t scale; |
170 | uint8_t synced; | ||
171 | uint8_t leap; | ||
172 | int8_t precision; | ||
173 | uint8_t stratum; | ||
183 | uint8_t time_is_set; | 174 | uint8_t time_is_set; |
184 | uint8_t first_adj_done; | 175 | uint8_t first_adj_done; |
185 | }; | 176 | }; |
@@ -190,7 +181,7 @@ static const int const_IPTOS_LOWDELAY = IPTOS_LOWDELAY; | |||
190 | 181 | ||
191 | 182 | ||
192 | static void | 183 | static void |
193 | set_next(ntp_peer_t *p, time_t t) | 184 | set_next(ntp_peer_t *p, unsigned t) |
194 | { | 185 | { |
195 | p->next = time(NULL) + t; | 186 | p->next = time(NULL) + t; |
196 | p->deadline = 0; | 187 | p->deadline = 0; |
@@ -204,12 +195,12 @@ add_peers(const char *s) | |||
204 | p = xzalloc(sizeof(*p)); | 195 | p = xzalloc(sizeof(*p)); |
205 | //TODO: big ntpd uses all IPs, not just 1st, do we need to mimic that? | 196 | //TODO: big ntpd uses all IPs, not just 1st, do we need to mimic that? |
206 | p->lsa = xhost2sockaddr(s, 123); | 197 | p->lsa = xhost2sockaddr(s, 123); |
207 | p->query.fd = -1; | 198 | p->dotted = xmalloc_sockaddr2dotted_noport(&p->lsa->u.sa); |
208 | p->query.msg.status = MODE_CLIENT | (NTP_VERSION << 3); | 199 | p->fd = -1; |
200 | p->msg.m_status = MODE_CLIENT | (NTP_VERSION << 3); | ||
209 | if (STATE_NONE != 0) | 201 | if (STATE_NONE != 0) |
210 | p->state = STATE_NONE; | 202 | p->state = STATE_NONE; |
211 | p->trustlevel = TRUSTLEVEL_PATHETIC; | 203 | p->trustlevel = TRUSTLEVEL_PATHETIC; |
212 | p->query.fd = -1; | ||
213 | set_next(p, 0); | 204 | set_next(p, 0); |
214 | 205 | ||
215 | llist_add_to(&G.ntp_peers, p); | 206 | llist_add_to(&G.ntp_peers, p); |
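The hunk above caches the peer's printable address once, at add_peers() time, instead of converting the sockaddr on every log message. Below is a minimal sketch of the same pattern using plain POSIX resolver calls rather than libbb's xhost2sockaddr()/xmalloc_sockaddr2dotted_noport(); the names resolve_peer and peer_addr_t are illustrative only.

```c
/* Resolve a peer once and cache both the sockaddr and its printable
 * form, so later log messages need no further conversions.
 * resolve_peer()/peer_addr_t are illustrative names, not busybox's. */
#include <netdb.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

typedef struct {
    struct sockaddr_storage sa;
    socklen_t salen;
    char dotted[INET6_ADDRSTRLEN];   /* cached numeric address for logging */
} peer_addr_t;

static int resolve_peer(const char *host, peer_addr_t *p)
{
    struct addrinfo hints, *res;

    memset(&hints, 0, sizeof(hints));
    hints.ai_socktype = SOCK_DGRAM;
    if (getaddrinfo(host, "123", &hints, &res) != 0)
        return -1;
    /* as the TODO above notes, only the first returned address is used */
    memcpy(&p->sa, res->ai_addr, res->ai_addrlen);
    p->salen = res->ai_addrlen;
    getnameinfo(res->ai_addr, res->ai_addrlen,
                p->dotted, sizeof(p->dotted), NULL, 0, NI_NUMERICHOST);
    freeaddrinfo(res);
    return 0;
}
```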
@@ -241,6 +232,7 @@ lfp_to_d(l_fixedpt_t lfp) | |||
241 | return ret; | 232 | return ret; |
242 | } | 233 | } |
243 | 234 | ||
235 | #if 0 //UNUSED | ||
244 | static double | 236 | static double |
245 | sfp_to_d(s_fixedpt_t sfp) | 237 | sfp_to_d(s_fixedpt_t sfp) |
246 | { | 238 | { |
@@ -250,6 +242,7 @@ sfp_to_d(s_fixedpt_t sfp) | |||
250 | ret = (double)sfp.int_parts + ((double)sfp.fractions / USHRT_MAX); | 242 | ret = (double)sfp.int_parts + ((double)sfp.fractions / USHRT_MAX); |
251 | return ret; | 243 | return ret; |
252 | } | 244 | } |
245 | #endif | ||
253 | 246 | ||
254 | #if ENABLE_FEATURE_NTPD_SERVER | 247 | #if ENABLE_FEATURE_NTPD_SERVER |
255 | static l_fixedpt_t | 248 | static l_fixedpt_t |
@@ -282,17 +275,17 @@ set_deadline(ntp_peer_t *p, time_t t) | |||
282 | p->next = 0; | 275 | p->next = 0; |
283 | } | 276 | } |
284 | 277 | ||
285 | static time_t | 278 | static unsigned |
286 | error_interval(void) | 279 | error_interval(void) |
287 | { | 280 | { |
288 | time_t interval, r; | 281 | unsigned interval, r; |
289 | interval = INTERVAL_QUERY_PATHETIC * QSCALE_OFF_MAX / QSCALE_OFF_MIN; | 282 | interval = INTERVAL_QUERY_PATHETIC * QSCALE_OFF_MAX / QSCALE_OFF_MIN; |
290 | r = (unsigned)random() % (unsigned long)(interval / 10); | 283 | r = (unsigned)random() % (unsigned)(interval / 10); |
291 | return (interval + r); | 284 | return (interval + r); |
292 | } | 285 | } |
293 | 286 | ||
294 | static int | 287 | static int |
295 | sendmsg_wrap(int fd, | 288 | do_sendto(int fd, |
296 | const struct sockaddr *from, const struct sockaddr *to, socklen_t addrlen, | 289 | const struct sockaddr *from, const struct sockaddr *to, socklen_t addrlen, |
297 | ntp_msg_t *msg, ssize_t len) | 290 | ntp_msg_t *msg, ssize_t len) |
298 | { | 291 | { |
@@ -331,16 +324,16 @@ send_query_to_peer(ntp_peer_t *p) | |||
331 | // Uncomment this and use strace to see it in action: | 324 | // Uncomment this and use strace to see it in action: |
332 | #define PROBE_LOCAL_ADDR // { len_and_sockaddr lsa; lsa.len = LSA_SIZEOF_SA; getsockname(p->query.fd, &lsa.u.sa, &lsa.len); } | 325 | #define PROBE_LOCAL_ADDR // { len_and_sockaddr lsa; lsa.len = LSA_SIZEOF_SA; getsockname(p->query.fd, &lsa.u.sa, &lsa.len); } |
333 | 326 | ||
334 | if (p->query.fd == -1) { | 327 | if (p->fd == -1) { |
335 | int fd, family; | 328 | int fd, family; |
336 | len_and_sockaddr *local_lsa; | 329 | len_and_sockaddr *local_lsa; |
337 | 330 | ||
338 | family = p->lsa->u.sa.sa_family; | 331 | family = p->lsa->u.sa.sa_family; |
339 | //was: p->query.fd = xsocket(family, SOCK_DGRAM, 0); | 332 | //was: p->fd = xsocket(family, SOCK_DGRAM, 0); |
340 | p->query.fd = fd = xsocket_type(&local_lsa, family, SOCK_DGRAM); | 333 | p->fd = fd = xsocket_type(&local_lsa, family, SOCK_DGRAM); |
341 | /* local_lsa has "null" address and port 0 now. | 334 | /* local_lsa has "null" address and port 0 now. |
342 | * bind() ensures we have a *particular port* selected by kernel | 335 | * bind() ensures we have a *particular port* selected by kernel |
343 | * and remembered in p->query.fd, thus later recv(p->query.fd) | 336 | * and remembered in p->fd, thus later recv(p->fd) |
344 | * receives only packets sent to this port. | 337 | * receives only packets sent to this port. |
345 | */ | 338 | */ |
346 | PROBE_LOCAL_ADDR | 339 | PROBE_LOCAL_ADDR |
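A minimal sketch of the bind-to-an-ephemeral-port pattern the comment above describes, using raw BSD socket calls instead of libbb's xsocket_type(); the helper name is hypothetical, IPv4 only, and error handling is reduced to -1 returns.

```c
/* Bind a UDP query socket to an ephemeral port chosen by the kernel,
 * so recv() on it only ever sees replies to our own queries. */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int open_query_socket(void)
{
    struct sockaddr_in local;
    int fd = socket(AF_INET, SOCK_DGRAM, 0);

    if (fd < 0)
        return -1;
    memset(&local, 0, sizeof(local));
    local.sin_family = AF_INET;
    local.sin_addr.s_addr = htonl(INADDR_ANY);
    local.sin_port = 0;   /* 0: let the kernel pick and remember a port */
    if (bind(fd, (struct sockaddr *)&local, sizeof(local)) < 0) {
        close(fd);
        return -1;
    }
    return fd;
}
```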
@@ -367,16 +360,19 @@ send_query_to_peer(ntp_peer_t *p) | |||
367 | * Save the real transmit timestamp locally. | 360 | * Save the real transmit timestamp locally. |
368 | */ | 361 | */ |
369 | 362 | ||
370 | p->query.msg.xmttime.int_partl = random(); | 363 | p->msg.m_xmttime.int_partl = random(); |
371 | p->query.msg.xmttime.fractionl = random(); | 364 | p->msg.m_xmttime.fractionl = random(); |
372 | p->query.xmttime = gettime1900fp(); | 365 | p->xmttime = gettime1900fp(); |
373 | 366 | ||
374 | if (sendmsg_wrap(p->query.fd, /*from:*/ NULL, /*to:*/ &p->lsa->u.sa, /*addrlen:*/ p->lsa->len, | 367 | if (do_sendto(p->fd, /*from:*/ NULL, /*to:*/ &p->lsa->u.sa, /*addrlen:*/ p->lsa->len, |
375 | &p->query.msg, NTP_MSGSIZE_NOAUTH) == -1) { | 368 | &p->msg, NTP_MSGSIZE_NOAUTH) == -1 |
369 | ) { | ||
376 | set_next(p, INTERVAL_QUERY_PATHETIC); | 370 | set_next(p, INTERVAL_QUERY_PATHETIC); |
377 | return -1; | 371 | return -1; |
378 | } | 372 | } |
379 | 373 | ||
374 | if (G.verbose) | ||
375 | bb_error_msg("sent request to %s", p->dotted); | ||
380 | p->state = STATE_QUERY_SENT; | 376 | p->state = STATE_QUERY_SENT; |
381 | set_deadline(p, QUERYTIME_MAX); | 377 | set_deadline(p, QUERYTIME_MAX); |
382 | 378 | ||
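The transmit timestamp sent on the wire is random data while the real send time is kept locally; a reply must echo the random value back in its originate timestamp, which serves as a weak transaction ID (checked further down in recv_and_process_peer_pkt). A self-contained sketch of that idea; l_fixedpt_t is redeclared here only to keep the sketch compilable, and the other names are illustrative.

```c
/* Random bits go into the packet's transmit-timestamp field; the real
 * send time stays on our side and is later used as T1 in the offset math. */
#include <stdint.h>
#include <stdlib.h>

typedef struct {
    uint32_t int_partl;
    uint32_t fractionl;
} l_fixedpt_t;

typedef struct {
    l_fixedpt_t sent_xmttime;  /* what went on the wire (random) */
    double real_xmttime;       /* real send time, kept locally */
} pending_query_t;

static void fill_xmttime(pending_query_t *q, l_fixedpt_t *wire, double now)
{
    wire->int_partl = random();
    wire->fractionl = random();
    q->sent_xmttime = *wire;
    q->real_xmttime = now;
}

/* A reply is ours only if its originate timestamp echoes our random bits. */
static int reply_matches(const pending_query_t *q, const l_fixedpt_t *orgtime)
{
    return orgtime->int_partl == q->sent_xmttime.int_partl
        && orgtime->fractionl == q->sent_xmttime.fractionl;
}
```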
@@ -384,13 +380,13 @@ send_query_to_peer(ntp_peer_t *p) | |||
384 | } | 380 | } |
385 | 381 | ||
386 | static int | 382 | static int |
387 | offset_compare(const void *aa, const void *bb) | 383 | compare_offsets(const void *aa, const void *bb) |
388 | { | 384 | { |
389 | const ntp_peer_t *const *a = aa; | 385 | const ntp_peer_t *const *a = aa; |
390 | const ntp_peer_t *const *b = bb; | 386 | const ntp_peer_t *const *b = bb; |
391 | if ((*a)->update.offset < (*b)->update.offset) | 387 | if ((*a)->update.o_offset < (*b)->update.o_offset) |
392 | return -1; | 388 | return -1; |
393 | return ((*a)->update.offset > (*b)->update.offset); | 389 | return ((*a)->update.o_offset > (*b)->update.o_offset); |
394 | } | 390 | } |
395 | 391 | ||
396 | static uint32_t | 392 | static uint32_t |
@@ -406,24 +402,20 @@ updated_scale(double offset) | |||
406 | } | 402 | } |
407 | 403 | ||
408 | static void | 404 | static void |
409 | adjtime_wrap(void) | 405 | slew_time(void) |
410 | { | 406 | { |
411 | ntp_peer_t *p; | 407 | ntp_peer_t *p; |
408 | llist_t *item; | ||
412 | unsigned offset_cnt; | 409 | unsigned offset_cnt; |
413 | unsigned middle; | ||
414 | int i = 0; | ||
415 | ntp_peer_t **peers; | ||
416 | double offset_median; | 410 | double offset_median; |
417 | llist_t *item; | 411 | struct timeval tv; |
418 | len_and_sockaddr *lsa; | ||
419 | struct timeval tv, olddelta; | ||
420 | 412 | ||
421 | offset_cnt = 0; | 413 | offset_cnt = 0; |
422 | for (item = G.ntp_peers; item != NULL; item = item->link) { | 414 | for (item = G.ntp_peers; item != NULL; item = item->link) { |
423 | p = (ntp_peer_t *) item->data; | 415 | p = (ntp_peer_t *) item->data; |
424 | if (p->trustlevel < TRUSTLEVEL_BADPEER) | 416 | if (p->trustlevel < TRUSTLEVEL_BADPEER) |
425 | continue; | 417 | continue; |
426 | if (!p->update.good) | 418 | if (!p->update.o_good) |
427 | return; | 419 | return; |
428 | offset_cnt++; | 420 | offset_cnt++; |
429 | } | 421 | } |
@@ -431,68 +423,74 @@ adjtime_wrap(void) | |||
431 | if (offset_cnt == 0) | 423 | if (offset_cnt == 0) |
432 | goto clear_good; | 424 | goto clear_good; |
433 | 425 | ||
434 | peers = xzalloc(sizeof(peers[0]) * offset_cnt); | 426 | { |
435 | for (item = G.ntp_peers; item != NULL; item = item->link) { | 427 | len_and_sockaddr *lsa; |
436 | p = (ntp_peer_t *) item->data; | 428 | unsigned middle; |
437 | if (p->trustlevel < TRUSTLEVEL_BADPEER) | 429 | unsigned i = 0; |
438 | continue; | 430 | ntp_peer_t **peers = xzalloc(sizeof(peers[0]) * offset_cnt); |
439 | peers[i++] = p; | 431 | |
440 | } | 432 | for (item = G.ntp_peers; item != NULL; item = item->link) { |
441 | 433 | p = (ntp_peer_t *) item->data; | |
442 | qsort(peers, offset_cnt, sizeof(peers[0]), offset_compare); | 434 | if (p->trustlevel < TRUSTLEVEL_BADPEER) |
435 | continue; | ||
436 | peers[i++] = p; | ||
437 | } | ||
443 | 438 | ||
444 | middle = offset_cnt / 2; | 439 | qsort(peers, offset_cnt, sizeof(peers[0]), compare_offsets); |
445 | if (middle != 0 && (offset_cnt & 1) == 0) { | 440 | |
446 | offset_median = (peers[middle-1]->update.offset + peers[middle]->update.offset) / 2; | 441 | middle = offset_cnt / 2; |
447 | G.status.rootdelay = (peers[middle-1]->update.delay + peers[middle]->update.delay) / 2; | 442 | if (middle != 0 && (offset_cnt & 1) == 0) { |
448 | G.status.stratum = MAX(peers[middle-1]->update.status.stratum, peers[middle]->update.status.stratum); | 443 | offset_median = (peers[middle-1]->update.o_offset + peers[middle]->update.o_offset) / 2; |
449 | } else { | 444 | G.rootdelay = (peers[middle-1]->update.o_delay + peers[middle]->update.o_delay) / 2; |
450 | offset_median = peers[middle]->update.offset; | 445 | G.stratum = 1 + MAX(peers[middle-1]->update.o_stratum, peers[middle]->update.o_stratum); |
451 | G.status.rootdelay = peers[middle]->update.delay; | 446 | } else { |
452 | G.status.stratum = peers[middle]->update.status.stratum; | 447 | offset_median = peers[middle]->update.o_offset; |
448 | G.rootdelay = peers[middle]->update.o_delay; | ||
449 | G.stratum = 1 + peers[middle]->update.o_stratum; | ||
450 | } | ||
451 | G.leap = peers[middle]->update.o_leap; | ||
452 | G.refid4 = peers[middle]->update.o_refid4; | ||
453 | lsa = peers[middle]->lsa; | ||
454 | G.refid = | ||
455 | #if ENABLE_FEATURE_IPV6 | ||
456 | lsa->u.sa.sa_family != AF_INET ? | ||
457 | G.refid4 : | ||
458 | #endif | ||
459 | lsa->u.sin.sin_addr.s_addr; | ||
460 | free(peers); | ||
453 | } | 461 | } |
454 | G.status.leap = peers[middle]->update.status.leap; | ||
455 | 462 | ||
456 | bb_info_msg("adjusting local clock by %fs", offset_median); | 463 | bb_error_msg("adjusting clock by %fs, our stratum is %u", offset_median, G.stratum); |
464 | |||
457 | errno = 0; | 465 | errno = 0; |
458 | d_to_tv(offset_median, &tv); | 466 | d_to_tv(offset_median, &tv); |
459 | if (adjtime(&tv, &olddelta) == -1) { | 467 | if (adjtime(&tv, &tv) == -1) { |
460 | bb_perror_msg("adjtime failed"); //TODO: maybe _and_die? | 468 | bb_perror_msg("adjtime failed"); //TODO: maybe _and_die? |
461 | } else | 469 | } else { |
462 | if (G.first_adj_done | 470 | if (G.verbose >= 2) |
463 | && olddelta.tv_sec == 0 | 471 | bb_error_msg("old adjust: %d.%06u", (int)tv.tv_sec, (unsigned)tv.tv_usec); |
464 | && olddelta.tv_usec == 0 | 472 | if (G.first_adj_done |
465 | && !G.status.synced | 473 | && tv.tv_sec == 0 |
466 | ) { | 474 | && tv.tv_usec == 0 // TODO: allow for tiny values? |
467 | bb_info_msg("clock synced"); | 475 | && !G.synced |
468 | G.status.synced = 1; | 476 | ) { |
469 | } else | 477 | G.synced = 1; |
470 | if (G.status.synced) { | 478 | bb_error_msg("clock %ssynced", ""); |
471 | bb_info_msg("clock unsynced"); | 479 | } else |
472 | G.status.synced = 0; | 480 | if (G.synced) { |
481 | G.synced = 0; | ||
482 | bb_error_msg("clock %ssynced", "un"); | ||
483 | } | ||
473 | } | 484 | } |
474 | 485 | ||
475 | G.first_adj_done = 1; | 486 | G.first_adj_done = 1; |
476 | G.status.reftime = gettime1900fp(); | 487 | G.reftime = gettime1900fp(); |
477 | G.status.stratum++; /* one more than selected peer */ | ||
478 | G.scale = updated_scale(offset_median); | 488 | G.scale = updated_scale(offset_median); |
479 | 489 | ||
480 | G.status.refid4 = peers[middle]->update.status.refid4; | ||
481 | |||
482 | lsa = peers[middle]->lsa; | ||
483 | G.status.refid = | ||
484 | #if ENABLE_FEATURE_IPV6 | ||
485 | lsa->u.sa.sa_family != AF_INET ? | ||
486 | G.status.refid4 : | ||
487 | #endif | ||
488 | lsa->u.sin.sin_addr.s_addr; | ||
489 | |||
490 | free(peers); | ||
491 | |||
492 | clear_good: | 490 | clear_good: |
493 | for (item = G.ntp_peers; item != NULL; item = item->link) { | 491 | for (item = G.ntp_peers; item != NULL; item = item->link) { |
494 | p = (ntp_peer_t *) item->data; | 492 | p = (ntp_peer_t *) item->data; |
495 | p->update.good = 0; | 493 | p->update.o_good = 0; |
496 | } | 494 | } |
497 | } | 495 | } |
498 | 496 | ||
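A rough sketch of the slewing step performed above: convert the median offset to a struct timeval and hand it to adjtime(), which skews the clock gradually instead of stepping it. d_to_tv() below mirrors the purpose of the helper used in the hunk but is not copied from it.

```c
/* Slew the clock by a fractional-second offset via adjtime(). */
#include <stdio.h>
#include <sys/time.h>

static void d_to_tv(double d, struct timeval *tv)
{
    tv->tv_sec = (long)d;
    tv->tv_usec = (long)((d - tv->tv_sec) * 1000000);
}

static int slew_by(double offset_median)
{
    struct timeval tv;

    d_to_tv(offset_median, &tv);
    /* adjtime() reports the remains of any previous adjustment through
     * its second argument; the new code reuses one variable for both. */
    if (adjtime(&tv, &tv) == -1) {
        perror("adjtime");
        return -1;
    }
    printf("previous adjustment had %ld.%06ld s left\n",
           (long)tv.tv_sec, (long)tv.tv_usec);
    return 0;
}
```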
@@ -530,7 +528,7 @@ step_time_once(double offset) | |||
530 | strftime(buf, sizeof(buf), "%a %b %e %H:%M:%S %Z %Y", localtime(&tval)); | 528 | strftime(buf, sizeof(buf), "%a %b %e %H:%M:%S %Z %Y", localtime(&tval)); |
531 | 529 | ||
532 | /* Do we want to print message below to system log when daemonized? */ | 530 | /* Do we want to print message below to system log when daemonized? */ |
533 | bb_info_msg("set local clock to %s (offset %fs)", buf, offset); | 531 | bb_error_msg("set local clock to %s (offset %fs)", buf, offset); |
534 | 532 | ||
535 | for (item = G.ntp_peers; item != NULL; item = item->link) { | 533 | for (item = G.ntp_peers; item != NULL; item = item->link) { |
536 | p = (ntp_peer_t *) item->data; | 534 | p = (ntp_peer_t *) item->data; |
@@ -548,69 +546,68 @@ step_time_once(double offset) | |||
548 | static void | 546 | static void |
549 | update_peer_data(ntp_peer_t *p) | 547 | update_peer_data(ntp_peer_t *p) |
550 | { | 548 | { |
551 | int i, best = 0, good = 0; | 549 | /* Clock filter. |
552 | 550 | * Find the offset which arrived with the lowest delay. | |
553 | /* | 551 | * Use that as the peer update. |
554 | * clock filter | 552 | * Invalidate it and all older ones. |
555 | * find the offset which arrived with the lowest delay | ||
556 | * use that as the peer update | ||
557 | * invalidate it and all older ones | ||
558 | */ | 553 | */ |
554 | int i; | ||
555 | int best = best; /* for compiler */ | ||
556 | int good; | ||
559 | 557 | ||
560 | for (i = 0; good == 0 && i < OFFSET_ARRAY_SIZE; i++) { | 558 | good = 0; |
561 | if (p->reply[i].good) { | 559 | for (i = 0; i < OFFSET_ARRAY_SIZE; i++) { |
560 | if (p->reply[i].o_good) { | ||
562 | good++; | 561 | good++; |
563 | best = i; | 562 | best = i++; |
563 | break; | ||
564 | } | 564 | } |
565 | } | 565 | } |
566 | 566 | ||
567 | for (; i < OFFSET_ARRAY_SIZE; i++) { | 567 | for (; i < OFFSET_ARRAY_SIZE; i++) { |
568 | if (p->reply[i].good) { | 568 | if (p->reply[i].o_good) { |
569 | good++; | 569 | good++; |
570 | if (p->reply[i].delay < p->reply[best].delay) | 570 | if (p->reply[i].o_delay < p->reply[best].o_delay) |
571 | best = i; | 571 | best = i; |
572 | } | 572 | } |
573 | } | 573 | } |
574 | 574 | ||
575 | if (good < 8) | 575 | if (good < 8) //FIXME: was it meant to be OFFSET_ARRAY_SIZE, not 8? |
576 | return; | 576 | return; |
577 | 577 | ||
578 | memcpy(&p->update, &p->reply[best], sizeof(p->update)); | 578 | memcpy(&p->update, &p->reply[best], sizeof(p->update)); |
579 | adjtime_wrap(); | 579 | slew_time(); |
580 | 580 | ||
581 | for (i = 0; i < OFFSET_ARRAY_SIZE; i++) | 581 | for (i = 0; i < OFFSET_ARRAY_SIZE; i++) |
582 | if (p->reply[i].rcvd <= p->reply[best].rcvd) | 582 | if (p->reply[i].o_rcvd <= p->reply[best].o_rcvd) |
583 | p->reply[i].good = 0; | 583 | p->reply[i].o_good = 0; |
584 | } | 584 | } |
585 | 585 | ||
586 | static time_t | 586 | static unsigned |
587 | scale_interval(time_t requested) | 587 | scale_interval(unsigned requested) |
588 | { | 588 | { |
589 | time_t interval, r; | 589 | unsigned interval, r; |
590 | interval = requested * G.scale; | 590 | interval = requested * G.scale; |
591 | r = (unsigned)random() % (unsigned long)(MAX(5, interval / 10)); | 591 | r = (unsigned)random() % (unsigned)(MAX(5, interval / 10)); |
592 | return (interval + r); | 592 | return (interval + r); |
593 | } | 593 | } |
594 | 594 | ||
595 | static void | 595 | static void |
596 | recv_and_process_peer_pkt(ntp_peer_t *p) | 596 | recv_and_process_peer_pkt(ntp_peer_t *p) |
597 | { | 597 | { |
598 | char *addr; | ||
599 | ssize_t size; | 598 | ssize_t size; |
600 | ntp_msg_t msg; | 599 | ntp_msg_t msg; |
601 | double T1, T2, T3, T4; | 600 | double T1, T2, T3, T4; |
602 | time_t interval; | 601 | unsigned interval; |
603 | ntp_offset_t *offset; | 602 | ntp_offset_t *offset; |
604 | 603 | ||
605 | addr = xmalloc_sockaddr2dotted_noport(&p->lsa->u.sa); | ||
606 | |||
607 | /* We can recvfrom here and check from.IP, but some multihomed | 604 | /* We can recvfrom here and check from.IP, but some multihomed |
608 | * ntp servers reply from their *other IP*. | 605 | * ntp servers reply from their *other IP*. |
609 | * TODO: maybe we should check at least what we can: from.port == 123? | 606 | * TODO: maybe we should check at least what we can: from.port == 123? |
610 | */ | 607 | */ |
611 | size = recv(p->query.fd, &msg, sizeof(msg), MSG_DONTWAIT); | 608 | size = recv(p->fd, &msg, sizeof(msg), MSG_DONTWAIT); |
612 | if (size == -1) { | 609 | if (size == -1) { |
613 | bb_perror_msg("recv(%s) error", addr); | 610 | bb_perror_msg("recv(%s) error", p->dotted); |
614 | if (errno == EHOSTUNREACH || errno == EHOSTDOWN | 611 | if (errno == EHOSTUNREACH || errno == EHOSTDOWN |
615 | || errno == ENETUNREACH || errno == ENETDOWN | 612 | || errno == ENETUNREACH || errno == ENETDOWN |
616 | || errno == ECONNREFUSED || errno == EADDRNOTAVAIL | 613 | || errno == ECONNREFUSED || errno == EADDRNOTAVAIL |
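A stand-alone sketch of the clock filter described at the top of update_peer_data() above: among the samples still marked good, use the one that arrived with the lowest network delay. sample_t is an illustrative type, not the file's ntp_offset_t.

```c
/* Pick the reply slot with the lowest delay among the "good" samples. */
typedef struct {
    double offset;
    double delay;
    int good;
} sample_t;

/* Returns the index of the best sample, or -1 if none are usable. */
static int pick_lowest_delay(const sample_t *s, int n)
{
    int i, best = -1;

    for (i = 0; i < n; i++) {
        if (!s[i].good)
            continue;
        if (best < 0 || s[i].delay < s[best].delay)
            best = i;
    }
    return best;
}
```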
@@ -618,7 +615,7 @@ recv_and_process_peer_pkt(ntp_peer_t *p) | |||
618 | ) { | 615 | ) { |
619 | //TODO: always do this? | 616 | //TODO: always do this? |
620 | set_next(p, error_interval()); | 617 | set_next(p, error_interval()); |
621 | goto bail; | 618 | goto close_sock; |
622 | } | 619 | } |
623 | xfunc_die(); | 620 | xfunc_die(); |
624 | } | 621 | } |
@@ -626,23 +623,23 @@ recv_and_process_peer_pkt(ntp_peer_t *p) | |||
626 | T4 = gettime1900fp(); | 623 | T4 = gettime1900fp(); |
627 | 624 | ||
628 | if (size != NTP_MSGSIZE_NOAUTH && size != NTP_MSGSIZE) { | 625 | if (size != NTP_MSGSIZE_NOAUTH && size != NTP_MSGSIZE) { |
629 | bb_error_msg("malformed packet received from %s", addr); | 626 | bb_error_msg("malformed packet received from %s", p->dotted); |
630 | goto bail; | 627 | goto bail; |
631 | } | 628 | } |
632 | 629 | ||
633 | if (msg.orgtime.int_partl != p->query.msg.xmttime.int_partl | 630 | if (msg.m_orgtime.int_partl != p->msg.m_xmttime.int_partl |
634 | || msg.orgtime.fractionl != p->query.msg.xmttime.fractionl | 631 | || msg.m_orgtime.fractionl != p->msg.m_xmttime.fractionl |
635 | ) { | 632 | ) { |
636 | goto bail; | 633 | goto bail; |
637 | } | 634 | } |
638 | 635 | ||
639 | if ((msg.status & LI_ALARM) == LI_ALARM | 636 | if ((msg.m_status & LI_ALARM) == LI_ALARM |
640 | || msg.stratum == 0 | 637 | || msg.m_stratum == 0 |
641 | || msg.stratum > NTP_MAXSTRATUM | 638 | || msg.m_stratum > NTP_MAXSTRATUM |
642 | ) { | 639 | ) { |
643 | interval = error_interval(); | 640 | interval = error_interval(); |
644 | bb_info_msg("reply from %s: not synced, next query %ds", addr, (int) interval); | 641 | bb_error_msg("reply from %s: not synced, next query %us", p->dotted, interval); |
645 | goto bail; | 642 | goto close_sock; |
646 | } | 643 | } |
647 | 644 | ||
648 | /* | 645 | /* |
@@ -660,34 +657,34 @@ recv_and_process_peer_pkt(ntp_peer_t *p) | |||
660 | * d = (T4 - T1) - (T3 - T2) t = ((T2 - T1) + (T3 - T4)) / 2. | 657 | * d = (T4 - T1) - (T3 - T2) t = ((T2 - T1) + (T3 - T4)) / 2. |
661 | */ | 658 | */ |
662 | 659 | ||
663 | T1 = p->query.xmttime; | 660 | T1 = p->xmttime; |
664 | T2 = lfp_to_d(msg.rectime); | 661 | T2 = lfp_to_d(msg.m_rectime); |
665 | T3 = lfp_to_d(msg.xmttime); | 662 | T3 = lfp_to_d(msg.m_xmttime); |
666 | 663 | ||
667 | offset = &p->reply[p->shift]; | 664 | offset = &p->reply[p->shift]; |
668 | 665 | ||
669 | offset->offset = ((T2 - T1) + (T3 - T4)) / 2; | 666 | offset->o_offset = ((T2 - T1) + (T3 - T4)) / 2; |
670 | offset->delay = (T4 - T1) - (T3 - T2); | 667 | offset->o_delay = (T4 - T1) - (T3 - T2); |
671 | if (offset->delay < 0) { | 668 | if (offset->o_delay < 0) { |
672 | interval = error_interval(); | 669 | interval = error_interval(); |
673 | set_next(p, interval); | 670 | set_next(p, interval); |
674 | bb_info_msg("reply from %s: negative delay %f", addr, p->reply[p->shift].delay); | 671 | bb_error_msg("reply from %s: negative delay %f", p->dotted, p->reply[p->shift].o_delay); |
675 | goto bail; | 672 | goto close_sock; |
676 | } | 673 | } |
677 | offset->error = (T2 - T1) - (T3 - T4); | 674 | //UNUSED: offset->o_error = (T2 - T1) - (T3 - T4); |
678 | // Can we use (T4 - OFFSET_1900_1970) instead of time(NULL)? | 675 | // Can we use (T4 - OFFSET_1900_1970) instead of time(NULL)? |
679 | offset->rcvd = time(NULL); | 676 | offset->o_rcvd = time(NULL); |
680 | offset->good = 1; | 677 | offset->o_good = 1; |
681 | 678 | ||
682 | offset->status.leap = (msg.status & LI_MASK); | 679 | offset->o_leap = (msg.m_status & LI_MASK); |
683 | offset->status.precision = msg.precision; | 680 | //UNUSED: offset->o_precision = msg.m_precision; |
684 | offset->status.rootdelay = sfp_to_d(msg.rootdelay); | 681 | //UNUSED: offset->o_rootdelay = sfp_to_d(msg.m_rootdelay); |
685 | offset->status.rootdispersion = sfp_to_d(msg.dispersion); | 682 | //UNUSED: offset->o_rootdispersion = sfp_to_d(msg.m_dispersion); |
686 | offset->status.refid = ntohl(msg.refid); | 683 | //UNUSED: offset->o_refid = ntohl(msg.m_refid); |
687 | offset->status.refid4 = msg.xmttime.fractionl; | 684 | offset->o_refid4 = msg.m_xmttime.fractionl; |
688 | offset->status.reftime = lfp_to_d(msg.reftime); | 685 | //UNUSED: offset->o_reftime = lfp_to_d(msg.m_reftime); |
689 | offset->status.poll = msg.ppoll; | 686 | //UNUSED: offset->o_poll = msg.m_ppoll; |
690 | offset->status.stratum = msg.stratum; | 687 | offset->o_stratum = msg.m_stratum; |
691 | 688 | ||
692 | if (p->trustlevel < TRUSTLEVEL_PATHETIC) | 689 | if (p->trustlevel < TRUSTLEVEL_PATHETIC) |
693 | interval = scale_interval(INTERVAL_QUERY_PATHETIC); | 690 | interval = scale_interval(INTERVAL_QUERY_PATHETIC); |
@@ -703,21 +700,29 @@ recv_and_process_peer_pkt(ntp_peer_t *p) | |||
703 | if (p->trustlevel < TRUSTLEVEL_MAX) { | 700 | if (p->trustlevel < TRUSTLEVEL_MAX) { |
704 | p->trustlevel++; | 701 | p->trustlevel++; |
705 | if (p->trustlevel == TRUSTLEVEL_BADPEER) | 702 | if (p->trustlevel == TRUSTLEVEL_BADPEER) |
706 | bb_info_msg("peer %s now valid", addr); | 703 | bb_error_msg("peer %s now valid", p->dotted); |
707 | } | 704 | } |
708 | 705 | ||
709 | bb_info_msg("reply from %s: offset %f delay %f, next query %ds", addr, | 706 | if (G.verbose) |
710 | offset->offset, offset->delay, (int) interval); | 707 | bb_error_msg("reply from %s: offset %f delay %f, next query %us", p->dotted, |
708 | offset->o_offset, offset->o_delay, interval); | ||
711 | 709 | ||
712 | update_peer_data(p); | 710 | update_peer_data(p); |
713 | step_time_once(offset->offset); | 711 | step_time_once(offset->o_offset); |
714 | 712 | ||
715 | p->shift++; | 713 | p->shift++; |
716 | if (p->shift >= OFFSET_ARRAY_SIZE) | 714 | if (p->shift >= OFFSET_ARRAY_SIZE) |
717 | p->shift = 0; | 715 | p->shift = 0; |
718 | 716 | ||
717 | close_sock: | ||
718 | /* We do not expect any more packets for now. | ||
719 | * Closing the socket informs kernel about it. | ||
720 | * We open a new socket when we send a new query. | ||
721 | */ | ||
722 | close(p->fd); | ||
723 | p->fd = -1; | ||
719 | bail: | 724 | bail: |
720 | free(addr); | 725 | return; |
721 | } | 726 | } |
722 | 727 | ||
723 | #if ENABLE_FEATURE_NTPD_SERVER | 728 | #if ENABLE_FEATURE_NTPD_SERVER |
@@ -751,31 +756,31 @@ recv_and_process_client_pkt(void /*int fd*/) | |||
751 | goto bail; | 756 | goto bail; |
752 | } | 757 | } |
753 | 758 | ||
754 | query_status = msg.status; | 759 | query_status = msg.m_status; |
755 | query_ppoll = msg.ppoll; | 760 | query_ppoll = msg.m_ppoll; |
756 | query_xmttime = msg.xmttime; | 761 | query_xmttime = msg.m_xmttime; |
757 | 762 | ||
758 | /* Build a reply packet */ | 763 | /* Build a reply packet */ |
759 | memset(&msg, 0, sizeof(msg)); | 764 | memset(&msg, 0, sizeof(msg)); |
760 | msg.status = G.status.synced ? G.status.leap : LI_ALARM; | 765 | msg.m_status = G.synced ? G.leap : LI_ALARM; |
761 | msg.status |= (query_status & VERSION_MASK); | 766 | msg.m_status |= (query_status & VERSION_MASK); |
762 | msg.status |= ((query_status & MODE_MASK) == MODE_CLIENT) ? | 767 | msg.m_status |= ((query_status & MODE_MASK) == MODE_CLIENT) ? |
763 | MODE_SERVER : MODE_SYM_PAS; | 768 | MODE_SERVER : MODE_SYM_PAS; |
764 | msg.stratum = G.status.stratum; | 769 | msg.m_stratum = G.stratum; |
765 | msg.ppoll = query_ppoll; | 770 | msg.m_ppoll = query_ppoll; |
766 | msg.precision = G.status.precision; | 771 | msg.m_precision = G.precision; |
767 | rectime = gettime1900fp(); | 772 | rectime = gettime1900fp(); |
768 | msg.xmttime = msg.rectime = d_to_lfp(rectime); | 773 | msg.m_xmttime = msg.m_rectime = d_to_lfp(rectime); |
769 | msg.reftime = d_to_lfp(G.status.reftime); | 774 | msg.m_reftime = d_to_lfp(G.reftime); |
770 | //msg.xmttime = d_to_lfp(gettime1900fp()); // = msg.rectime | 775 | //msg.m_xmttime = d_to_lfp(gettime1900fp()); // = msg.m_rectime |
771 | msg.orgtime = query_xmttime; | 776 | msg.m_orgtime = query_xmttime; |
772 | msg.rootdelay = d_to_sfp(G.status.rootdelay); | 777 | msg.m_rootdelay = d_to_sfp(G.rootdelay); |
773 | version = (query_status & VERSION_MASK); /* ... >> VERSION_SHIFT - done below instead */ | 778 | version = (query_status & VERSION_MASK); /* ... >> VERSION_SHIFT - done below instead */ |
774 | msg.refid = (version > (3 << VERSION_SHIFT)) ? G.status.refid4 : G.status.refid; | 779 | msg.m_refid = (version > (3 << VERSION_SHIFT)) ? G.refid4 : G.refid; |
775 | 780 | ||
776 | /* We reply from the local address packet was sent to, | 781 | /* We reply from the local address packet was sent to, |
777 | * this makes to/from look swapped here: */ | 782 | * this makes to/from look swapped here: */ |
778 | sendmsg_wrap(G.listen_fd, | 783 | do_sendto(G.listen_fd, |
779 | /*from:*/ &to->u.sa, /*to:*/ from, /*addrlen:*/ to->len, | 784 | /*from:*/ &to->u.sa, /*to:*/ from, /*addrlen:*/ to->len, |
780 | &msg, size); | 785 | &msg, size); |
781 | 786 | ||
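The "reply from the local address the packet was sent to" comment above is, on Linux, commonly implemented with IP_PKTINFO ancillary data. The sketch below shows that general technique only; it is not a copy of libbb's do_sendto(), and it assumes the caller already learned the packet's destination address via recvmsg() on a socket with IP_PKTINFO enabled.

```c
/* Send a reply whose source address is the address the query was sent to,
 * by attaching an IP_PKTINFO control message (IPv4 only, Linux-specific). */
#define _GNU_SOURCE
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/uio.h>

static ssize_t reply_from(int fd, const struct sockaddr_in *to,
                          const struct in_addr *local,
                          const void *buf, size_t len)
{
    struct iovec iov = { (void *)buf, len };
    union {
        struct cmsghdr align;
        char space[CMSG_SPACE(sizeof(struct in_pktinfo))];
    } control;
    struct msghdr mh;
    struct cmsghdr *cm;
    struct in_pktinfo *pi;

    memset(&mh, 0, sizeof(mh));
    mh.msg_name = (void *)to;
    mh.msg_namelen = sizeof(*to);
    mh.msg_iov = &iov;
    mh.msg_iovlen = 1;
    mh.msg_control = control.space;
    mh.msg_controllen = sizeof(control.space);

    cm = CMSG_FIRSTHDR(&mh);
    cm->cmsg_level = IPPROTO_IP;
    cm->cmsg_type = IP_PKTINFO;
    cm->cmsg_len = CMSG_LEN(sizeof(*pi));
    pi = (struct in_pktinfo *)CMSG_DATA(cm);
    memset(pi, 0, sizeof(*pi));
    pi->ipi_spec_dst = *local;   /* becomes the source address of the reply */

    return sendmsg(fd, &mh, 0);
}
```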
@@ -931,7 +936,7 @@ static NOINLINE void ntp_init(char **argv) | |||
931 | #endif | 936 | #endif |
932 | while (b > 1) | 937 | while (b > 1) |
933 | prec--, b >>= 1; | 938 | prec--, b >>= 1; |
934 | G.status.precision = prec; | 939 | G.precision = prec; |
935 | } | 940 | } |
936 | G.scale = 1; | 941 | G.scale = 1; |
937 | 942 | ||
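An illustration of the precision computation above: halve the clock resolution in ticks per second until it reaches 1, counting down the power-of-two exponent NTP uses as its precision field. The starting value of b below is an assumed example, not taken from the commit.

```c
/* Derive the (negative) NTP precision exponent from clock resolution. */
#include <stdio.h>

int main(void)
{
    int prec = 0;
    unsigned b = 1000000;   /* assumed: microsecond resolution */

    while (b > 1) {
        prec--;
        b >>= 1;
    }
    /* 1000000 lies between 2^19 and 2^20, so the loop runs 19 times
     * and prec ends up as -19 (resolution ~ 2^-19 s). */
    printf("precision = %d\n", prec);
    return 0;
}
```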
@@ -991,18 +996,14 @@ int ntpd_main(int argc UNUSED_PARAM, char **argv) | |||
991 | } | 996 | } |
992 | if (p->deadline != 0 && p->deadline <= cur_time) { | 997 | if (p->deadline != 0 && p->deadline <= cur_time) { |
993 | /* Timed out waiting for reply */ | 998 | /* Timed out waiting for reply */ |
994 | char *addr = xmalloc_sockaddr2dotted_noport(&p->lsa->u.sa); | ||
995 | |||
996 | timeout = error_interval(); | 999 | timeout = error_interval(); |
997 | bb_info_msg("no reply from %s received in time, " | 1000 | bb_error_msg("timed out waiting for %s, " |
998 | "next query %ds", addr, timeout); | 1001 | "next query %us", p->dotted, timeout); |
999 | if (p->trustlevel >= TRUSTLEVEL_BADPEER) { | 1002 | if (p->trustlevel >= TRUSTLEVEL_BADPEER) { |
1000 | p->trustlevel /= 2; | 1003 | p->trustlevel /= 2; |
1001 | if (p->trustlevel < TRUSTLEVEL_BADPEER) | 1004 | if (p->trustlevel < TRUSTLEVEL_BADPEER) |
1002 | bb_info_msg("peer %s now invalid", addr); | 1005 | bb_error_msg("peer %s now invalid", p->dotted); |
1003 | } | 1006 | } |
1004 | free(addr); | ||
1005 | |||
1006 | set_next(p, timeout); | 1007 | set_next(p, timeout); |
1007 | } | 1008 | } |
1008 | 1009 | ||
@@ -1013,7 +1014,7 @@ int ntpd_main(int argc UNUSED_PARAM, char **argv) | |||
1013 | 1014 | ||
1014 | if (p->state == STATE_QUERY_SENT) { | 1015 | if (p->state == STATE_QUERY_SENT) { |
1015 | /* Wait for reply from this peer */ | 1016 | /* Wait for reply from this peer */ |
1016 | pfd[i].fd = p->query.fd; | 1017 | pfd[i].fd = p->fd; |
1017 | pfd[i].events = POLLIN; | 1018 | pfd[i].events = POLLIN; |
1018 | idx2peer[i] = p; | 1019 | idx2peer[i] = p; |
1019 | i++; | 1020 | i++; |
@@ -1024,12 +1025,12 @@ int ntpd_main(int argc UNUSED_PARAM, char **argv) | |||
1024 | step_time_once(0); /* no good peers, don't wait */ | 1025 | step_time_once(0); /* no good peers, don't wait */ |
1025 | 1026 | ||
1026 | timeout = nextaction - cur_time; | 1027 | timeout = nextaction - cur_time; |
1027 | if (timeout < 0) | 1028 | if (timeout < 1) |
1028 | timeout = 0; | 1029 | timeout = 1; |
1029 | 1030 | ||
1030 | /* Here we may block */ | 1031 | /* Here we may block */ |
1031 | if (g.verbose) | 1032 | if (g.verbose >= 2) |
1032 | bb_error_msg("entering poll %u secs", timeout); | 1033 | bb_error_msg("poll %u sec, waiting on %u sockets", timeout, i); |
1033 | nfds = poll(pfd, i, timeout * 1000); | 1034 | nfds = poll(pfd, i, timeout * 1000); |
1034 | if (nfds <= 0) | 1035 | if (nfds <= 0) |
1035 | continue; | 1036 | continue; |