2010-01-01 21:16:17 +05:30
|
|
|
/*
|
|
|
|
* NTP client/server, based on OpenNTPD 3.9p1
|
|
|
|
*
|
2015-01-04 22:16:08 +05:30
|
|
|
* Busybox port author: Adam Tkac (C) 2009 <vonsch@gmail.com>
|
2010-01-01 21:16:17 +05:30
|
|
|
*
|
2015-01-04 22:16:08 +05:30
|
|
|
* OpenNTPd 3.9p1 copyright holders:
|
|
|
|
* Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
|
|
|
|
* Copyright (c) 2004 Alexander Guy <alexander.guy@andern.org>
|
|
|
|
*
|
|
|
|
* OpenNTPd code is licensed under ISC-style licence:
|
|
|
|
*
|
|
|
|
* Permission to use, copy, modify, and distribute this software for any
|
|
|
|
* purpose with or without fee is hereby granted, provided that the above
|
|
|
|
* copyright notice and this permission notice appear in all copies.
|
|
|
|
*
|
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
|
|
|
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
|
|
|
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
|
|
|
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
|
|
|
* WHATSOEVER RESULTING FROM LOSS OF MIND, USE, DATA OR PROFITS, WHETHER
|
|
|
|
* IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
|
|
|
|
* OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
|
|
|
***********************************************************************
|
2010-01-01 21:16:17 +05:30
|
|
|
*
|
|
|
|
* Parts of OpenNTPD clock syncronization code is replaced by
|
2015-01-04 22:16:08 +05:30
|
|
|
* code which is based on ntp-4.2.6, which carries the following
|
2010-01-01 21:16:17 +05:30
|
|
|
* copyright notice:
|
|
|
|
*
|
2015-01-04 22:16:08 +05:30
|
|
|
* Copyright (c) University of Delaware 1992-2009
|
|
|
|
*
|
|
|
|
* Permission to use, copy, modify, and distribute this software and
|
|
|
|
* its documentation for any purpose with or without fee is hereby
|
|
|
|
* granted, provided that the above copyright notice appears in all
|
|
|
|
* copies and that both the copyright notice and this permission
|
|
|
|
* notice appear in supporting documentation, and that the name
|
|
|
|
* University of Delaware not be used in advertising or publicity
|
|
|
|
* pertaining to distribution of the software without specific,
|
|
|
|
* written prior permission. The University of Delaware makes no
|
|
|
|
* representations about the suitability this software for any
|
|
|
|
* purpose. It is provided "as is" without express or implied warranty.
|
2010-01-01 21:16:17 +05:30
|
|
|
***********************************************************************
|
|
|
|
*/
|
2016-11-23 13:35:14 +05:30
|
|
|
//config:config NTPD
|
2018-12-28 07:50:17 +05:30
|
|
|
//config: bool "ntpd (22 kb)"
|
2016-11-23 13:35:14 +05:30
|
|
|
//config: default y
|
|
|
|
//config: help
|
2017-07-21 13:20:55 +05:30
|
|
|
//config: The NTP client/server daemon.
|
2016-11-23 13:35:14 +05:30
|
|
|
//config:
|
|
|
|
//config:config FEATURE_NTPD_SERVER
|
|
|
|
//config: bool "Make ntpd usable as a NTP server"
|
|
|
|
//config: default y
|
|
|
|
//config: depends on NTPD
|
|
|
|
//config: help
|
2017-07-21 13:20:55 +05:30
|
|
|
//config: Make ntpd usable as a NTP server. If you disable this option
|
|
|
|
//config: ntpd will be usable only as a NTP client.
|
2016-11-23 13:35:14 +05:30
|
|
|
//config:
|
|
|
|
//config:config FEATURE_NTPD_CONF
|
|
|
|
//config: bool "Make ntpd understand /etc/ntp.conf"
|
|
|
|
//config: default y
|
|
|
|
//config: depends on NTPD
|
|
|
|
//config: help
|
2017-07-21 13:20:55 +05:30
|
|
|
//config: Make ntpd look in /etc/ntp.conf for peers. Only "server address"
|
|
|
|
//config: is supported.
|
2018-10-31 03:37:26 +05:30
|
|
|
//config:
|
2018-10-27 22:25:59 +05:30
|
|
|
//config:config FEATURE_NTP_AUTH
|
|
|
|
//config: bool "Support md5/sha1 message authentication codes"
|
2018-10-31 03:37:26 +05:30
|
|
|
//config: default y
|
2018-10-27 22:25:59 +05:30
|
|
|
//config: depends on NTPD
|
2016-11-23 13:35:14 +05:30
|
|
|
|
|
|
|
//applet:IF_NTPD(APPLET(ntpd, BB_DIR_USR_SBIN, BB_SUID_DROP))
|
|
|
|
|
|
|
|
//kbuild:lib-$(CONFIG_NTPD) += ntpd.o
|
2011-04-11 06:59:49 +05:30
|
|
|
|
|
|
|
//usage:#define ntpd_trivial_usage
|
2018-10-27 22:25:59 +05:30
|
|
|
//usage: "[-dnqNw"IF_FEATURE_NTPD_SERVER("l] [-I IFACE")"] [-S PROG]"
|
|
|
|
//usage: IF_NOT_FEATURE_NTP_AUTH(" [-p PEER]...")
|
|
|
|
//usage: IF_FEATURE_NTP_AUTH(" [-k KEYFILE] [-p [keyno:N:]PEER]...")
|
2011-04-11 06:59:49 +05:30
|
|
|
//usage:#define ntpd_full_usage "\n\n"
|
|
|
|
//usage: "NTP client/server\n"
|
2020-12-18 08:42:51 +05:30
|
|
|
//usage: "\n -d[d] Verbose"
|
2011-04-11 06:59:49 +05:30
|
|
|
//usage: "\n -n Do not daemonize"
|
|
|
|
//usage: "\n -q Quit after clock is set"
|
|
|
|
//usage: "\n -N Run at high priority"
|
|
|
|
//usage: "\n -w Do not set time (only query peers), implies -n"
|
2018-10-27 22:25:59 +05:30
|
|
|
//usage: "\n -S PROG Run PROG after stepping time, stratum change, and every 11 min"
|
|
|
|
//usage: IF_NOT_FEATURE_NTP_AUTH(
|
2011-04-11 06:59:49 +05:30
|
|
|
//usage: "\n -p PEER Obtain time from PEER (may be repeated)"
|
2018-10-27 22:25:59 +05:30
|
|
|
//usage: )
|
|
|
|
//usage: IF_FEATURE_NTP_AUTH(
|
|
|
|
//usage: "\n -k FILE Key file (ntp.keys compatible)"
|
|
|
|
//usage: "\n -p [keyno:NUM:]PEER"
|
|
|
|
//usage: "\n Obtain time from PEER (may be repeated)"
|
|
|
|
//usage: "\n Use key NUM for authentication"
|
|
|
|
//usage: )
|
2014-03-23 19:36:38 +05:30
|
|
|
//usage: IF_FEATURE_NTPD_CONF(
|
2015-03-05 18:34:44 +05:30
|
|
|
//usage: "\n If -p is not given, 'server HOST' lines"
|
|
|
|
//usage: "\n from /etc/ntp.conf are used"
|
2014-03-23 19:36:38 +05:30
|
|
|
//usage: )
|
2015-03-03 01:29:13 +05:30
|
|
|
//usage: IF_FEATURE_NTPD_SERVER(
|
|
|
|
//usage: "\n -l Also run as server on port 123"
|
|
|
|
//usage: "\n -I IFACE Bind server to IFACE, implies -l"
|
|
|
|
//usage: )
|
2014-03-23 19:36:38 +05:30
|
|
|
|
|
|
|
// -l and -p options are not compatible with "standard" ntpd:
|
|
|
|
// it has them as "-l logfile" and "-p pidfile".
|
|
|
|
// -S and -w are not compat either, "standard" ntpd has no such opts.
|
2011-04-11 06:59:49 +05:30
|
|
|
|
2010-01-01 21:16:17 +05:30
|
|
|
#include "libbb.h"
|
|
|
|
#include <math.h>
|
2018-04-15 22:07:50 +05:30
|
|
|
#include <netinet/ip.h> /* For IPTOS_DSCP_AF21 definition */
|
2010-01-01 21:16:17 +05:30
|
|
|
#include <sys/timex.h>
|
2018-04-15 22:07:50 +05:30
|
|
|
#ifndef IPTOS_DSCP_AF21
|
|
|
|
# define IPTOS_DSCP_AF21 0x48
|
2010-01-01 21:16:17 +05:30
|
|
|
#endif
|
|
|
|
|
2021-01-04 05:58:39 +05:30
|
|
|
#if defined(__FreeBSD__)
|
|
|
|
/* see sys/timex.h */
|
|
|
|
# define adjtimex ntp_adjtime
|
|
|
|
# define ADJ_OFFSET MOD_OFFSET
|
|
|
|
# define ADJ_STATUS MOD_STATUS
|
|
|
|
# define ADJ_TIMECONST MOD_TIMECONST
|
|
|
|
#endif
|
2010-01-01 21:16:17 +05:30
|
|
|
|
2010-01-01 22:42:06 +05:30
|
|
|
/* Verbosity control (max level of -dddd options accepted).
|
2013-12-04 21:02:09 +05:30
|
|
|
* max 6 is very talkative (and bloated). 3 is non-bloated,
|
2010-01-01 22:42:06 +05:30
|
|
|
* production level setting.
|
|
|
|
*/
|
2013-12-04 21:02:09 +05:30
|
|
|
#define MAX_VERBOSE 3
|
2010-01-01 22:42:06 +05:30
|
|
|
|
2010-01-11 06:44:04 +05:30
|
|
|
/* High-level description of the algorithm:
|
|
|
|
*
|
|
|
|
* We start running with very small poll_exp, BURSTPOLL,
|
2010-10-21 02:06:51 +05:30
|
|
|
* in order to quickly accumulate INITIAL_SAMPLES datapoints
|
2010-01-11 06:44:04 +05:30
|
|
|
* for each peer. Then, time is stepped if the offset is larger
|
2021-03-03 00:24:09 +05:30
|
|
|
* than STEP_THRESHOLD, otherwise it isn't stepped.
|
2010-01-17 05:35:58 +05:30
|
|
|
*
|
2021-03-03 00:24:09 +05:30
|
|
|
* Then poll_exp is set to MINPOLL, and we enter "steady state": we collect
|
|
|
|
* a datapoint, we select the best peer, if this datapoint is not a new one
|
2010-01-11 06:44:04 +05:30
|
|
|
* (IOW: if this datapoint isn't for selected peer), sleep
|
|
|
|
* and collect another one; otherwise, use its offset to update
|
|
|
|
* frequency drift, if offset is somewhat large, reduce poll_exp,
|
|
|
|
* otherwise increase poll_exp.
|
|
|
|
*
|
|
|
|
* If offset is larger than STEP_THRESHOLD, which shouldn't normally
|
|
|
|
* happen, we assume that something "bad" happened (computer
|
|
|
|
* was hibernated, someone set totally wrong date, etc),
|
|
|
|
* then the time is stepped, all datapoints are discarded,
|
|
|
|
* and we go back to steady state.
|
2013-12-08 20:41:04 +05:30
|
|
|
*
|
|
|
|
* Made some changes to speed up re-syncing after our clock goes bad
|
|
|
|
* (tested with suspending my laptop):
|
2016-02-10 11:25:07 +05:30
|
|
|
* - if largish offset (>= STEP_THRESHOLD == 1 sec) is seen
|
2013-12-08 20:41:04 +05:30
|
|
|
* from a peer, schedule next query for this peer soon
|
|
|
|
* without drastically lowering poll interval for everybody.
|
|
|
|
* This makes us collect enough data for step much faster:
|
|
|
|
* e.g. at poll = 10 (1024 secs), step was done within 5 minutes
|
|
|
|
* after first reply which indicated that our clock is 14 seconds off.
|
|
|
|
* - on step, do not discard d_dispersion data of the existing datapoints,
|
|
|
|
* do not clear reachable_bits. This prevents discarding first ~8
|
|
|
|
* datapoints after the step.
|
2010-01-11 06:44:04 +05:30
|
|
|
*/
|
|
|
|
|
2021-03-02 16:37:14 +05:30
|
|
|
#define INITIAL_SAMPLES 3 /* how many samples do we want for init */
|
2019-10-30 16:43:46 +05:30
|
|
|
#define MIN_FREQHOLD 10 /* adjust offset, but not freq in this many first adjustments */
|
2018-08-03 14:33:55 +05:30
|
|
|
#define BAD_DELAY_GROWTH 4 /* drop packet if its delay grew by more than this factor */
|
2010-01-01 21:16:17 +05:30
|
|
|
|
2014-09-28 02:26:09 +05:30
|
|
|
#define RETRY_INTERVAL 32 /* on send/recv error, retry in N secs (need to be power of 2) */
|
|
|
|
#define NOREPLY_INTERVAL 512 /* sent, but got no reply: cap next query by this many seconds */
|
|
|
|
#define RESPONSE_INTERVAL 16 /* wait for reply up to N secs */
|
2017-10-31 17:14:37 +05:30
|
|
|
#define HOSTNAME_INTERVAL 4 /* hostname lookup failed. Wait N * peer->dns_errors secs for next try */
|
|
|
|
#define DNS_ERRORS_CAP 0x3f /* peer->dns_errors is in [0..63] */
|
2010-01-17 05:35:58 +05:30
|
|
|
|
|
|
|
/* Step threshold (sec). std ntpd uses 0.128.
|
2016-02-10 11:25:07 +05:30
|
|
|
*/
|
|
|
|
#define STEP_THRESHOLD 1
|
|
|
|
/* Slew threshold (sec): adjtimex() won't accept offsets larger than this.
|
2019-02-15 19:02:08 +05:30
|
|
|
* Using exact power of 2 (1/8, 1/2 etc) results in smaller code
|
2014-09-28 02:26:09 +05:30
|
|
|
*/
|
2019-02-15 19:02:08 +05:30
|
|
|
#define SLEW_THRESHOLD 0.5
|
|
|
|
// ^^^^ used to be 0.125.
|
|
|
|
// Since Linux 2.6.26 (circa 2006), kernel accepts (-0.5s, +0.5s) range
|
2018-03-11 01:55:53 +05:30
|
|
|
|
2010-01-11 06:01:59 +05:30
|
|
|
|
2021-03-03 00:24:09 +05:30
|
|
|
// #define PANIC_THRESHOLD 1000 /* panic threshold (sec) */
|
|
|
|
|
|
|
|
/* If we got |offset| > BIGOFF from a peer, cap next query interval
|
2014-09-28 02:26:09 +05:30
|
|
|
* for this peer by this many seconds:
|
|
|
|
*/
|
2016-02-10 11:25:07 +05:30
|
|
|
#define BIGOFF STEP_THRESHOLD
|
2014-09-28 02:26:09 +05:30
|
|
|
#define BIGOFF_INTERVAL (1 << 7) /* 128 s */
|
|
|
|
|
2010-01-11 06:01:59 +05:30
|
|
|
#define FREQ_TOLERANCE 0.000015 /* frequency tolerance (15 PPM) */
|
2010-10-29 15:16:52 +05:30
|
|
|
#define BURSTPOLL 0 /* initial poll */
|
2010-01-17 05:35:58 +05:30
|
|
|
#define MINPOLL 5 /* minimum poll interval. std ntpd uses 6 (6: 64 sec) */
|
2021-03-03 00:24:09 +05:30
|
|
|
/* If offset > discipline_jitter * POLLADJ_GATE, and poll interval is > 2^BIGPOLL,
|
2014-10-02 20:48:43 +05:30
|
|
|
* then it is decreased _at once_. (If <= 2^BIGPOLL, it will be decreased _eventually_).
|
2012-03-03 16:45:46 +05:30
|
|
|
*/
|
2014-10-02 20:48:43 +05:30
|
|
|
#define BIGPOLL 9 /* 2^9 sec ~= 8.5 min */
|
2010-01-17 05:35:58 +05:30
|
|
|
#define MAXPOLL 12 /* maximum poll interval (12: 1.1h, 17: 36.4h). std ntpd uses 17 */
|
2021-03-03 00:24:09 +05:30
|
|
|
/* Actively lower poll when we see such big offsets.
|
2016-02-10 11:25:07 +05:30
|
|
|
* With SLEW_THRESHOLD = 0.125, it means we try to sync more aggressively
|
2014-09-28 02:26:09 +05:30
|
|
|
* if offset increases over ~0.04 sec
|
|
|
|
*/
|
2021-03-03 00:24:09 +05:30
|
|
|
// #define POLLDOWN_OFFSET (SLEW_THRESHOLD / 3)
|
2010-01-17 05:35:58 +05:30
|
|
|
#define MINDISP 0.01 /* minimum dispersion (sec) */
|
|
|
|
#define MAXDISP 16 /* maximum dispersion (sec) */
|
2010-01-11 06:01:59 +05:30
|
|
|
#define MAXSTRAT 16 /* maximum stratum (infinity metric) */
|
2010-01-17 05:35:58 +05:30
|
|
|
#define MAXDIST 1 /* distance threshold (sec) */
|
2010-01-11 06:01:59 +05:30
|
|
|
#define MIN_SELECTED 1 /* minimum intersection survivors */
|
|
|
|
#define MIN_CLUSTERED 3 /* minimum cluster survivors */
|
|
|
|
|
2021-03-02 16:37:14 +05:30
|
|
|
/* Correct frequency ourself (0) or let kernel do it (1)? */
|
|
|
|
#define USING_KERNEL_PLL_LOOP 1
|
|
|
|
// /* frequency drift we can correct (500 PPM) */
|
|
|
|
// #define MAXDRIFT 0.000500
|
|
|
|
// /* Compromise Allan intercept (sec). doc uses 1500, std ntpd uses 512 */
|
|
|
|
// #define ALLAN 512
|
|
|
|
// /* PLL loop gain */
|
|
|
|
// #define PLL 65536
|
|
|
|
// /* FLL loop gain [why it depends on MAXPOLL??] */
|
|
|
|
// #define FLL (MAXPOLL + 1)
|
2010-01-01 21:16:17 +05:30
|
|
|
|
|
|
|
/* Poll-adjust threshold.
|
|
|
|
* When we see that offset is small enough compared to discipline jitter,
|
2012-03-03 16:45:46 +05:30
|
|
|
* we grow a counter: += MINPOLL. When counter goes over POLLADJ_LIMIT,
|
2010-01-02 00:26:16 +05:30
|
|
|
* we poll_exp++. If offset isn't small, counter -= poll_exp*2,
|
2012-03-03 16:45:46 +05:30
|
|
|
* and when it goes below -POLLADJ_LIMIT, we poll_exp--.
|
|
|
|
* (Bumped from 30 to 40 since otherwise I often see poll_exp going *2* steps down)
|
2010-01-01 21:16:17 +05:30
|
|
|
*/
|
2012-02-28 07:15:00 +05:30
|
|
|
#define POLLADJ_LIMIT 40
|
2012-03-03 16:45:46 +05:30
|
|
|
/* If offset < discipline_jitter * POLLADJ_GATE, then we decide to increase
|
2010-01-01 21:16:17 +05:30
|
|
|
* poll interval (we think we can't improve timekeeping
|
|
|
|
* by staying at smaller poll).
|
|
|
|
*/
|
2010-01-02 00:26:16 +05:30
|
|
|
#define POLLADJ_GATE 4
|
2012-03-05 05:21:48 +05:30
|
|
|
#define TIMECONST_HACK_GATE 2
|
2010-01-01 21:16:17 +05:30
|
|
|
/* Parameter averaging constant */
|
2010-01-02 00:26:16 +05:30
|
|
|
#define AVG 4
|
|
|
|
|
2018-10-27 22:25:59 +05:30
|
|
|
#define MAX_KEY_NUMBER 65535
|
|
|
|
#define KEYID_SIZE sizeof(uint32_t)
|
2010-01-01 21:16:17 +05:30
|
|
|
|
|
|
|
enum {
|
|
|
|
NTP_VERSION = 4,
|
|
|
|
NTP_MAXSTRATUM = 15,
|
|
|
|
|
2018-10-27 22:25:59 +05:30
|
|
|
NTP_MD5_DIGESTSIZE = 16,
|
|
|
|
NTP_MSGSIZE_NOAUTH = 48,
|
|
|
|
NTP_MSGSIZE_MD5_AUTH = NTP_MSGSIZE_NOAUTH + KEYID_SIZE + NTP_MD5_DIGESTSIZE,
|
|
|
|
NTP_SHA1_DIGESTSIZE = 20,
|
|
|
|
NTP_MSGSIZE_SHA1_AUTH = NTP_MSGSIZE_NOAUTH + KEYID_SIZE + NTP_SHA1_DIGESTSIZE,
|
2010-01-01 21:16:17 +05:30
|
|
|
|
|
|
|
/* Status Masks */
|
|
|
|
MODE_MASK = (7 << 0),
|
|
|
|
VERSION_MASK = (7 << 3),
|
|
|
|
VERSION_SHIFT = 3,
|
|
|
|
LI_MASK = (3 << 6),
|
|
|
|
|
|
|
|
/* Leap Second Codes (high order two bits of m_status) */
|
|
|
|
LI_NOWARNING = (0 << 6), /* no warning */
|
|
|
|
LI_PLUSSEC = (1 << 6), /* add a second (61 seconds) */
|
|
|
|
LI_MINUSSEC = (2 << 6), /* minus a second (59 seconds) */
|
|
|
|
LI_ALARM = (3 << 6), /* alarm condition */
|
|
|
|
|
|
|
|
/* Mode values */
|
|
|
|
MODE_RES0 = 0, /* reserved */
|
|
|
|
MODE_SYM_ACT = 1, /* symmetric active */
|
|
|
|
MODE_SYM_PAS = 2, /* symmetric passive */
|
|
|
|
MODE_CLIENT = 3, /* client */
|
|
|
|
MODE_SERVER = 4, /* server */
|
|
|
|
MODE_BROADCAST = 5, /* broadcast */
|
|
|
|
MODE_RES1 = 6, /* reserved for NTP control message */
|
|
|
|
MODE_RES2 = 7, /* reserved for private use */
|
|
|
|
};
|
|
|
|
|
|
|
|
//TODO: better base selection
|
|
|
|
#define OFFSET_1900_1970 2208988800UL /* 1970 - 1900 in seconds */
|
|
|
|
|
|
|
|
#define NUM_DATAPOINTS 8
|
|
|
|
|
|
|
|
typedef struct {
|
|
|
|
uint32_t int_partl;
|
|
|
|
uint32_t fractionl;
|
|
|
|
} l_fixedpt_t;
|
|
|
|
|
|
|
|
typedef struct {
|
|
|
|
uint16_t int_parts;
|
|
|
|
uint16_t fractions;
|
|
|
|
} s_fixedpt_t;
|
|
|
|
|
|
|
|
typedef struct {
|
|
|
|
uint8_t m_status; /* status of local clock and leap info */
|
|
|
|
uint8_t m_stratum;
|
|
|
|
uint8_t m_ppoll; /* poll value */
|
|
|
|
int8_t m_precision_exp;
|
|
|
|
s_fixedpt_t m_rootdelay;
|
|
|
|
s_fixedpt_t m_rootdisp;
|
|
|
|
uint32_t m_refid;
|
|
|
|
l_fixedpt_t m_reftime;
|
|
|
|
l_fixedpt_t m_orgtime;
|
|
|
|
l_fixedpt_t m_rectime;
|
|
|
|
l_fixedpt_t m_xmttime;
|
|
|
|
uint32_t m_keyid;
|
2018-10-27 22:25:59 +05:30
|
|
|
uint8_t m_digest[ENABLE_FEATURE_NTP_AUTH ? NTP_SHA1_DIGESTSIZE : NTP_MD5_DIGESTSIZE];
|
2010-01-01 21:16:17 +05:30
|
|
|
} msg_t;
|
|
|
|
|
|
|
|
typedef struct {
|
|
|
|
double d_offset;
|
2012-03-08 07:57:49 +05:30
|
|
|
double d_recv_time;
|
2010-01-01 21:16:17 +05:30
|
|
|
double d_dispersion;
|
|
|
|
} datapoint_t;
|
|
|
|
|
2018-10-27 22:25:59 +05:30
|
|
|
#if ENABLE_FEATURE_NTP_AUTH
|
|
|
|
enum {
|
|
|
|
HASH_MD5,
|
|
|
|
HASH_SHA1,
|
|
|
|
};
|
|
|
|
typedef struct {
|
|
|
|
unsigned id; //try uint16_t?
|
|
|
|
smalluint type;
|
|
|
|
smalluint msg_size;
|
|
|
|
smalluint key_length;
|
|
|
|
char key[0];
|
|
|
|
} key_entry_t;
|
|
|
|
#endif
|
|
|
|
|
2010-01-01 21:16:17 +05:30
|
|
|
typedef struct {
|
|
|
|
len_and_sockaddr *p_lsa;
|
|
|
|
char *p_dotted;
|
2018-10-27 22:25:59 +05:30
|
|
|
#if ENABLE_FEATURE_NTP_AUTH
|
|
|
|
key_entry_t *key_entry;
|
|
|
|
#endif
|
2010-01-01 21:16:17 +05:30
|
|
|
int p_fd;
|
|
|
|
int datapoint_idx;
|
2020-07-20 03:34:33 +05:30
|
|
|
#if ENABLE_FEATURE_NTPD_SERVER
|
|
|
|
uint32_t p_refid;
|
|
|
|
#endif
|
2010-01-01 21:16:17 +05:30
|
|
|
uint32_t lastpkt_refid;
|
2010-01-02 20:27:07 +05:30
|
|
|
uint8_t lastpkt_status;
|
2010-01-01 21:16:17 +05:30
|
|
|
uint8_t lastpkt_stratum;
|
2010-01-03 13:29:59 +05:30
|
|
|
uint8_t reachable_bits;
|
2017-10-31 17:14:37 +05:30
|
|
|
uint8_t dns_errors;
|
2013-07-30 15:22:58 +05:30
|
|
|
/* when to send new query (if p_fd == -1)
|
|
|
|
* or when receive times out (if p_fd >= 0): */
|
2010-01-03 13:29:59 +05:30
|
|
|
double next_action_time;
|
2010-01-01 21:16:17 +05:30
|
|
|
double p_xmttime;
|
2014-04-19 22:30:16 +05:30
|
|
|
double p_raw_delay;
|
|
|
|
/* p_raw_delay is set even by "high delay" packets */
|
|
|
|
/* lastpkt_delay isn't */
|
2010-01-01 21:16:17 +05:30
|
|
|
double lastpkt_recv_time;
|
|
|
|
double lastpkt_delay;
|
|
|
|
double lastpkt_rootdelay;
|
|
|
|
double lastpkt_rootdisp;
|
|
|
|
/* produced by filter algorithm: */
|
|
|
|
double filter_offset;
|
|
|
|
double filter_dispersion;
|
|
|
|
double filter_jitter;
|
|
|
|
datapoint_t filter_datapoint[NUM_DATAPOINTS];
|
|
|
|
/* last sent packet: */
|
|
|
|
msg_t p_xmt_msg;
|
2016-06-06 05:56:49 +05:30
|
|
|
char p_hostname[1];
|
2010-01-01 21:16:17 +05:30
|
|
|
} peer_t;
|
|
|
|
|
|
|
|
enum {
|
|
|
|
OPT_n = (1 << 0),
|
|
|
|
OPT_q = (1 << 1),
|
|
|
|
OPT_N = (1 << 2),
|
|
|
|
OPT_x = (1 << 3),
|
2018-10-27 22:25:59 +05:30
|
|
|
OPT_k = (1 << 4) * ENABLE_FEATURE_NTP_AUTH,
|
2010-01-01 21:16:17 +05:30
|
|
|
/* Insert new options above this line. */
|
|
|
|
/* Non-compat options: */
|
2018-10-27 22:25:59 +05:30
|
|
|
OPT_w = (1 << (4+ENABLE_FEATURE_NTP_AUTH)),
|
|
|
|
OPT_p = (1 << (5+ENABLE_FEATURE_NTP_AUTH)),
|
|
|
|
OPT_S = (1 << (6+ENABLE_FEATURE_NTP_AUTH)),
|
|
|
|
OPT_l = (1 << (7+ENABLE_FEATURE_NTP_AUTH)) * ENABLE_FEATURE_NTPD_SERVER,
|
|
|
|
OPT_I = (1 << (8+ENABLE_FEATURE_NTP_AUTH)) * ENABLE_FEATURE_NTPD_SERVER,
|
2011-04-07 05:15:20 +05:30
|
|
|
/* We hijack some bits for other purposes */
|
2012-02-23 18:58:47 +05:30
|
|
|
OPT_qq = (1 << 31),
|
2010-01-01 21:16:17 +05:30
|
|
|
};
|
|
|
|
|
|
|
|
struct globals {
|
2010-01-03 13:29:59 +05:30
|
|
|
double cur_time;
|
2010-01-01 21:16:17 +05:30
|
|
|
/* total round trip delay to currently selected reference clock */
|
|
|
|
double rootdelay;
|
|
|
|
/* reference timestamp: time when the system clock was last set or corrected */
|
|
|
|
double reftime;
|
|
|
|
/* total dispersion to currently selected reference clock */
|
|
|
|
double rootdisp;
|
2010-01-06 16:57:47 +05:30
|
|
|
|
|
|
|
double last_script_run;
|
|
|
|
char *script_name;
|
2010-01-01 21:16:17 +05:30
|
|
|
llist_t *ntp_peers;
|
|
|
|
#if ENABLE_FEATURE_NTPD_SERVER
|
|
|
|
int listen_fd;
|
2014-07-15 18:36:54 +05:30
|
|
|
char *if_name;
|
2012-04-01 20:01:04 +05:30
|
|
|
# define G_listen_fd (G.listen_fd)
|
|
|
|
#else
|
|
|
|
# define G_listen_fd (-1)
|
2010-01-01 21:16:17 +05:30
|
|
|
#endif
|
|
|
|
unsigned verbose;
|
|
|
|
unsigned peer_cnt;
|
|
|
|
/* refid: 32-bit code identifying the particular server or reference clock
|
2012-03-02 05:52:40 +05:30
|
|
|
* in stratum 0 packets this is a four-character ASCII string,
|
|
|
|
* called the kiss code, used for debugging and monitoring
|
|
|
|
* in stratum 1 packets this is a four-character ASCII string
|
|
|
|
* assigned to the reference clock by IANA. Example: "GPS "
|
|
|
|
* in stratum 2+ packets, it's IPv4 address or 4 first bytes
|
|
|
|
* of MD5 hash of IPv6
|
2010-01-01 21:16:17 +05:30
|
|
|
*/
|
2020-07-20 03:34:33 +05:30
|
|
|
#if ENABLE_FEATURE_NTPD_SERVER
|
2010-01-01 21:16:17 +05:30
|
|
|
uint32_t refid;
|
2020-07-20 03:34:33 +05:30
|
|
|
#endif
|
2010-01-02 20:27:07 +05:30
|
|
|
uint8_t ntp_status;
|
2010-01-01 21:16:17 +05:30
|
|
|
/* precision is defined as the larger of the resolution and time to
|
|
|
|
* read the clock, in log2 units. For instance, the precision of a
|
|
|
|
* mains-frequency clock incrementing at 60 Hz is 16 ms, even when the
|
|
|
|
* system clock hardware representation is to the nanosecond.
|
|
|
|
*
|
2012-03-02 05:52:40 +05:30
|
|
|
* Delays, jitters of various kinds are clamped down to precision.
|
2010-01-01 21:16:17 +05:30
|
|
|
*
|
|
|
|
* If precision_sec is too large, discipline_jitter gets clamped to it
|
2012-03-02 05:52:40 +05:30
|
|
|
* and if offset is smaller than discipline_jitter * POLLADJ_GATE, poll
|
|
|
|
* interval grows even though we really can benefit from staying at
|
|
|
|
* smaller one, collecting non-lagged datapoits and correcting offset.
|
2010-01-01 21:16:17 +05:30
|
|
|
* (Lagged datapoits exist when poll_exp is large but we still have
|
|
|
|
* systematic offset error - the time distance between datapoints
|
2012-03-02 05:52:40 +05:30
|
|
|
* is significant and older datapoints have smaller offsets.
|
2010-01-01 21:16:17 +05:30
|
|
|
* This makes our offset estimation a bit smaller than reality)
|
|
|
|
* Due to this effect, setting G_precision_sec close to
|
|
|
|
* STEP_THRESHOLD isn't such a good idea - offsets may grow
|
|
|
|
* too big and we will step. I observed it with -6.
|
|
|
|
*
|
2012-03-02 05:52:40 +05:30
|
|
|
* OTOH, setting precision_sec far too small would result in futile
|
2017-04-17 19:43:32 +05:30
|
|
|
* attempts to synchronize to an unachievable precision.
|
2010-01-01 21:16:17 +05:30
|
|
|
*
|
|
|
|
* -6 is 1/64 sec, -7 is 1/128 sec and so on.
|
2012-03-02 05:52:40 +05:30
|
|
|
* -8 is 1/256 ~= 0.003906 (worked well for me --vda)
|
|
|
|
* -9 is 1/512 ~= 0.001953 (let's try this for some time)
|
2010-01-01 21:16:17 +05:30
|
|
|
*/
|
2012-03-02 05:52:40 +05:30
|
|
|
#define G_precision_exp -9
|
|
|
|
/*
|
2021-03-02 16:37:14 +05:30
|
|
|
* G_precision_exp is used only for constructing outgoing packets.
|
2012-03-02 05:52:40 +05:30
|
|
|
* It's ok to set G_precision_sec to a slightly different value
|
|
|
|
* (One which is "nicer looking" in logs).
|
|
|
|
* Exact value would be (1.0 / (1 << (- G_precision_exp))):
|
|
|
|
*/
|
|
|
|
#define G_precision_sec 0.002
|
2010-01-01 21:16:17 +05:30
|
|
|
uint8_t stratum;
|
|
|
|
|
2021-02-21 13:43:05 +05:30
|
|
|
//uint8_t discipline_state; // doc calls it c.state
|
2010-01-01 21:16:17 +05:30
|
|
|
uint8_t poll_exp; // s.poll
|
|
|
|
int polladj_count; // c.count
|
2018-03-11 01:55:53 +05:30
|
|
|
int FREQHOLD_cnt;
|
2010-01-02 00:26:16 +05:30
|
|
|
long kernel_freq_drift;
|
2010-01-17 07:21:33 +05:30
|
|
|
peer_t *last_update_peer;
|
2010-01-01 21:16:17 +05:30
|
|
|
double last_update_offset; // c.last
|
2010-01-02 00:26:16 +05:30
|
|
|
double last_update_recv_time; // s.t
|
|
|
|
double discipline_jitter; // c.jitter
|
2012-03-05 14:48:00 +05:30
|
|
|
/* Since we only compare it with ints, can simplify code
|
|
|
|
* by not making this variable floating point:
|
|
|
|
*/
|
|
|
|
unsigned offset_to_jitter_ratio;
|
2010-01-17 07:21:33 +05:30
|
|
|
//double cluster_offset; // s.offset
|
|
|
|
//double cluster_jitter; // s.jitter
|
2010-01-02 00:26:16 +05:30
|
|
|
#if !USING_KERNEL_PLL_LOOP
|
2010-01-01 21:16:17 +05:30
|
|
|
double discipline_freq_drift; // c.freq
|
2010-01-17 07:21:33 +05:30
|
|
|
/* Maybe conditionally calculate wander? it's used only for logging */
|
2010-01-01 21:16:17 +05:30
|
|
|
double discipline_wander; // c.wander
|
2010-01-02 00:26:16 +05:30
|
|
|
#endif
|
2010-01-01 21:16:17 +05:30
|
|
|
};
|
|
|
|
#define G (*ptr_to_globals)
|
|
|
|
|
2010-01-01 22:42:06 +05:30
|
|
|
#define VERB1 if (MAX_VERBOSE && G.verbose)
|
|
|
|
#define VERB2 if (MAX_VERBOSE >= 2 && G.verbose >= 2)
|
|
|
|
#define VERB3 if (MAX_VERBOSE >= 3 && G.verbose >= 3)
|
|
|
|
#define VERB4 if (MAX_VERBOSE >= 4 && G.verbose >= 4)
|
|
|
|
#define VERB5 if (MAX_VERBOSE >= 5 && G.verbose >= 5)
|
2013-12-04 21:02:09 +05:30
|
|
|
#define VERB6 if (MAX_VERBOSE >= 6 && G.verbose >= 6)
|
2010-01-01 22:42:06 +05:30
|
|
|
|
|
|
|
|
2010-01-01 21:16:17 +05:30
|
|
|
static double LOG2D(int a)
|
|
|
|
{
|
|
|
|
if (a < 0)
|
|
|
|
return 1.0 / (1UL << -a);
|
|
|
|
return 1UL << a;
|
|
|
|
}
|
|
|
|
static ALWAYS_INLINE double SQUARE(double x)
|
|
|
|
{
|
|
|
|
return x * x;
|
|
|
|
}
|
|
|
|
static ALWAYS_INLINE double MAXD(double a, double b)
|
|
|
|
{
|
|
|
|
if (a > b)
|
|
|
|
return a;
|
|
|
|
return b;
|
|
|
|
}
|
2019-10-25 16:35:15 +05:30
|
|
|
#if !USING_KERNEL_PLL_LOOP
|
2010-01-01 21:16:17 +05:30
|
|
|
static ALWAYS_INLINE double MIND(double a, double b)
|
|
|
|
{
|
|
|
|
if (a < b)
|
|
|
|
return a;
|
|
|
|
return b;
|
|
|
|
}
|
2019-10-25 16:35:15 +05:30
|
|
|
#endif
|
2010-01-04 01:36:27 +05:30
|
|
|
static NOINLINE double my_SQRT(double X)
|
|
|
|
{
|
|
|
|
union {
|
|
|
|
float f;
|
|
|
|
int32_t i;
|
|
|
|
} v;
|
|
|
|
double invsqrt;
|
|
|
|
double Xhalf = X * 0.5;
|
|
|
|
|
|
|
|
/* Fast and good approximation to 1/sqrt(X), black magic */
|
|
|
|
v.f = X;
|
|
|
|
/*v.i = 0x5f3759df - (v.i >> 1);*/
|
|
|
|
v.i = 0x5f375a86 - (v.i >> 1); /* - this constant is slightly better */
|
|
|
|
invsqrt = v.f; /* better than 0.2% accuracy */
|
|
|
|
|
|
|
|
/* Refining it using Newton's method: x1 = x0 - f(x0)/f'(x0)
|
|
|
|
* f(x) = 1/(x*x) - X (f==0 when x = 1/sqrt(X))
|
|
|
|
* f'(x) = -2/(x*x*x)
|
|
|
|
* f(x)/f'(x) = (X - 1/(x*x)) / (2/(x*x*x)) = X*x*x*x/2 - x/2
|
|
|
|
* x1 = x0 - (X*x0*x0*x0/2 - x0/2) = 1.5*x0 - X*x0*x0*x0/2 = x0*(1.5 - (X/2)*x0*x0)
|
|
|
|
*/
|
|
|
|
invsqrt = invsqrt * (1.5 - Xhalf * invsqrt * invsqrt); /* ~0.05% accuracy */
|
|
|
|
/* invsqrt = invsqrt * (1.5 - Xhalf * invsqrt * invsqrt); 2nd iter: ~0.0001% accuracy */
|
|
|
|
/* With 4 iterations, more than half results will be exact,
|
|
|
|
* at 6th iterations result stabilizes with about 72% results exact.
|
|
|
|
* We are well satisfied with 0.05% accuracy.
|
|
|
|
*/
|
|
|
|
|
|
|
|
return X * invsqrt; /* X * 1/sqrt(X) ~= sqrt(X) */
|
|
|
|
}
|
|
|
|
static ALWAYS_INLINE double SQRT(double X)
|
|
|
|
{
|
|
|
|
/* If this arch doesn't use IEEE 754 floats, fall back to using libm */
|
|
|
|
if (sizeof(float) != 4)
|
|
|
|
return sqrt(X);
|
|
|
|
|
2010-01-04 02:22:46 +05:30
|
|
|
/* This avoids needing libm, saves about 0.5k on x86-32 */
|
2010-01-04 01:36:27 +05:30
|
|
|
return my_SQRT(X);
|
|
|
|
}
|
2010-01-01 21:16:17 +05:30
|
|
|
|
|
|
|
static double
|
|
|
|
gettime1900d(void)
|
|
|
|
{
|
|
|
|
struct timeval tv;
|
2020-12-31 04:18:01 +05:30
|
|
|
xgettimeofday(&tv);
|
2010-01-03 13:29:59 +05:30
|
|
|
G.cur_time = tv.tv_sec + (1.0e-6 * tv.tv_usec) + OFFSET_1900_1970;
|
|
|
|
return G.cur_time;
|
2010-01-01 21:16:17 +05:30
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2021-03-26 16:32:08 +05:30
|
|
|
d_to_tv(struct timeval *tv, double d)
|
2010-01-01 21:16:17 +05:30
|
|
|
{
|
|
|
|
tv->tv_sec = (long)d;
|
|
|
|
tv->tv_usec = (d - tv->tv_sec) * 1000000;
|
|
|
|
}
|
|
|
|
|
2021-03-26 16:32:08 +05:30
|
|
|
static NOINLINE double
|
2010-01-01 21:16:17 +05:30
|
|
|
lfp_to_d(l_fixedpt_t lfp)
|
|
|
|
{
|
|
|
|
double ret;
|
|
|
|
lfp.int_partl = ntohl(lfp.int_partl);
|
|
|
|
lfp.fractionl = ntohl(lfp.fractionl);
|
|
|
|
ret = (double)lfp.int_partl + ((double)lfp.fractionl / UINT_MAX);
|
|
|
|
return ret;
|
|
|
|
}
|
2021-03-26 16:32:08 +05:30
|
|
|
static NOINLINE double
|
2010-01-01 21:16:17 +05:30
|
|
|
sfp_to_d(s_fixedpt_t sfp)
|
|
|
|
{
|
|
|
|
double ret;
|
|
|
|
sfp.int_parts = ntohs(sfp.int_parts);
|
|
|
|
sfp.fractions = ntohs(sfp.fractions);
|
|
|
|
ret = (double)sfp.int_parts + ((double)sfp.fractions / USHRT_MAX);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
#if ENABLE_FEATURE_NTPD_SERVER
|
2021-03-26 16:32:08 +05:30
|
|
|
static void
|
|
|
|
d_to_lfp(l_fixedpt_t *lfp, double d)
|
2010-01-01 21:16:17 +05:30
|
|
|
{
|
2021-03-26 16:32:08 +05:30
|
|
|
uint32_t intl;
|
|
|
|
uint32_t frac;
|
|
|
|
intl = (uint32_t)d;
|
|
|
|
frac = (uint32_t)((d - intl) * UINT_MAX);
|
|
|
|
lfp->int_partl = htonl(intl);
|
|
|
|
lfp->fractionl = htonl(frac);
|
2010-01-01 21:16:17 +05:30
|
|
|
}
|
2021-03-26 16:32:08 +05:30
|
|
|
static NOINLINE void
|
|
|
|
d_to_sfp(s_fixedpt_t *sfp, double d)
|
2010-01-01 21:16:17 +05:30
|
|
|
{
|
2021-03-26 16:32:08 +05:30
|
|
|
uint16_t ints;
|
|
|
|
uint16_t frac;
|
|
|
|
ints = (uint16_t)d;
|
|
|
|
frac = (uint16_t)((d - ints) * USHRT_MAX);
|
|
|
|
sfp->int_parts = htons(ints);
|
|
|
|
sfp->fractions = htons(frac);
|
2010-01-01 21:16:17 +05:30
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
static double
|
2010-01-03 13:29:59 +05:30
|
|
|
dispersion(const datapoint_t *dp)
|
2010-01-01 21:16:17 +05:30
|
|
|
{
|
2010-01-03 13:29:59 +05:30
|
|
|
return dp->d_dispersion + FREQ_TOLERANCE * (G.cur_time - dp->d_recv_time);
|
2010-01-01 21:16:17 +05:30
|
|
|
}
|
|
|
|
|
|
|
|
static double
|
2010-01-03 13:29:59 +05:30
|
|
|
root_distance(peer_t *p)
|
2010-01-01 21:16:17 +05:30
|
|
|
{
|
|
|
|
/* The root synchronization distance is the maximum error due to
|
|
|
|
* all causes of the local clock relative to the primary server.
|
|
|
|
* It is defined as half the total delay plus total dispersion
|
|
|
|
* plus peer jitter.
|
|
|
|
*/
|
|
|
|
return MAXD(MINDISP, p->lastpkt_rootdelay + p->lastpkt_delay) / 2
|
|
|
|
+ p->lastpkt_rootdisp
|
|
|
|
+ p->filter_dispersion
|
2010-01-03 13:29:59 +05:30
|
|
|
+ FREQ_TOLERANCE * (G.cur_time - p->lastpkt_recv_time)
|
2010-01-01 21:16:17 +05:30
|
|
|
+ p->filter_jitter;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
set_next(peer_t *p, unsigned t)
|
|
|
|
{
|
2010-01-03 13:29:59 +05:30
|
|
|
p->next_action_time = G.cur_time + t;
|
2010-01-01 21:16:17 +05:30
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Peer clock filter and its helpers
|
|
|
|
*/
|
|
|
|
static void
|
2010-01-03 13:29:59 +05:30
|
|
|
filter_datapoints(peer_t *p)
|
2010-01-01 21:16:17 +05:30
|
|
|
{
|
|
|
|
int i, idx;
|
2012-03-08 07:57:49 +05:30
|
|
|
double sum, wavg;
|
|
|
|
datapoint_t *fdp;
|
|
|
|
|
|
|
|
/* Simulations have shown that use of *averaged* offset for p->filter_offset
|
|
|
|
* is in fact worse than simply using last received one: with large poll intervals
|
|
|
|
* (>= 2048) averaging code uses offset values which are outdated by hours,
|
|
|
|
* and time/frequency correction goes totally wrong when fed essentially bogus offsets.
|
|
|
|
*/
|
|
|
|
fdp = p->filter_datapoint;
|
|
|
|
idx = p->datapoint_idx; /* most recent datapoint's index */
|
|
|
|
|
|
|
|
/* filter_offset: simply use the most recent value */
|
|
|
|
p->filter_offset = fdp[idx].d_offset;
|
|
|
|
|
|
|
|
/* n-1
|
|
|
|
* --- dispersion(i)
|
|
|
|
* filter_dispersion = \ -------------
|
|
|
|
* / (i+1)
|
|
|
|
* --- 2
|
|
|
|
* i=0
|
|
|
|
*/
|
|
|
|
wavg = 0;
|
|
|
|
sum = 0;
|
|
|
|
for (i = 0; i < NUM_DATAPOINTS; i++) {
|
|
|
|
sum += dispersion(&fdp[idx]) / (2 << i);
|
|
|
|
wavg += fdp[idx].d_offset;
|
|
|
|
idx = (idx - 1) & (NUM_DATAPOINTS - 1);
|
|
|
|
}
|
|
|
|
wavg /= NUM_DATAPOINTS;
|
|
|
|
p->filter_dispersion = sum;
|
|
|
|
|
2010-01-02 20:27:07 +05:30
|
|
|
/* +----- -----+ ^ 1/2
|
|
|
|
* | n-1 |
|
|
|
|
* | --- |
|
|
|
|
* | 1 \ 2 |
|
|
|
|
* filter_jitter = | --- * / (avg-offset_j) |
|
|
|
|
* | n --- |
|
|
|
|
* | j=0 |
|
|
|
|
* +----- -----+
|
|
|
|
* where n is the number of valid datapoints in the filter (n > 1);
|
|
|
|
* if filter_jitter < precision then filter_jitter = precision
|
|
|
|
*/
|
2010-01-01 21:16:17 +05:30
|
|
|
sum = 0;
|
|
|
|
for (i = 0; i < NUM_DATAPOINTS; i++) {
|
2012-03-08 07:57:49 +05:30
|
|
|
sum += SQUARE(wavg - fdp[i].d_offset);
|
2010-01-01 21:16:17 +05:30
|
|
|
}
|
2010-01-02 20:27:07 +05:30
|
|
|
sum = SQRT(sum / NUM_DATAPOINTS);
|
2010-01-01 21:16:17 +05:30
|
|
|
p->filter_jitter = sum > G_precision_sec ? sum : G_precision_sec;
|
|
|
|
|
2013-12-04 21:02:09 +05:30
|
|
|
VERB4 bb_error_msg("filter offset:%+f disp:%f jitter:%f",
|
2012-03-08 07:57:49 +05:30
|
|
|
p->filter_offset,
|
2010-01-02 05:06:43 +05:30
|
|
|
p->filter_dispersion,
|
|
|
|
p->filter_jitter);
|
2010-01-01 21:16:17 +05:30
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2010-01-03 13:29:59 +05:30
|
|
|
reset_peer_stats(peer_t *p, double offset)
|
2010-01-01 21:16:17 +05:30
|
|
|
{
|
|
|
|
int i;
|
2016-02-10 11:25:07 +05:30
|
|
|
bool small_ofs = fabs(offset) < STEP_THRESHOLD;
|
2010-01-17 05:35:58 +05:30
|
|
|
|
2013-12-07 21:59:03 +05:30
|
|
|
/* Used to set p->filter_datapoint[i].d_dispersion = MAXDISP
|
2017-04-17 19:43:32 +05:30
|
|
|
* and clear reachable bits, but this proved to be too aggressive:
|
2016-03-04 11:36:53 +05:30
|
|
|
* after step (tested with suspending laptop for ~30 secs),
|
2013-12-07 21:59:03 +05:30
|
|
|
* this caused all previous data to be considered invalid,
|
2017-01-19 19:12:34 +05:30
|
|
|
* making us needing to collect full ~8 datapoints per peer
|
2013-12-07 21:59:03 +05:30
|
|
|
* after step in order to start trusting them.
|
|
|
|
* In turn, this was making poll interval decrease even after
|
|
|
|
* step was done. (Poll interval decreases already before step
|
|
|
|
* in this scenario, because we see large offsets and end up with
|
|
|
|
* no good peer to select).
|
|
|
|
*/
|
|
|
|
|
2010-01-01 21:16:17 +05:30
|
|
|
for (i = 0; i < NUM_DATAPOINTS; i++) {
|
2010-01-17 05:35:58 +05:30
|
|
|
if (small_ofs) {
|
2010-06-24 23:53:40 +05:30
|
|
|
p->filter_datapoint[i].d_recv_time += offset;
|
2010-01-01 21:16:17 +05:30
|
|
|
if (p->filter_datapoint[i].d_offset != 0) {
|
2012-02-28 07:15:00 +05:30
|
|
|
p->filter_datapoint[i].d_offset -= offset;
|
|
|
|
//bb_error_msg("p->filter_datapoint[%d].d_offset %f -> %f",
|
|
|
|
// i,
|
|
|
|
// p->filter_datapoint[i].d_offset + offset,
|
|
|
|
// p->filter_datapoint[i].d_offset);
|
2010-01-01 21:16:17 +05:30
|
|
|
}
|
|
|
|
} else {
|
2010-01-03 13:29:59 +05:30
|
|
|
p->filter_datapoint[i].d_recv_time = G.cur_time;
|
2010-01-01 21:16:17 +05:30
|
|
|
p->filter_datapoint[i].d_offset = 0;
|
2013-12-07 21:59:03 +05:30
|
|
|
/*p->filter_datapoint[i].d_dispersion = MAXDISP;*/
|
2010-01-01 21:16:17 +05:30
|
|
|
}
|
|
|
|
}
|
2010-01-17 05:35:58 +05:30
|
|
|
if (small_ofs) {
|
2010-06-24 23:53:40 +05:30
|
|
|
p->lastpkt_recv_time += offset;
|
2010-01-01 21:16:17 +05:30
|
|
|
} else {
|
2013-12-07 21:59:03 +05:30
|
|
|
/*p->reachable_bits = 0;*/
|
2010-01-03 13:29:59 +05:30
|
|
|
p->lastpkt_recv_time = G.cur_time;
|
2010-01-01 21:16:17 +05:30
|
|
|
}
|
2010-01-03 13:29:59 +05:30
|
|
|
filter_datapoints(p); /* recalc p->filter_xxx */
|
2013-12-04 21:02:09 +05:30
|
|
|
VERB6 bb_error_msg("%s->lastpkt_recv_time=%f", p->p_dotted, p->lastpkt_recv_time);
|
2010-01-01 21:16:17 +05:30
|
|
|
}
|
|
|
|
|
2020-07-20 03:34:33 +05:30
|
|
|
#if ENABLE_FEATURE_NTPD_SERVER
|
|
|
|
static uint32_t calculate_refid(len_and_sockaddr *lsa)
|
|
|
|
{
|
|
|
|
# if ENABLE_FEATURE_IPV6
|
|
|
|
if (lsa->u.sa.sa_family == AF_INET6) {
|
|
|
|
md5_ctx_t md5;
|
|
|
|
uint32_t res[MD5_OUTSIZE / 4];
|
|
|
|
|
|
|
|
md5_begin(&md5);
|
|
|
|
md5_hash(&md5, &lsa->u.sin6.sin6_addr, sizeof(lsa->u.sin6.sin6_addr));
|
|
|
|
md5_end(&md5, res);
|
|
|
|
return res[0];
|
|
|
|
}
|
|
|
|
# endif
|
|
|
|
return lsa->u.sin.sin_addr.s_addr;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2017-01-06 20:48:45 +05:30
|
|
|
static len_and_sockaddr*
|
|
|
|
resolve_peer_hostname(peer_t *p)
|
2016-06-06 05:56:49 +05:30
|
|
|
{
|
2017-01-06 20:48:45 +05:30
|
|
|
len_and_sockaddr *lsa = host2sockaddr(p->p_hostname, 123);
|
|
|
|
if (lsa) {
|
|
|
|
free(p->p_lsa);
|
|
|
|
free(p->p_dotted);
|
|
|
|
p->p_lsa = lsa;
|
|
|
|
p->p_dotted = xmalloc_sockaddr2dotted_noport(&lsa->u.sa);
|
2017-01-19 19:12:34 +05:30
|
|
|
VERB1 if (strcmp(p->p_hostname, p->p_dotted) != 0)
|
|
|
|
bb_error_msg("'%s' is %s", p->p_hostname, p->p_dotted);
|
2020-07-20 03:34:33 +05:30
|
|
|
#if ENABLE_FEATURE_NTPD_SERVER
|
|
|
|
p->p_refid = calculate_refid(p->p_lsa);
|
|
|
|
#endif
|
2017-10-31 17:14:37 +05:30
|
|
|
p->dns_errors = 0;
|
2017-10-31 21:23:23 +05:30
|
|
|
return lsa;
|
2016-06-06 05:56:49 +05:30
|
|
|
}
|
2017-10-31 17:14:37 +05:30
|
|
|
p->dns_errors = ((p->dns_errors << 1) | 1) & DNS_ERRORS_CAP;
|
2017-01-06 20:48:45 +05:30
|
|
|
return lsa;
|
2016-06-06 05:56:49 +05:30
|
|
|
}
|
|
|
|
|
2018-10-27 22:25:59 +05:30
|
|
|
#if !ENABLE_FEATURE_NTP_AUTH
|
|
|
|
#define add_peers(s, key_entry) \
|
|
|
|
add_peers(s)
|
|
|
|
#endif
|
2010-01-01 21:16:17 +05:30
|
|
|
static void
|
2018-10-27 22:25:59 +05:30
|
|
|
add_peers(const char *s, key_entry_t *key_entry)
|
2010-01-01 21:16:17 +05:30
|
|
|
{
|
2016-03-04 11:36:53 +05:30
|
|
|
llist_t *item;
|
2010-01-01 21:16:17 +05:30
|
|
|
peer_t *p;
|
|
|
|
|
2016-06-06 05:56:49 +05:30
|
|
|
p = xzalloc(sizeof(*p) + strlen(s));
|
|
|
|
strcpy(p->p_hostname, s);
|
2017-01-06 20:48:45 +05:30
|
|
|
p->p_fd = -1;
|
|
|
|
p->p_xmt_msg.m_status = MODE_CLIENT | (NTP_VERSION << 3);
|
|
|
|
p->next_action_time = G.cur_time; /* = set_next(p, 0); */
|
|
|
|
reset_peer_stats(p, STEP_THRESHOLD);
|
2016-03-04 11:36:53 +05:30
|
|
|
|
|
|
|
/* Names like N.<country2chars>.pool.ntp.org are randomly resolved
|
|
|
|
* to a pool of machines. Sometimes different N's resolve to the same IP.
|
|
|
|
* It is not useful to have two peers with same IP. We skip duplicates.
|
|
|
|
*/
|
2017-01-06 20:48:45 +05:30
|
|
|
if (resolve_peer_hostname(p)) {
|
|
|
|
for (item = G.ntp_peers; item != NULL; item = item->link) {
|
|
|
|
peer_t *pp = (peer_t *) item->data;
|
|
|
|
if (pp->p_dotted && strcmp(p->p_dotted, pp->p_dotted) == 0) {
|
|
|
|
bb_error_msg("duplicate peer %s (%s)", s, p->p_dotted);
|
|
|
|
free(p->p_lsa);
|
|
|
|
free(p->p_dotted);
|
|
|
|
free(p);
|
|
|
|
return;
|
|
|
|
}
|
2016-03-04 11:36:53 +05:30
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-10-27 22:25:59 +05:30
|
|
|
IF_FEATURE_NTP_AUTH(p->key_entry = key_entry;)
|
2010-01-01 21:16:17 +05:30
|
|
|
llist_add_to(&G.ntp_peers, p);
|
|
|
|
G.peer_cnt++;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
do_sendto(int fd,
|
|
|
|
const struct sockaddr *from, const struct sockaddr *to, socklen_t addrlen,
|
|
|
|
msg_t *msg, ssize_t len)
|
|
|
|
{
|
|
|
|
ssize_t ret;
|
|
|
|
|
|
|
|
errno = 0;
|
|
|
|
if (!from) {
|
|
|
|
ret = sendto(fd, msg, len, MSG_DONTWAIT, to, addrlen);
|
|
|
|
} else {
|
|
|
|
ret = send_to_from(fd, msg, len, MSG_DONTWAIT, to, from, addrlen);
|
|
|
|
}
|
|
|
|
if (ret != len) {
|
libbb: reduce the overhead of single parameter bb_error_msg() calls
Back in 2007, commit 0c97c9d43707 ("'simple' error message functions by
Loic Grenie") introduced bb_simple_perror_msg() to allow for a lower
overhead call to bb_perror_msg() when only a string was being printed
with no parameters. This saves space for some CPU architectures because
it avoids the overhead of a call to a variadic function. However there
has never been a simple version of bb_error_msg(), and since 2007 many
new calls to bb_perror_msg() have been added that only take a single
parameter and so could have been using bb_simple_perror_message().
This changeset introduces 'simple' versions of bb_info_msg(),
bb_error_msg(), bb_error_msg_and_die(), bb_herror_msg() and
bb_herror_msg_and_die(), and replaces all calls that only take a
single parameter, or use something like ("%s", arg), with calls to the
corresponding 'simple' version.
Since it is likely that single parameter calls to the variadic functions
may be accidentally reintroduced in the future a new debugging config
option WARN_SIMPLE_MSG has been introduced. This uses some macro magic
which will cause any such calls to generate a warning, but this is
turned off by default to avoid use of the unpleasant macros in normal
circumstances.
This is a large changeset due to the number of calls that have been
replaced. The only files that contain changes other than simple
substitution of function calls are libbb.h, libbb/herror_msg.c,
libbb/verror_msg.c and libbb/xfuncs_printf.c. In miscutils/devfsd.c,
networking/udhcp/common.h and util-linux/mdev.c additonal macros have
been added for logging so that single parameter and multiple parameter
logging variants exist.
The amount of space saved varies considerably by architecture, and was
found to be as follows (for 'defconfig' using GCC 7.4):
Arm: -92 bytes
MIPS: -52 bytes
PPC: -1836 bytes
x86_64: -938 bytes
Note that for the MIPS architecture only an exception had to be made
disabling the 'simple' calls for 'udhcp' (in networking/udhcp/common.h)
because it made these files larger on MIPS.
Signed-off-by: James Byrne <james.byrne@origamienergy.com>
Signed-off-by: Denys Vlasenko <vda.linux@googlemail.com>
2019-07-02 15:05:03 +05:30
|
|
|
bb_simple_perror_msg("send failed");
|
2010-01-01 21:16:17 +05:30
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-10-27 22:25:59 +05:30
|
|
|
#if ENABLE_FEATURE_NTP_AUTH
|
|
|
|
static void
|
|
|
|
hash(key_entry_t *key_entry, const msg_t *msg, uint8_t *output)
|
|
|
|
{
|
|
|
|
union {
|
|
|
|
md5_ctx_t m;
|
|
|
|
sha1_ctx_t s;
|
|
|
|
} ctx;
|
|
|
|
unsigned hash_size = sizeof(*msg) - sizeof(msg->m_keyid) - sizeof(msg->m_digest);
|
|
|
|
|
|
|
|
switch (key_entry->type) {
|
|
|
|
case HASH_MD5:
|
|
|
|
md5_begin(&ctx.m);
|
|
|
|
md5_hash(&ctx.m, key_entry->key, key_entry->key_length);
|
|
|
|
md5_hash(&ctx.m, msg, hash_size);
|
|
|
|
md5_end(&ctx.m, output);
|
|
|
|
break;
|
|
|
|
default: /* it's HASH_SHA1 */
|
|
|
|
sha1_begin(&ctx.s);
|
|
|
|
sha1_hash(&ctx.s, key_entry->key, key_entry->key_length);
|
|
|
|
sha1_hash(&ctx.s, msg, hash_size);
|
|
|
|
sha1_end(&ctx.s, output);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
hash_peer(peer_t *p)
|
|
|
|
{
|
|
|
|
p->p_xmt_msg.m_keyid = htonl(p->key_entry->id);
|
|
|
|
hash(p->key_entry, &p->p_xmt_msg, p->p_xmt_msg.m_digest);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
hashes_differ(peer_t *p, const msg_t *msg)
|
|
|
|
{
|
|
|
|
uint8_t digest[NTP_SHA1_DIGESTSIZE];
|
|
|
|
hash(p->key_entry, msg, digest);
|
|
|
|
return memcmp(digest, msg->m_digest, p->key_entry->msg_size - NTP_MSGSIZE_NOAUTH - KEYID_SIZE);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2010-01-03 13:29:59 +05:30
|
|
|
static void
|
2010-01-01 21:16:17 +05:30
|
|
|
send_query_to_peer(peer_t *p)
|
|
|
|
{
|
2017-10-28 21:24:24 +05:30
|
|
|
if (!p->p_lsa)
|
|
|
|
return;
|
2017-01-06 20:48:45 +05:30
|
|
|
|
2010-01-02 20:27:07 +05:30
|
|
|
/* Why do we need to bind()?
|
|
|
|
* See what happens when we don't bind:
|
|
|
|
*
|
|
|
|
* socket(PF_INET, SOCK_DGRAM, IPPROTO_IP) = 3
|
|
|
|
* setsockopt(3, SOL_IP, IP_TOS, [16], 4) = 0
|
|
|
|
* gettimeofday({1259071266, 327885}, NULL) = 0
|
|
|
|
* sendto(3, "xxx", 48, MSG_DONTWAIT, {sa_family=AF_INET, sin_port=htons(123), sin_addr=inet_addr("10.34.32.125")}, 16) = 48
|
|
|
|
* ^^^ we sent it from some source port picked by kernel.
|
|
|
|
* time(NULL) = 1259071266
|
|
|
|
* write(2, "ntpd: entering poll 15 secs\n", 28) = 28
|
|
|
|
* poll([{fd=3, events=POLLIN}], 1, 15000) = 1 ([{fd=3, revents=POLLIN}])
|
|
|
|
* recv(3, "yyy", 68, MSG_DONTWAIT) = 48
|
|
|
|
* ^^^ this recv will receive packets to any local port!
|
|
|
|
*
|
|
|
|
* Uncomment this and use strace to see it in action:
|
|
|
|
*/
|
|
|
|
#define PROBE_LOCAL_ADDR /* { len_and_sockaddr lsa; lsa.len = LSA_SIZEOF_SA; getsockname(p->query.fd, &lsa.u.sa, &lsa.len); } */
|
2010-01-01 21:16:17 +05:30
|
|
|
|
|
|
|
if (p->p_fd == -1) {
|
|
|
|
int fd, family;
|
|
|
|
len_and_sockaddr *local_lsa;
|
|
|
|
|
|
|
|
family = p->p_lsa->u.sa.sa_family;
|
|
|
|
p->p_fd = fd = xsocket_type(&local_lsa, family, SOCK_DGRAM);
|
|
|
|
/* local_lsa has "null" address and port 0 now.
|
|
|
|
* bind() ensures we have a *particular port* selected by kernel
|
|
|
|
* and remembered in p->p_fd, thus later recv(p->p_fd)
|
|
|
|
* receives only packets sent to this port.
|
|
|
|
*/
|
|
|
|
PROBE_LOCAL_ADDR
|
|
|
|
xbind(fd, &local_lsa->u.sa, local_lsa->len);
|
|
|
|
PROBE_LOCAL_ADDR
|
|
|
|
#if ENABLE_FEATURE_IPV6
|
|
|
|
if (family == AF_INET)
|
|
|
|
#endif
|
2018-04-15 22:07:50 +05:30
|
|
|
setsockopt_int(fd, IPPROTO_IP, IP_TOS, IPTOS_DSCP_AF21);
|
2010-01-01 21:16:17 +05:30
|
|
|
free(local_lsa);
|
|
|
|
}
|
|
|
|
|
2012-03-03 16:45:46 +05:30
|
|
|
/* Emit message _before_ attempted send. Think of a very short
|
|
|
|
* roundtrip networks: we need to go back to recv loop ASAP,
|
|
|
|
* to reduce delay. Printing messages after send works against that.
|
|
|
|
*/
|
|
|
|
VERB1 bb_error_msg("sending query to %s", p->p_dotted);
|
|
|
|
|
2010-01-01 21:16:17 +05:30
|
|
|
/*
|
|
|
|
* Send out a random 64-bit number as our transmit time. The NTP
|
|
|
|
* server will copy said number into the originate field on the
|
|
|
|
* response that it sends us. This is totally legal per the SNTP spec.
|
|
|
|
*
|
|
|
|
* The impact of this is two fold: we no longer send out the current
|
|
|
|
* system time for the world to see (which may aid an attacker), and
|
|
|
|
* it gives us a (not very secure) way of knowing that we're not
|
|
|
|
* getting spoofed by an attacker that can't capture our traffic
|
|
|
|
* but can spoof packets from the NTP server we're communicating with.
|
|
|
|
*
|
|
|
|
* Save the real transmit timestamp locally.
|
|
|
|
*/
|
2014-03-05 23:28:15 +05:30
|
|
|
p->p_xmt_msg.m_xmttime.int_partl = rand();
|
|
|
|
p->p_xmt_msg.m_xmttime.fractionl = rand();
|
2010-01-01 21:16:17 +05:30
|
|
|
p->p_xmttime = gettime1900d();
|
|
|
|
|
2013-05-23 19:36:59 +05:30
|
|
|
/* Were doing it only if sendto worked, but
|
2013-05-22 21:46:34 +05:30
|
|
|
* loss of sync detection needs reachable_bits updated
|
|
|
|
* even if sending fails *locally*:
|
|
|
|
* "network is unreachable" because cable was pulled?
|
|
|
|
* We still need to declare "unsync" if this condition persists.
|
|
|
|
*/
|
|
|
|
p->reachable_bits <<= 1;
|
|
|
|
|
2018-10-27 22:25:59 +05:30
|
|
|
#if ENABLE_FEATURE_NTP_AUTH
|
|
|
|
if (p->key_entry)
|
|
|
|
hash_peer(p);
|
2010-01-01 21:16:17 +05:30
|
|
|
if (do_sendto(p->p_fd, /*from:*/ NULL, /*to:*/ &p->p_lsa->u.sa, /*addrlen:*/ p->p_lsa->len,
|
2018-10-27 22:25:59 +05:30
|
|
|
&p->p_xmt_msg, !p->key_entry ? NTP_MSGSIZE_NOAUTH : p->key_entry->msg_size) == -1
|
|
|
|
)
|
|
|
|
#else
|
|
|
|
if (do_sendto(p->p_fd, /*from:*/ NULL, /*to:*/ &p->p_lsa->u.sa, /*addrlen:*/ p->p_lsa->len,
|
|
|
|
&p->p_xmt_msg, NTP_MSGSIZE_NOAUTH) == -1
|
|
|
|
)
|
|
|
|
#endif
|
|
|
|
{
|
2010-01-01 21:16:17 +05:30
|
|
|
close(p->p_fd);
|
|
|
|
p->p_fd = -1;
|
2013-05-23 19:36:59 +05:30
|
|
|
/*
|
|
|
|
* We know that we sent nothing.
|
|
|
|
* We can retry *soon* without fearing
|
|
|
|
* that we are flooding the peer.
|
|
|
|
*/
|
2010-01-01 21:16:17 +05:30
|
|
|
set_next(p, RETRY_INTERVAL);
|
2010-01-03 13:29:59 +05:30
|
|
|
return;
|
2010-01-01 21:16:17 +05:30
|
|
|
}
|
|
|
|
|
2010-01-03 13:29:59 +05:30
|
|
|
set_next(p, RESPONSE_INTERVAL);
|
2010-01-01 21:16:17 +05:30
|
|
|
}
|
|
|
|
|
2010-01-26 00:00:16 +05:30
|
|
|
/* Note that there is no provision to prevent several run_scripts
|
2013-05-23 19:36:59 +05:30
|
|
|
* to be started in quick succession. In fact, it happens rather often
|
2010-01-26 00:00:16 +05:30
|
|
|
* if initial syncronization results in a step.
|
|
|
|
* You will see "step" and then "stratum" script runs, sometimes
|
|
|
|
* as close as only 0.002 seconds apart.
|
|
|
|
* Script should be ready to deal with this.
|
|
|
|
*/
|
2010-01-11 06:01:59 +05:30
|
|
|
static void run_script(const char *action, double offset)
|
2010-01-06 16:57:47 +05:30
|
|
|
{
|
|
|
|
char *argv[3];
|
2010-01-11 06:01:59 +05:30
|
|
|
char *env1, *env2, *env3, *env4;
|
2010-01-06 16:57:47 +05:30
|
|
|
|
2013-05-22 21:48:51 +05:30
|
|
|
G.last_script_run = G.cur_time;
|
|
|
|
|
2010-01-06 16:57:47 +05:30
|
|
|
if (!G.script_name)
|
|
|
|
return;
|
|
|
|
|
|
|
|
argv[0] = (char*) G.script_name;
|
|
|
|
argv[1] = (char*) action;
|
|
|
|
argv[2] = NULL;
|
|
|
|
|
|
|
|
VERB1 bb_error_msg("executing '%s %s'", G.script_name, action);
|
|
|
|
|
2010-01-07 16:21:13 +05:30
|
|
|
env1 = xasprintf("%s=%u", "stratum", G.stratum);
|
2010-01-06 16:57:47 +05:30
|
|
|
putenv(env1);
|
2010-01-07 16:21:13 +05:30
|
|
|
env2 = xasprintf("%s=%ld", "freq_drift_ppm", G.kernel_freq_drift);
|
2010-01-06 16:57:47 +05:30
|
|
|
putenv(env2);
|
2010-01-07 16:21:13 +05:30
|
|
|
env3 = xasprintf("%s=%u", "poll_interval", 1 << G.poll_exp);
|
|
|
|
putenv(env3);
|
2010-01-11 06:01:59 +05:30
|
|
|
env4 = xasprintf("%s=%f", "offset", offset);
|
|
|
|
putenv(env4);
|
2010-01-06 16:57:47 +05:30
|
|
|
/* Other items of potential interest: selected peer,
|
2010-01-07 16:21:13 +05:30
|
|
|
* rootdelay, reftime, rootdisp, refid, ntp_status,
|
2010-01-11 06:01:59 +05:30
|
|
|
* last_update_offset, last_update_recv_time, discipline_jitter,
|
|
|
|
* how many peers have reachable_bits = 0?
|
2010-01-06 16:57:47 +05:30
|
|
|
*/
|
|
|
|
|
2010-01-07 13:01:46 +05:30
|
|
|
/* Don't want to wait: it may run hwclock --systohc, and that
|
|
|
|
* may take some time (seconds): */
|
2010-03-19 03:14:00 +05:30
|
|
|
/*spawn_and_wait(argv);*/
|
2010-01-07 13:01:46 +05:30
|
|
|
spawn(argv);
|
2010-01-06 16:57:47 +05:30
|
|
|
|
|
|
|
unsetenv("stratum");
|
|
|
|
unsetenv("freq_drift_ppm");
|
2010-01-07 16:21:13 +05:30
|
|
|
unsetenv("poll_interval");
|
2010-01-11 06:01:59 +05:30
|
|
|
unsetenv("offset");
|
2010-01-06 16:57:47 +05:30
|
|
|
free(env1);
|
|
|
|
free(env2);
|
2010-01-07 16:21:13 +05:30
|
|
|
free(env3);
|
2010-01-11 06:01:59 +05:30
|
|
|
free(env4);
|
2010-01-06 16:57:47 +05:30
|
|
|
}
|
|
|
|
|
2010-01-03 13:29:59 +05:30
|
|
|
static NOINLINE void
|
2010-01-01 21:16:17 +05:30
|
|
|
step_time(double offset)
|
|
|
|
{
|
2010-01-03 13:29:59 +05:30
|
|
|
llist_t *item;
|
2010-01-01 21:16:17 +05:30
|
|
|
double dtime;
|
2012-02-28 07:15:00 +05:30
|
|
|
struct timeval tvc, tvn;
|
|
|
|
char buf[sizeof("yyyy-mm-dd hh:mm:ss") + /*paranoia:*/ 4];
|
2010-01-01 21:16:17 +05:30
|
|
|
time_t tval;
|
|
|
|
|
2020-12-31 04:18:01 +05:30
|
|
|
xgettimeofday(&tvc);
|
2012-02-28 07:15:00 +05:30
|
|
|
dtime = tvc.tv_sec + (1.0e-6 * tvc.tv_usec) + offset;
|
2021-03-26 16:32:08 +05:30
|
|
|
d_to_tv(&tvn, dtime);
|
2020-12-17 02:06:36 +05:30
|
|
|
xsettimeofday(&tvn);
|
2010-01-01 21:16:17 +05:30
|
|
|
|
2012-02-28 07:15:00 +05:30
|
|
|
VERB2 {
|
|
|
|
tval = tvc.tv_sec;
|
2013-03-29 17:00:33 +05:30
|
|
|
strftime_YYYYMMDDHHMMSS(buf, sizeof(buf), &tval);
|
2012-02-28 07:15:00 +05:30
|
|
|
bb_error_msg("current time is %s.%06u", buf, (unsigned)tvc.tv_usec);
|
|
|
|
}
|
|
|
|
tval = tvn.tv_sec;
|
2013-03-29 17:00:33 +05:30
|
|
|
strftime_YYYYMMDDHHMMSS(buf, sizeof(buf), &tval);
|
2019-04-12 22:31:51 +05:30
|
|
|
bb_info_msg("setting time to %s.%06u (offset %+fs)", buf, (unsigned)tvn.tv_usec, offset);
|
2018-03-11 01:55:53 +05:30
|
|
|
//maybe? G.FREQHOLD_cnt = 0;
|
2010-01-03 13:29:59 +05:30
|
|
|
|
|
|
|
/* Correct various fields which contain time-relative values: */
|
|
|
|
|
2012-06-11 15:11:46 +05:30
|
|
|
/* Globals: */
|
|
|
|
G.cur_time += offset;
|
|
|
|
G.last_update_recv_time += offset;
|
|
|
|
G.last_script_run += offset;
|
|
|
|
|
2010-01-03 13:29:59 +05:30
|
|
|
/* p->lastpkt_recv_time, p->next_action_time and such: */
|
|
|
|
for (item = G.ntp_peers; item != NULL; item = item->link) {
|
|
|
|
peer_t *pp = (peer_t *) item->data;
|
|
|
|
reset_peer_stats(pp, offset);
|
2012-02-23 18:58:47 +05:30
|
|
|
//bb_error_msg("offset:%+f pp->next_action_time:%f -> %f",
|
2010-06-24 23:53:40 +05:30
|
|
|
// offset, pp->next_action_time, pp->next_action_time + offset);
|
|
|
|
pp->next_action_time += offset;
|
2012-06-11 15:11:46 +05:30
|
|
|
if (pp->p_fd >= 0) {
|
|
|
|
/* We wait for reply from this peer too.
|
|
|
|
* But due to step we are doing, reply's data is no longer
|
|
|
|
* useful (in fact, it'll be bogus). Stop waiting for it.
|
|
|
|
*/
|
|
|
|
close(pp->p_fd);
|
|
|
|
pp->p_fd = -1;
|
|
|
|
set_next(pp, RETRY_INTERVAL);
|
|
|
|
}
|
2010-01-03 13:29:59 +05:30
|
|
|
}
|
2010-01-01 21:16:17 +05:30
|
|
|
}
|
|
|
|
|
2014-09-28 02:26:09 +05:30
|
|
|
static void clamp_pollexp_and_set_MAXSTRAT(void)
|
|
|
|
{
|
|
|
|
if (G.poll_exp < MINPOLL)
|
|
|
|
G.poll_exp = MINPOLL;
|
2014-10-02 20:48:43 +05:30
|
|
|
if (G.poll_exp > BIGPOLL)
|
|
|
|
G.poll_exp = BIGPOLL;
|
2014-09-28 02:26:09 +05:30
|
|
|
G.polladj_count = 0;
|
|
|
|
G.stratum = MAXSTRAT;
|
|
|
|
}
|
|
|
|
|
2010-01-01 21:16:17 +05:30
|
|
|
|
|
|
|
/*
|
|
|
|
* Selection and clustering, and their helpers
|
|
|
|
*/
|
|
|
|
typedef struct {
|
|
|
|
peer_t *p;
|
|
|
|
int type;
|
|
|
|
double edge;
|
2010-01-17 07:21:33 +05:30
|
|
|
double opt_rd; /* optimization */
|
2010-01-01 21:16:17 +05:30
|
|
|
} point_t;
|
|
|
|
static int
|
|
|
|
compare_point_edge(const void *aa, const void *bb)
|
|
|
|
{
|
|
|
|
const point_t *a = aa;
|
|
|
|
const point_t *b = bb;
|
|
|
|
if (a->edge < b->edge) {
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
return (a->edge > b->edge);
|
|
|
|
}
|
|
|
|
typedef struct {
|
|
|
|
peer_t *p;
|
|
|
|
double metric;
|
|
|
|
} survivor_t;
|
|
|
|
static int
|
|
|
|
compare_survivor_metric(const void *aa, const void *bb)
|
|
|
|
{
|
|
|
|
const survivor_t *a = aa;
|
|
|
|
const survivor_t *b = bb;
|
2010-01-03 16:30:26 +05:30
|
|
|
if (a->metric < b->metric) {
|
2010-01-01 21:16:17 +05:30
|
|
|
return -1;
|
2010-01-03 16:30:26 +05:30
|
|
|
}
|
2010-01-01 21:16:17 +05:30
|
|
|
return (a->metric > b->metric);
|
|
|
|
}
|
|
|
|
static int
|
|
|
|
fit(peer_t *p, double rd)
|
|
|
|
{
|
2010-01-03 13:29:59 +05:30
|
|
|
if ((p->reachable_bits & (p->reachable_bits-1)) == 0) {
|
|
|
|
/* One or zero bits in reachable_bits */
|
2018-09-13 15:45:36 +05:30
|
|
|
VERB4 bb_error_msg("peer %s unfit for selection: "
|
|
|
|
"unreachable", p->p_dotted);
|
2010-01-01 21:16:17 +05:30
|
|
|
return 0;
|
|
|
|
}
|
2010-10-29 15:16:52 +05:30
|
|
|
#if 0 /* we filter out such packets earlier */
|
2010-01-02 20:27:07 +05:30
|
|
|
if ((p->lastpkt_status & LI_ALARM) == LI_ALARM
|
2010-01-01 21:16:17 +05:30
|
|
|
|| p->lastpkt_stratum >= MAXSTRAT
|
|
|
|
) {
|
2018-09-13 15:45:36 +05:30
|
|
|
VERB4 bb_error_msg("peer %s unfit for selection: "
|
|
|
|
"bad status/stratum", p->p_dotted);
|
2010-01-01 21:16:17 +05:30
|
|
|
return 0;
|
|
|
|
}
|
2010-01-02 20:27:07 +05:30
|
|
|
#endif
|
2010-01-03 13:29:59 +05:30
|
|
|
/* rd is root_distance(p) */
|
2010-01-01 21:16:17 +05:30
|
|
|
if (rd > MAXDIST + FREQ_TOLERANCE * (1 << G.poll_exp)) {
|
2018-09-13 15:45:36 +05:30
|
|
|
VERB3 bb_error_msg("peer %s unfit for selection: "
|
|
|
|
"root distance %f too high, jitter:%f",
|
|
|
|
p->p_dotted, rd, p->filter_jitter
|
|
|
|
);
|
2010-01-01 21:16:17 +05:30
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
//TODO
|
|
|
|
// /* Do we have a loop? */
|
|
|
|
// if (p->refid == p->dstaddr || p->refid == s.refid)
|
|
|
|
// return 0;
|
2011-02-03 04:35:48 +05:30
|
|
|
return 1;
|
2010-01-01 21:16:17 +05:30
|
|
|
}
|
|
|
|
static peer_t*
|
2010-01-03 13:29:59 +05:30
|
|
|
select_and_cluster(void)
|
2010-01-01 21:16:17 +05:30
|
|
|
{
|
2010-01-17 07:21:33 +05:30
|
|
|
peer_t *p;
|
2010-01-01 21:16:17 +05:30
|
|
|
llist_t *item;
|
|
|
|
int i, j;
|
|
|
|
int size = 3 * G.peer_cnt;
|
|
|
|
/* for selection algorithm */
|
|
|
|
point_t point[size];
|
|
|
|
unsigned num_points, num_candidates;
|
|
|
|
double low, high;
|
|
|
|
unsigned num_falsetickers;
|
|
|
|
/* for cluster algorithm */
|
|
|
|
survivor_t survivor[size];
|
|
|
|
unsigned num_survivors;
|
|
|
|
|
|
|
|
/* Selection */
|
|
|
|
|
|
|
|
num_points = 0;
|
|
|
|
item = G.ntp_peers;
|
2015-01-29 21:01:36 +05:30
|
|
|
while (item != NULL) {
|
2010-01-17 07:21:33 +05:30
|
|
|
double rd, offset;
|
2010-01-01 21:16:17 +05:30
|
|
|
|
2010-01-17 07:21:33 +05:30
|
|
|
p = (peer_t *) item->data;
|
|
|
|
rd = root_distance(p);
|
|
|
|
offset = p->filter_offset;
|
2010-01-01 21:16:17 +05:30
|
|
|
if (!fit(p, rd)) {
|
|
|
|
item = item->link;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2013-12-04 21:02:09 +05:30
|
|
|
VERB5 bb_error_msg("interval: [%f %f %f] %s",
|
2010-01-01 21:16:17 +05:30
|
|
|
offset - rd,
|
|
|
|
offset,
|
|
|
|
offset + rd,
|
|
|
|
p->p_dotted
|
|
|
|
);
|
|
|
|
point[num_points].p = p;
|
|
|
|
point[num_points].type = -1;
|
|
|
|
point[num_points].edge = offset - rd;
|
2010-01-17 07:21:33 +05:30
|
|
|
point[num_points].opt_rd = rd;
|
2010-01-01 21:16:17 +05:30
|
|
|
num_points++;
|
|
|
|
point[num_points].p = p;
|
|
|
|
point[num_points].type = 0;
|
|
|
|
point[num_points].edge = offset;
|
2010-01-17 07:21:33 +05:30
|
|
|
point[num_points].opt_rd = rd;
|
2010-01-01 21:16:17 +05:30
|
|
|
num_points++;
|
|
|
|
point[num_points].p = p;
|
|
|
|
point[num_points].type = 1;
|
|
|
|
point[num_points].edge = offset + rd;
|
2010-01-17 07:21:33 +05:30
|
|
|
point[num_points].opt_rd = rd;
|
2010-01-01 21:16:17 +05:30
|
|
|
num_points++;
|
|
|
|
item = item->link;
|
|
|
|
}
|
|
|
|
num_candidates = num_points / 3;
|
|
|
|
if (num_candidates == 0) {
|
2013-12-04 21:02:09 +05:30
|
|
|
VERB3 bb_error_msg("no valid datapoints%s", ", no peer selected");
|
2010-01-03 13:29:59 +05:30
|
|
|
return NULL;
|
2010-01-01 21:16:17 +05:30
|
|
|
}
|
|
|
|
//TODO: sorting does not seem to be done in reference code
|
|
|
|
qsort(point, num_points, sizeof(point[0]), compare_point_edge);
|
|
|
|
|
|
|
|
/* Start with the assumption that there are no falsetickers.
|
|
|
|
* Attempt to find a nonempty intersection interval containing
|
|
|
|
* the midpoints of all truechimers.
|
|
|
|
* If a nonempty interval cannot be found, increase the number
|
|
|
|
* of assumed falsetickers by one and try again.
|
|
|
|
* If a nonempty interval is found and the number of falsetickers
|
|
|
|
* is less than the number of truechimers, a majority has been found
|
|
|
|
* and the midpoint of each truechimer represents
|
|
|
|
* the candidates available to the cluster algorithm.
|
|
|
|
*/
|
|
|
|
num_falsetickers = 0;
|
|
|
|
while (1) {
|
|
|
|
int c;
|
|
|
|
unsigned num_midpoints = 0;
|
|
|
|
|
|
|
|
low = 1 << 9;
|
|
|
|
high = - (1 << 9);
|
|
|
|
c = 0;
|
|
|
|
for (i = 0; i < num_points; i++) {
|
|
|
|
/* We want to do:
|
|
|
|
* if (point[i].type == -1) c++;
|
|
|
|
* if (point[i].type == 1) c--;
|
|
|
|
* and it's simpler to do it this way:
|
|
|
|
*/
|
|
|
|
c -= point[i].type;
|
|
|
|
if (c >= num_candidates - num_falsetickers) {
|
|
|
|
/* If it was c++ and it got big enough... */
|
|
|
|
low = point[i].edge;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (point[i].type == 0)
|
|
|
|
num_midpoints++;
|
|
|
|
}
|
|
|
|
c = 0;
|
|
|
|
for (i = num_points-1; i >= 0; i--) {
|
|
|
|
c += point[i].type;
|
|
|
|
if (c >= num_candidates - num_falsetickers) {
|
|
|
|
high = point[i].edge;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (point[i].type == 0)
|
|
|
|
num_midpoints++;
|
|
|
|
}
|
|
|
|
/* If the number of midpoints is greater than the number
|
|
|
|
* of allowed falsetickers, the intersection contains at
|
|
|
|
* least one truechimer with no midpoint - bad.
|
|
|
|
* Also, interval should be nonempty.
|
|
|
|
*/
|
|
|
|
if (num_midpoints <= num_falsetickers && low < high)
|
|
|
|
break;
|
|
|
|
num_falsetickers++;
|
|
|
|
if (num_falsetickers * 2 >= num_candidates) {
|
2013-12-04 21:02:09 +05:30
|
|
|
VERB3 bb_error_msg("falsetickers:%d, candidates:%d%s",
|
|
|
|
num_falsetickers, num_candidates,
|
|
|
|
", no peer selected");
|
2010-01-01 21:16:17 +05:30
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
}
|
2013-12-04 21:02:09 +05:30
|
|
|
VERB4 bb_error_msg("selected interval: [%f, %f]; candidates:%d falsetickers:%d",
|
2010-01-01 21:16:17 +05:30
|
|
|
low, high, num_candidates, num_falsetickers);
|
|
|
|
|
|
|
|
/* Clustering */
|
|
|
|
|
|
|
|
/* Construct a list of survivors (p, metric)
|
|
|
|
* from the chime list, where metric is dominated
|
|
|
|
* first by stratum and then by root distance.
|
|
|
|
* All other things being equal, this is the order of preference.
|
|
|
|
*/
|
|
|
|
num_survivors = 0;
|
|
|
|
for (i = 0; i < num_points; i++) {
|
|
|
|
if (point[i].edge < low || point[i].edge > high)
|
|
|
|
continue;
|
|
|
|
p = point[i].p;
|
|
|
|
survivor[num_survivors].p = p;
|
2010-01-17 07:21:33 +05:30
|
|
|
/* x.opt_rd == root_distance(p); */
|
|
|
|
survivor[num_survivors].metric = MAXDIST * p->lastpkt_stratum + point[i].opt_rd;
|
2013-12-04 21:02:09 +05:30
|
|
|
VERB5 bb_error_msg("survivor[%d] metric:%f peer:%s",
|
2010-01-01 21:16:17 +05:30
|
|
|
num_survivors, survivor[num_survivors].metric, p->p_dotted);
|
|
|
|
num_survivors++;
|
|
|
|
}
|
|
|
|
/* There must be at least MIN_SELECTED survivors to satisfy the
|
|
|
|
* correctness assertions. Ordinarily, the Byzantine criteria
|
|
|
|
* require four survivors, but for the demonstration here, one
|
|
|
|
* is acceptable.
|
|
|
|
*/
|
|
|
|
if (num_survivors < MIN_SELECTED) {
|
2013-12-04 21:02:09 +05:30
|
|
|
VERB3 bb_error_msg("survivors:%d%s",
|
|
|
|
num_survivors,
|
|
|
|
", no peer selected");
|
2010-01-01 21:16:17 +05:30
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
//looks like this is ONLY used by the fact that later we pick survivor[0].
|
|
|
|
//we can avoid sorting then, just find the minimum once!
|
|
|
|
qsort(survivor, num_survivors, sizeof(survivor[0]), compare_survivor_metric);
|
|
|
|
|
|
|
|
/* For each association p in turn, calculate the selection
|
|
|
|
* jitter p->sjitter as the square root of the sum of squares
|
|
|
|
* (p->offset - q->offset) over all q associations. The idea is
|
|
|
|
* to repeatedly discard the survivor with maximum selection
|
|
|
|
* jitter until a termination condition is met.
|
|
|
|
*/
|
|
|
|
while (1) {
|
|
|
|
unsigned max_idx = max_idx;
|
|
|
|
double max_selection_jitter = max_selection_jitter;
|
|
|
|
double min_jitter = min_jitter;
|
|
|
|
|
|
|
|
if (num_survivors <= MIN_CLUSTERED) {
|
2013-12-04 21:02:09 +05:30
|
|
|
VERB4 bb_error_msg("num_survivors %d <= %d, not discarding more",
|
2010-01-01 21:16:17 +05:30
|
|
|
num_survivors, MIN_CLUSTERED);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* To make sure a few survivors are left
|
|
|
|
* for the clustering algorithm to chew on,
|
|
|
|
* we stop if the number of survivors
|
|
|
|
* is less than or equal to MIN_CLUSTERED (3).
|
|
|
|
*/
|
|
|
|
for (i = 0; i < num_survivors; i++) {
|
|
|
|
double selection_jitter_sq;
|
|
|
|
|
2010-01-17 07:21:33 +05:30
|
|
|
p = survivor[i].p;
|
2010-01-01 21:16:17 +05:30
|
|
|
if (i == 0 || p->filter_jitter < min_jitter)
|
|
|
|
min_jitter = p->filter_jitter;
|
|
|
|
|
|
|
|
selection_jitter_sq = 0;
|
|
|
|
for (j = 0; j < num_survivors; j++) {
|
|
|
|
peer_t *q = survivor[j].p;
|
|
|
|
selection_jitter_sq += SQUARE(p->filter_offset - q->filter_offset);
|
|
|
|
}
|
|
|
|
if (i == 0 || selection_jitter_sq > max_selection_jitter) {
|
|
|
|
max_selection_jitter = selection_jitter_sq;
|
|
|
|
max_idx = i;
|
|
|
|
}
|
2013-12-04 21:02:09 +05:30
|
|
|
VERB6 bb_error_msg("survivor %d selection_jitter^2:%f",
|
2010-01-01 21:16:17 +05:30
|
|
|
i, selection_jitter_sq);
|
|
|
|
}
|
2010-01-02 20:27:07 +05:30
|
|
|
max_selection_jitter = SQRT(max_selection_jitter / num_survivors);
|
2013-12-04 21:02:09 +05:30
|
|
|
VERB5 bb_error_msg("max_selection_jitter (at %d):%f min_jitter:%f",
|
2010-01-01 21:16:17 +05:30
|
|
|
max_idx, max_selection_jitter, min_jitter);
|
|
|
|
|
|
|
|
/* If the maximum selection jitter is less than the
|
|
|
|
* minimum peer jitter, then tossing out more survivors
|
|
|
|
* will not lower the minimum peer jitter, so we might
|
|
|
|
* as well stop.
|
|
|
|
*/
|
|
|
|
if (max_selection_jitter < min_jitter) {
|
2013-12-04 21:02:09 +05:30
|
|
|
VERB4 bb_error_msg("max_selection_jitter:%f < min_jitter:%f, num_survivors:%d, not discarding more",
|
2010-01-01 21:16:17 +05:30
|
|
|
max_selection_jitter, min_jitter, num_survivors);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Delete survivor[max_idx] from the list
|
|
|
|
* and go around again.
|
|
|
|
*/
|
2013-12-04 21:02:09 +05:30
|
|
|
VERB6 bb_error_msg("dropping survivor %d", max_idx);
|
2010-01-01 21:16:17 +05:30
|
|
|
num_survivors--;
|
|
|
|
while (max_idx < num_survivors) {
|
|
|
|
survivor[max_idx] = survivor[max_idx + 1];
|
|
|
|
max_idx++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-01-17 07:21:33 +05:30
|
|
|
if (0) {
|
|
|
|
/* Combine the offsets of the clustering algorithm survivors
|
|
|
|
* using a weighted average with weight determined by the root
|
|
|
|
* distance. Compute the selection jitter as the weighted RMS
|
|
|
|
* difference between the first survivor and the remaining
|
|
|
|
* survivors. In some cases the inherent clock jitter can be
|
|
|
|
* reduced by not using this algorithm, especially when frequent
|
|
|
|
* clockhopping is involved. bbox: thus we don't do it.
|
|
|
|
*/
|
|
|
|
double x, y, z, w;
|
|
|
|
y = z = w = 0;
|
|
|
|
for (i = 0; i < num_survivors; i++) {
|
|
|
|
p = survivor[i].p;
|
|
|
|
x = root_distance(p);
|
|
|
|
y += 1 / x;
|
|
|
|
z += p->filter_offset / x;
|
|
|
|
w += SQUARE(p->filter_offset - survivor[0].p->filter_offset) / x;
|
|
|
|
}
|
|
|
|
//G.cluster_offset = z / y;
|
|
|
|
//G.cluster_jitter = SQRT(w / y);
|
|
|
|
}
|
|
|
|
|
2010-01-01 21:16:17 +05:30
|
|
|
/* Pick the best clock. If the old system peer is on the list
|
|
|
|
* and at the same stratum as the first survivor on the list,
|
|
|
|
* then don't do a clock hop. Otherwise, select the first
|
|
|
|
* survivor on the list as the new system peer.
|
|
|
|
*/
|
2010-01-17 07:21:33 +05:30
|
|
|
p = survivor[0].p;
|
|
|
|
if (G.last_update_peer
|
|
|
|
&& G.last_update_peer->lastpkt_stratum <= p->lastpkt_stratum
|
|
|
|
) {
|
|
|
|
/* Starting from 1 is ok here */
|
|
|
|
for (i = 1; i < num_survivors; i++) {
|
|
|
|
if (G.last_update_peer == survivor[i].p) {
|
libbb: reduce the overhead of single parameter bb_error_msg() calls
Back in 2007, commit 0c97c9d43707 ("'simple' error message functions by
Loic Grenie") introduced bb_simple_perror_msg() to allow for a lower
overhead call to bb_perror_msg() when only a string was being printed
with no parameters. This saves space for some CPU architectures because
it avoids the overhead of a call to a variadic function. However there
has never been a simple version of bb_error_msg(), and since 2007 many
new calls to bb_perror_msg() have been added that only take a single
parameter and so could have been using bb_simple_perror_message().
This changeset introduces 'simple' versions of bb_info_msg(),
bb_error_msg(), bb_error_msg_and_die(), bb_herror_msg() and
bb_herror_msg_and_die(), and replaces all calls that only take a
single parameter, or use something like ("%s", arg), with calls to the
corresponding 'simple' version.
Since it is likely that single parameter calls to the variadic functions
may be accidentally reintroduced in the future a new debugging config
option WARN_SIMPLE_MSG has been introduced. This uses some macro magic
which will cause any such calls to generate a warning, but this is
turned off by default to avoid use of the unpleasant macros in normal
circumstances.
This is a large changeset due to the number of calls that have been
replaced. The only files that contain changes other than simple
substitution of function calls are libbb.h, libbb/herror_msg.c,
libbb/verror_msg.c and libbb/xfuncs_printf.c. In miscutils/devfsd.c,
networking/udhcp/common.h and util-linux/mdev.c additonal macros have
been added for logging so that single parameter and multiple parameter
logging variants exist.
The amount of space saved varies considerably by architecture, and was
found to be as follows (for 'defconfig' using GCC 7.4):
Arm: -92 bytes
MIPS: -52 bytes
PPC: -1836 bytes
x86_64: -938 bytes
Note that for the MIPS architecture only an exception had to be made
disabling the 'simple' calls for 'udhcp' (in networking/udhcp/common.h)
because it made these files larger on MIPS.
Signed-off-by: James Byrne <james.byrne@origamienergy.com>
Signed-off-by: Denys Vlasenko <vda.linux@googlemail.com>
2019-07-02 15:05:03 +05:30
|
|
|
VERB5 bb_simple_error_msg("keeping old synced peer");
|
2010-01-17 07:21:33 +05:30
|
|
|
p = G.last_update_peer;
|
|
|
|
goto keep_old;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
G.last_update_peer = p;
|
|
|
|
keep_old:
|
2013-12-04 21:02:09 +05:30
|
|
|
VERB4 bb_error_msg("selected peer %s filter_offset:%+f age:%f",
|
2010-01-17 07:21:33 +05:30
|
|
|
p->p_dotted,
|
|
|
|
p->filter_offset,
|
|
|
|
G.cur_time - p->lastpkt_recv_time
|
2010-01-01 21:16:17 +05:30
|
|
|
);
|
2010-01-17 07:21:33 +05:30
|
|
|
return p;
|
2010-01-01 21:16:17 +05:30
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Local clock discipline and its helpers
|
|
|
|
*/
|
|
|
|
static void
|
2021-02-21 13:43:05 +05:30
|
|
|
set_new_values(double offset, double recv_time)
|
2010-01-01 21:16:17 +05:30
|
|
|
{
|
|
|
|
/* Enter new state and set state variables. Note we use the time
|
|
|
|
* of the last clock filter sample, which must be earlier than
|
|
|
|
* the current time.
|
|
|
|
*/
|
2021-02-21 13:43:05 +05:30
|
|
|
VERB4 bb_error_msg("last update offset=%f recv_time=%f",
|
|
|
|
offset, recv_time);
|
2010-01-01 21:16:17 +05:30
|
|
|
G.last_update_offset = offset;
|
|
|
|
G.last_update_recv_time = recv_time;
|
|
|
|
}
|
|
|
|
/* Return: -1: decrease poll interval, 0: leave as is, 1: increase */
|
2010-01-03 13:29:59 +05:30
|
|
|
static NOINLINE int
|
|
|
|
update_local_clock(peer_t *p)
|
2010-01-01 21:16:17 +05:30
|
|
|
{
|
|
|
|
int rc;
|
|
|
|
struct timex tmx;
|
2010-01-17 07:21:33 +05:30
|
|
|
/* Note: can use G.cluster_offset instead: */
|
2010-01-01 21:16:17 +05:30
|
|
|
double offset = p->filter_offset;
|
|
|
|
double recv_time = p->lastpkt_recv_time;
|
|
|
|
double abs_offset;
|
2010-01-02 20:27:07 +05:30
|
|
|
#if !USING_KERNEL_PLL_LOOP
|
2010-01-01 21:16:17 +05:30
|
|
|
double freq_drift;
|
|
|
|
double since_last_update;
|
2014-01-18 20:06:27 +05:30
|
|
|
#endif
|
2010-01-01 21:16:17 +05:30
|
|
|
double etemp, dtemp;
|
|
|
|
|
|
|
|
abs_offset = fabs(offset);
|
|
|
|
|
2010-01-11 06:01:59 +05:30
|
|
|
#if 0
|
2010-01-26 00:00:16 +05:30
|
|
|
/* If needed, -S script can do it by looking at $offset
|
|
|
|
* env var and killing parent */
|
2010-01-01 21:16:17 +05:30
|
|
|
/* If the offset is too large, give up and go home */
|
|
|
|
if (abs_offset > PANIC_THRESHOLD) {
|
|
|
|
bb_error_msg_and_die("offset %f far too big, exiting", offset);
|
|
|
|
}
|
2010-01-11 06:01:59 +05:30
|
|
|
#endif
|
2010-01-01 21:16:17 +05:30
|
|
|
|
|
|
|
/* If this is an old update, for instance as the result
|
|
|
|
* of a system peer change, avoid it. We never use
|
|
|
|
* an old sample or the same sample twice.
|
|
|
|
*/
|
|
|
|
if (recv_time <= G.last_update_recv_time) {
|
2013-12-04 21:02:09 +05:30
|
|
|
VERB3 bb_error_msg("update from %s: same or older datapoint, not using it",
|
|
|
|
p->p_dotted);
|
2010-01-01 21:16:17 +05:30
|
|
|
return 0; /* "leave poll interval as is" */
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Clock state machine transition function. This is where the
|
|
|
|
* action is and defines how the system reacts to large time
|
|
|
|
* and frequency errors.
|
|
|
|
*/
|
2010-01-02 20:27:07 +05:30
|
|
|
#if !USING_KERNEL_PLL_LOOP
|
2021-02-21 13:35:48 +05:30
|
|
|
since_last_update = recv_time - G.reftime;
|
2010-01-01 21:16:17 +05:30
|
|
|
freq_drift = 0;
|
2010-01-02 20:27:07 +05:30
|
|
|
#endif
|
2010-01-01 21:16:17 +05:30
|
|
|
|
|
|
|
/* There are two main regimes: when the
|
|
|
|
* offset exceeds the step threshold and when it does not.
|
|
|
|
*/
|
|
|
|
if (abs_offset > STEP_THRESHOLD) {
|
|
|
|
/* Step the time and clamp down the poll interval.
|
|
|
|
*
|
|
|
|
* In NSET state an initial frequency correction is
|
|
|
|
* not available, usually because the frequency file has
|
|
|
|
* not yet been written. Since the time is outside the
|
|
|
|
* capture range, the clock is stepped. The frequency
|
|
|
|
* will be set directly following the stepout interval.
|
|
|
|
*
|
|
|
|
* In FSET state the initial frequency has been set
|
|
|
|
* from the frequency file. Since the time is outside
|
|
|
|
* the capture range, the clock is stepped immediately,
|
|
|
|
* rather than after the stepout interval. Guys get
|
|
|
|
* nervous if it takes 17 minutes to set the clock for
|
|
|
|
* the first time.
|
|
|
|
*
|
|
|
|
* In SPIK state the stepout threshold has expired and
|
|
|
|
* the phase is still above the step threshold. Note
|
|
|
|
* that a single spike greater than the step threshold
|
|
|
|
* is always suppressed, even at the longer poll
|
|
|
|
* intervals.
|
|
|
|
*/
|
2013-12-04 21:02:09 +05:30
|
|
|
VERB4 bb_error_msg("stepping time by %+f; poll_exp=MINPOLL", offset);
|
2010-01-01 21:16:17 +05:30
|
|
|
step_time(offset);
|
|
|
|
if (option_mask32 & OPT_q) {
|
|
|
|
/* We were only asked to set time once. Done. */
|
|
|
|
exit(0);
|
|
|
|
}
|
|
|
|
|
2014-09-28 02:26:09 +05:30
|
|
|
clamp_pollexp_and_set_MAXSTRAT();
|
2010-01-06 16:57:47 +05:30
|
|
|
|
2010-01-11 06:01:59 +05:30
|
|
|
run_script("step", offset);
|
2010-01-06 16:57:47 +05:30
|
|
|
|
2014-01-08 21:47:52 +05:30
|
|
|
recv_time += offset;
|
|
|
|
|
2012-03-05 14:48:00 +05:30
|
|
|
abs_offset = offset = 0;
|
2021-02-21 13:43:05 +05:30
|
|
|
set_new_values(offset, recv_time);
|
2010-01-01 21:16:17 +05:30
|
|
|
} else { /* abs_offset <= STEP_THRESHOLD */
|
|
|
|
|
2021-02-21 13:43:05 +05:30
|
|
|
if (option_mask32 & OPT_q) {
|
|
|
|
/* We were only asked to set time once.
|
|
|
|
* The clock is precise enough, no need to step.
|
|
|
|
*/
|
|
|
|
exit(0);
|
|
|
|
}
|
|
|
|
|
2014-10-05 06:40:15 +05:30
|
|
|
/* The ratio is calculated before jitter is updated to make
|
|
|
|
* poll adjust code more sensitive to large offsets.
|
|
|
|
*/
|
|
|
|
G.offset_to_jitter_ratio = abs_offset / G.discipline_jitter;
|
|
|
|
|
2010-01-01 21:16:17 +05:30
|
|
|
/* Compute the clock jitter as the RMS of exponentially
|
|
|
|
* weighted offset differences. Used by the poll adjust code.
|
|
|
|
*/
|
|
|
|
etemp = SQUARE(G.discipline_jitter);
|
2012-03-02 05:52:40 +05:30
|
|
|
dtemp = SQUARE(offset - G.last_update_offset);
|
2010-01-01 21:16:17 +05:30
|
|
|
G.discipline_jitter = SQRT(etemp + (dtemp - etemp) / AVG);
|
2014-10-05 06:40:15 +05:30
|
|
|
if (G.discipline_jitter < G_precision_sec)
|
|
|
|
G.discipline_jitter = G_precision_sec;
|
2010-01-01 21:16:17 +05:30
|
|
|
|
2010-01-02 20:27:07 +05:30
|
|
|
#if !USING_KERNEL_PLL_LOOP
|
2021-02-21 13:43:05 +05:30
|
|
|
/* Compute freq_drift due to PLL and FLL contributions.
|
|
|
|
*
|
|
|
|
* The FLL and PLL frequency gain constants
|
|
|
|
* depend on the poll interval and Allan
|
|
|
|
* intercept. The FLL is not used below one-half
|
|
|
|
* the Allan intercept. Above that the loop gain
|
|
|
|
* increases in steps to 1 / AVG.
|
|
|
|
*/
|
|
|
|
if ((1 << G.poll_exp) > ALLAN / 2) {
|
|
|
|
etemp = FLL - G.poll_exp;
|
|
|
|
if (etemp < AVG)
|
|
|
|
etemp = AVG;
|
|
|
|
freq_drift += (offset - G.last_update_offset) / (MAXD(since_last_update, ALLAN) * etemp);
|
2010-01-01 21:16:17 +05:30
|
|
|
}
|
2021-02-21 13:43:05 +05:30
|
|
|
/* For the PLL the integration interval
|
|
|
|
* (numerator) is the minimum of the update
|
|
|
|
* interval and poll interval. This allows
|
|
|
|
* oversampling, but not undersampling.
|
|
|
|
*/
|
|
|
|
etemp = MIND(since_last_update, (1 << G.poll_exp));
|
|
|
|
dtemp = (4 * PLL) << G.poll_exp;
|
|
|
|
freq_drift += offset * etemp / SQUARE(dtemp);
|
|
|
|
#endif
|
|
|
|
set_new_values(offset, recv_time);
|
2010-01-06 16:57:47 +05:30
|
|
|
if (G.stratum != p->lastpkt_stratum + 1) {
|
|
|
|
G.stratum = p->lastpkt_stratum + 1;
|
2010-01-11 06:01:59 +05:30
|
|
|
run_script("stratum", offset);
|
2010-01-06 16:57:47 +05:30
|
|
|
}
|
2010-01-01 21:16:17 +05:30
|
|
|
}
|
|
|
|
|
2010-01-03 13:29:59 +05:30
|
|
|
G.reftime = G.cur_time;
|
2010-01-02 20:27:07 +05:30
|
|
|
G.ntp_status = p->lastpkt_status;
|
2020-07-20 03:34:33 +05:30
|
|
|
#if ENABLE_FEATURE_NTPD_SERVER
|
|
|
|
/* Our current refid is the IPv4 (or md5-hashed IPv6) address of the peer we took time from: */
|
|
|
|
G.refid = p->p_refid;
|
|
|
|
#endif
|
2010-01-01 21:16:17 +05:30
|
|
|
G.rootdelay = p->lastpkt_rootdelay + p->lastpkt_delay;
|
2010-01-17 07:21:33 +05:30
|
|
|
dtemp = p->filter_jitter; // SQRT(SQUARE(p->filter_jitter) + SQUARE(G.cluster_jitter));
|
2010-01-03 13:29:59 +05:30
|
|
|
dtemp += MAXD(p->filter_dispersion + FREQ_TOLERANCE * (G.cur_time - p->lastpkt_recv_time) + abs_offset, MINDISP);
|
2010-01-01 21:16:17 +05:30
|
|
|
G.rootdisp = p->lastpkt_rootdisp + dtemp;
|
2013-12-04 21:02:09 +05:30
|
|
|
VERB4 bb_error_msg("updating leap/refid/reftime/rootdisp from peer %s", p->p_dotted);
|
2010-01-01 21:16:17 +05:30
|
|
|
|
2021-02-21 13:43:05 +05:30
|
|
|
/* By this time, freq_drift and offset are set
|
2010-01-01 21:16:17 +05:30
|
|
|
* to values suitable for adjtimex.
|
2010-01-02 00:26:16 +05:30
|
|
|
*/
|
|
|
|
#if !USING_KERNEL_PLL_LOOP
|
|
|
|
/* Calculate the new frequency drift and frequency stability (wander).
|
2010-01-01 21:16:17 +05:30
|
|
|
* Compute the clock wander as the RMS of exponentially weighted
|
|
|
|
* frequency differences. This is not used directly, but can,
|
|
|
|
* along with the jitter, be a highly useful monitoring and
|
|
|
|
* debugging tool.
|
|
|
|
*/
|
|
|
|
dtemp = G.discipline_freq_drift + freq_drift;
|
2010-01-02 00:26:16 +05:30
|
|
|
G.discipline_freq_drift = MAXD(MIND(MAXDRIFT, dtemp), -MAXDRIFT);
|
2010-01-01 21:16:17 +05:30
|
|
|
etemp = SQUARE(G.discipline_wander);
|
|
|
|
dtemp = SQUARE(dtemp);
|
|
|
|
G.discipline_wander = SQRT(etemp + (dtemp - etemp) / AVG);
|
|
|
|
|
2013-12-04 21:02:09 +05:30
|
|
|
VERB4 bb_error_msg("discipline freq_drift=%.9f(int:%ld corr:%e) wander=%f",
|
2010-01-02 00:26:16 +05:30
|
|
|
G.discipline_freq_drift,
|
|
|
|
(long)(G.discipline_freq_drift * 65536e6),
|
|
|
|
freq_drift,
|
|
|
|
G.discipline_wander);
|
|
|
|
#endif
|
2013-12-04 21:02:09 +05:30
|
|
|
VERB4 {
|
2010-01-01 21:16:17 +05:30
|
|
|
memset(&tmx, 0, sizeof(tmx));
|
|
|
|
if (adjtimex(&tmx) < 0)
|
libbb: reduce the overhead of single parameter bb_error_msg() calls
Back in 2007, commit 0c97c9d43707 ("'simple' error message functions by
Loic Grenie") introduced bb_simple_perror_msg() to allow for a lower
overhead call to bb_perror_msg() when only a string was being printed
with no parameters. This saves space for some CPU architectures because
it avoids the overhead of a call to a variadic function. However there
has never been a simple version of bb_error_msg(), and since 2007 many
new calls to bb_perror_msg() have been added that only take a single
parameter and so could have been using bb_simple_perror_message().
This changeset introduces 'simple' versions of bb_info_msg(),
bb_error_msg(), bb_error_msg_and_die(), bb_herror_msg() and
bb_herror_msg_and_die(), and replaces all calls that only take a
single parameter, or use something like ("%s", arg), with calls to the
corresponding 'simple' version.
Since it is likely that single parameter calls to the variadic functions
may be accidentally reintroduced in the future a new debugging config
option WARN_SIMPLE_MSG has been introduced. This uses some macro magic
which will cause any such calls to generate a warning, but this is
turned off by default to avoid use of the unpleasant macros in normal
circumstances.
This is a large changeset due to the number of calls that have been
replaced. The only files that contain changes other than simple
substitution of function calls are libbb.h, libbb/herror_msg.c,
libbb/verror_msg.c and libbb/xfuncs_printf.c. In miscutils/devfsd.c,
networking/udhcp/common.h and util-linux/mdev.c additonal macros have
been added for logging so that single parameter and multiple parameter
logging variants exist.
The amount of space saved varies considerably by architecture, and was
found to be as follows (for 'defconfig' using GCC 7.4):
Arm: -92 bytes
MIPS: -52 bytes
PPC: -1836 bytes
x86_64: -938 bytes
Note that for the MIPS architecture only an exception had to be made
disabling the 'simple' calls for 'udhcp' (in networking/udhcp/common.h)
because it made these files larger on MIPS.
Signed-off-by: James Byrne <james.byrne@origamienergy.com>
Signed-off-by: Denys Vlasenko <vda.linux@googlemail.com>
2019-07-02 15:05:03 +05:30
|
|
|
bb_simple_perror_msg_and_die("adjtimex");
|
2012-03-06 23:46:50 +05:30
|
|
|
bb_error_msg("p adjtimex freq:%ld offset:%+ld status:0x%x tc:%ld",
|
|
|
|
tmx.freq, tmx.offset, tmx.status, tmx.constant);
|
2010-01-01 21:16:17 +05:30
|
|
|
}
|
|
|
|
|
|
|
|
memset(&tmx, 0, sizeof(tmx));
|
|
|
|
#if 0
|
2010-01-02 00:26:16 +05:30
|
|
|
//doesn't work, offset remains 0 (!) in kernel:
|
2010-01-01 21:16:17 +05:30
|
|
|
//ntpd: set adjtimex freq:1786097 tmx.offset:77487
|
|
|
|
//ntpd: prev adjtimex freq:1786097 tmx.offset:0
|
|
|
|
//ntpd: cur adjtimex freq:1786097 tmx.offset:0
|
|
|
|
tmx.modes = ADJ_FREQUENCY | ADJ_OFFSET;
|
|
|
|
/* 65536 is one ppm */
|
|
|
|
tmx.freq = G.discipline_freq_drift * 65536e6;
|
|
|
|
#endif
|
|
|
|
tmx.modes = ADJ_OFFSET | ADJ_STATUS | ADJ_TIMECONST;// | ADJ_MAXERROR | ADJ_ESTERROR;
|
2018-03-11 21:38:02 +05:30
|
|
|
|
|
|
|
tmx.offset = (long)(offset * 1000000); /* usec */
|
|
|
|
if (SLEW_THRESHOLD < STEP_THRESHOLD) {
|
|
|
|
if (tmx.offset > (long)(SLEW_THRESHOLD * 1000000)) {
|
|
|
|
tmx.offset = (long)(SLEW_THRESHOLD * 1000000);
|
|
|
|
}
|
|
|
|
if (tmx.offset < -(long)(SLEW_THRESHOLD * 1000000)) {
|
|
|
|
tmx.offset = -(long)(SLEW_THRESHOLD * 1000000);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-03-11 01:55:53 +05:30
|
|
|
tmx.status = STA_PLL;
|
|
|
|
if (G.FREQHOLD_cnt != 0) {
|
|
|
|
/* man adjtimex on STA_FREQHOLD:
|
|
|
|
* "Normally adjustments made via ADJ_OFFSET result in dampened
|
|
|
|
* frequency adjustments also being made.
|
|
|
|
* This flag prevents the small frequency adjustment from being
|
|
|
|
* made when correcting for an ADJ_OFFSET value."
|
|
|
|
*
|
|
|
|
* Use this flag for a few first adjustments at the beginning
|
|
|
|
* of ntpd execution, otherwise even relatively small initial
|
|
|
|
* offset tend to cause largish changes to in-kernel tmx.freq.
|
|
|
|
* If ntpd was restarted due to e.g. switch to another network,
|
|
|
|
* this destroys already well-established tmx.freq value.
|
|
|
|
*/
|
2018-03-11 21:38:02 +05:30
|
|
|
if (G.FREQHOLD_cnt < 0) {
|
|
|
|
/* Initialize it */
|
|
|
|
// Example: a laptop whose clock runs slower when hibernated,
|
|
|
|
// after wake up it still has good tmx.freq, but accumulated ~0.5 sec offset:
|
|
|
|
// Run with code where initial G.FREQHOLD_cnt was always 8:
|
|
|
|
//15:17:52.947 no valid datapoints, no peer selected
|
|
|
|
//15:17:56.515 update from:<IP> offset:+0.485133 delay:0.157762 jitter:0.209310 clock drift:-1.393ppm tc:4
|
|
|
|
//15:17:57.719 update from:<IP> offset:+0.483825 delay:0.158070 jitter:0.181159 clock drift:-1.393ppm tc:4
|
|
|
|
//15:17:59.925 update from:<IP> offset:+0.479504 delay:0.158147 jitter:0.156657 clock drift:-1.393ppm tc:4
|
|
|
|
//15:18:33.322 update from:<IP> offset:+0.428119 delay:0.158317 jitter:0.138071 clock drift:-1.393ppm tc:4
|
|
|
|
//15:19:06.718 update from:<IP> offset:+0.376932 delay:0.158276 jitter:0.122075 clock drift:-1.393ppm tc:4
|
|
|
|
//15:19:39.114 update from:<IP> offset:+0.327022 delay:0.158384 jitter:0.108538 clock drift:-1.393ppm tc:4
|
|
|
|
//15:20:12.715 update from:<IP> offset:+0.275596 delay:0.158297 jitter:0.097292 clock drift:-1.393ppm tc:4
|
|
|
|
//15:20:45.111 update from:<IP> offset:+0.225715 delay:0.158271 jitter:0.087841 clock drift:-1.393ppm tc:4
|
2018-07-02 19:47:07 +05:30
|
|
|
// If allowed to continue, it would start increasing tmx.freq now.
|
2018-03-11 21:38:02 +05:30
|
|
|
// Instead, it was ^Ced, and started anew:
|
|
|
|
//15:21:15.043 no valid datapoints, no peer selected
|
|
|
|
//15:21:17.408 update from:<IP> offset:+0.175910 delay:0.158314 jitter:0.076683 clock drift:-1.393ppm tc:4
|
|
|
|
//15:21:19.774 update from:<IP> offset:+0.171784 delay:0.158401 jitter:0.066436 clock drift:-1.393ppm tc:4
|
|
|
|
//15:21:22.140 update from:<IP> offset:+0.171660 delay:0.158592 jitter:0.057536 clock drift:-1.393ppm tc:4
|
|
|
|
//15:21:22.140 update from:<IP> offset:+0.167126 delay:0.158507 jitter:0.049792 clock drift:-1.393ppm tc:4
|
|
|
|
//15:21:55.696 update from:<IP> offset:+0.115223 delay:0.158277 jitter:0.050240 clock drift:-1.393ppm tc:4
|
|
|
|
//15:22:29.093 update from:<IP> offset:+0.068051 delay:0.158243 jitter:0.049405 clock drift:-1.393ppm tc:5
|
|
|
|
//15:23:02.490 update from:<IP> offset:+0.051632 delay:0.158215 jitter:0.043545 clock drift:-1.393ppm tc:5
|
|
|
|
//15:23:34.726 update from:<IP> offset:+0.039984 delay:0.158157 jitter:0.038106 clock drift:-1.393ppm tc:5
|
|
|
|
// STA_FREQHOLD no longer set, started increasing tmx.freq now:
|
|
|
|
//15:24:06.961 update from:<IP> offset:+0.030968 delay:0.158190 jitter:0.033306 clock drift:+2.387ppm tc:5
|
|
|
|
//15:24:40.357 update from:<IP> offset:+0.023648 delay:0.158211 jitter:0.029072 clock drift:+5.454ppm tc:5
|
|
|
|
//15:25:13.774 update from:<IP> offset:+0.018068 delay:0.157660 jitter:0.025288 clock drift:+7.728ppm tc:5
|
|
|
|
//15:26:19.173 update from:<IP> offset:+0.010057 delay:0.157969 jitter:0.022255 clock drift:+8.361ppm tc:6
|
|
|
|
//15:27:26.602 update from:<IP> offset:+0.006737 delay:0.158103 jitter:0.019316 clock drift:+8.792ppm tc:6
|
|
|
|
//15:28:33.030 update from:<IP> offset:+0.004513 delay:0.158294 jitter:0.016765 clock drift:+9.080ppm tc:6
|
|
|
|
//15:29:40.617 update from:<IP> offset:+0.002787 delay:0.157745 jitter:0.014543 clock drift:+9.258ppm tc:6
|
|
|
|
//15:30:47.045 update from:<IP> offset:+0.001324 delay:0.157709 jitter:0.012594 clock drift:+9.342ppm tc:6
|
|
|
|
//15:31:53.473 update from:<IP> offset:+0.000007 delay:0.158142 jitter:0.010922 clock drift:+9.343ppm tc:6
|
|
|
|
//15:32:58.902 update from:<IP> offset:-0.000728 delay:0.158222 jitter:0.009454 clock drift:+9.298ppm tc:6
|
|
|
|
/*
|
2019-10-30 16:43:46 +05:30
|
|
|
* This expression would choose MIN_FREQHOLD + 14 in the above example
|
|
|
|
* (off_032 is +1 for each 0.032768 seconds of offset).
|
2018-03-11 21:38:02 +05:30
|
|
|
*/
|
2019-10-30 16:43:46 +05:30
|
|
|
unsigned off_032 = abs((int)(tmx.offset >> 15));
|
|
|
|
G.FREQHOLD_cnt = 1 + MIN_FREQHOLD + off_032;
|
2018-03-11 21:38:02 +05:30
|
|
|
}
|
|
|
|
G.FREQHOLD_cnt--;
|
2018-03-11 01:55:53 +05:30
|
|
|
tmx.status |= STA_FREQHOLD;
|
|
|
|
}
|
|
|
|
if (G.ntp_status & LI_PLUSSEC)
|
|
|
|
tmx.status |= STA_INS;
|
|
|
|
if (G.ntp_status & LI_MINUSSEC)
|
|
|
|
tmx.status |= STA_DEL;
|
|
|
|
|
2016-02-10 11:25:07 +05:30
|
|
|
tmx.constant = (int)G.poll_exp - 4;
|
2012-03-05 05:21:48 +05:30
|
|
|
/* EXPERIMENTAL.
|
|
|
|
* The below if statement should be unnecessary, but...
|
|
|
|
* It looks like Linux kernel's PLL is far too gentle in changing
|
|
|
|
* tmx.freq in response to clock offset. Offset keeps growing
|
|
|
|
* and eventually we fall back to smaller poll intervals.
|
2017-04-17 19:43:32 +05:30
|
|
|
* We can make correction more aggressive (about x2) by supplying
|
2012-03-05 05:21:48 +05:30
|
|
|
* PLL time constant which is one less than the real one.
|
|
|
|
* To be on a safe side, let's do it only if offset is significantly
|
|
|
|
* larger than jitter.
|
|
|
|
*/
|
2016-02-10 11:25:07 +05:30
|
|
|
if (G.offset_to_jitter_ratio >= TIMECONST_HACK_GATE)
|
2012-03-05 05:21:48 +05:30
|
|
|
tmx.constant--;
|
2018-03-11 01:55:53 +05:30
|
|
|
if (tmx.constant < 0)
|
|
|
|
tmx.constant = 0;
|
|
|
|
|
2012-03-05 05:21:48 +05:30
|
|
|
//tmx.esterror = (uint32_t)(clock_jitter * 1e6);
|
|
|
|
//tmx.maxerror = (uint32_t)((sys_rootdelay / 2 + sys_rootdisp) * 1e6);
|
2010-01-01 21:16:17 +05:30
|
|
|
rc = adjtimex(&tmx);
|
|
|
|
if (rc < 0)
|
libbb: reduce the overhead of single parameter bb_error_msg() calls
Back in 2007, commit 0c97c9d43707 ("'simple' error message functions by
Loic Grenie") introduced bb_simple_perror_msg() to allow for a lower
overhead call to bb_perror_msg() when only a string was being printed
with no parameters. This saves space for some CPU architectures because
it avoids the overhead of a call to a variadic function. However there
has never been a simple version of bb_error_msg(), and since 2007 many
new calls to bb_perror_msg() have been added that only take a single
parameter and so could have been using bb_simple_perror_message().
This changeset introduces 'simple' versions of bb_info_msg(),
bb_error_msg(), bb_error_msg_and_die(), bb_herror_msg() and
bb_herror_msg_and_die(), and replaces all calls that only take a
single parameter, or use something like ("%s", arg), with calls to the
corresponding 'simple' version.
Since it is likely that single parameter calls to the variadic functions
may be accidentally reintroduced in the future a new debugging config
option WARN_SIMPLE_MSG has been introduced. This uses some macro magic
which will cause any such calls to generate a warning, but this is
turned off by default to avoid use of the unpleasant macros in normal
circumstances.
This is a large changeset due to the number of calls that have been
replaced. The only files that contain changes other than simple
substitution of function calls are libbb.h, libbb/herror_msg.c,
libbb/verror_msg.c and libbb/xfuncs_printf.c. In miscutils/devfsd.c,
networking/udhcp/common.h and util-linux/mdev.c additonal macros have
been added for logging so that single parameter and multiple parameter
logging variants exist.
The amount of space saved varies considerably by architecture, and was
found to be as follows (for 'defconfig' using GCC 7.4):
Arm: -92 bytes
MIPS: -52 bytes
PPC: -1836 bytes
x86_64: -938 bytes
Note that for the MIPS architecture only an exception had to be made
disabling the 'simple' calls for 'udhcp' (in networking/udhcp/common.h)
because it made these files larger on MIPS.
Signed-off-by: James Byrne <james.byrne@origamienergy.com>
Signed-off-by: Denys Vlasenko <vda.linux@googlemail.com>
2019-07-02 15:05:03 +05:30
|
|
|
bb_simple_perror_msg_and_die("adjtimex");
|
2010-01-02 05:06:43 +05:30
|
|
|
/* NB: here kernel returns constant == G.poll_exp, not == G.poll_exp - 4.
|
|
|
|
* Not sure why. Perhaps it is normal.
|
|
|
|
*/
|
2013-12-04 21:02:09 +05:30
|
|
|
VERB4 bb_error_msg("adjtimex:%d freq:%ld offset:%+ld status:0x%x",
|
2012-03-05 05:21:48 +05:30
|
|
|
rc, tmx.freq, tmx.offset, tmx.status);
|
2010-01-11 06:01:59 +05:30
|
|
|
G.kernel_freq_drift = tmx.freq / 65536;
|
2016-02-24 05:52:45 +05:30
|
|
|
VERB2 bb_error_msg("update from:%s offset:%+f delay:%f jitter:%f clock drift:%+.3fppm tc:%d",
|
|
|
|
p->p_dotted,
|
|
|
|
offset,
|
2018-08-03 14:33:55 +05:30
|
|
|
p->p_raw_delay,
|
2016-02-24 05:52:45 +05:30
|
|
|
G.discipline_jitter,
|
|
|
|
(double)tmx.freq / 65536,
|
|
|
|
(int)tmx.constant
|
|
|
|
);
|
2010-01-01 21:16:17 +05:30
|
|
|
|
|
|
|
return 1; /* "ok to increase poll interval" */
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We've got a new reply packet from a peer, process it
|
|
|
|
* (helpers first)
|
|
|
|
*/
|
|
|
|
static unsigned
|
2014-09-28 02:26:09 +05:30
|
|
|
poll_interval(int upper_bound)
|
2010-01-01 21:16:17 +05:30
|
|
|
{
|
2014-02-09 20:05:04 +05:30
|
|
|
unsigned interval, r, mask;
|
2014-09-28 02:26:09 +05:30
|
|
|
interval = 1 << G.poll_exp;
|
|
|
|
if (interval > upper_bound)
|
|
|
|
interval = upper_bound;
|
2014-02-09 20:05:04 +05:30
|
|
|
mask = ((interval-1) >> 4) | 1;
|
2014-03-05 23:28:15 +05:30
|
|
|
r = rand();
|
2014-02-09 20:05:04 +05:30
|
|
|
interval += r & mask; /* ~ random(0..1) * interval/16 */
|
2014-09-28 02:26:09 +05:30
|
|
|
VERB4 bb_error_msg("chose poll interval:%u (poll_exp:%d)", interval, G.poll_exp);
|
2010-01-01 21:16:17 +05:30
|
|
|
return interval;
|
|
|
|
}
|
2014-10-02 20:48:43 +05:30
|
|
|
static void
|
2014-09-18 19:49:05 +05:30
|
|
|
adjust_poll(int count)
|
|
|
|
{
|
|
|
|
G.polladj_count += count;
|
|
|
|
if (G.polladj_count > POLLADJ_LIMIT) {
|
|
|
|
G.polladj_count = 0;
|
|
|
|
if (G.poll_exp < MAXPOLL) {
|
|
|
|
G.poll_exp++;
|
|
|
|
VERB4 bb_error_msg("polladj: discipline_jitter:%f ++poll_exp=%d",
|
|
|
|
G.discipline_jitter, G.poll_exp);
|
|
|
|
}
|
2014-10-02 20:48:43 +05:30
|
|
|
} else if (G.polladj_count < -POLLADJ_LIMIT || (count < 0 && G.poll_exp > BIGPOLL)) {
|
2014-09-18 19:49:05 +05:30
|
|
|
G.polladj_count = 0;
|
|
|
|
if (G.poll_exp > MINPOLL) {
|
|
|
|
llist_t *item;
|
|
|
|
|
|
|
|
G.poll_exp--;
|
|
|
|
/* Correct p->next_action_time in each peer
|
|
|
|
* which waits for sending, so that they send earlier.
|
|
|
|
* Old pp->next_action_time are on the order
|
|
|
|
* of t + (1 << old_poll_exp) + small_random,
|
|
|
|
* we simply need to subtract ~half of that.
|
|
|
|
*/
|
|
|
|
for (item = G.ntp_peers; item != NULL; item = item->link) {
|
|
|
|
peer_t *pp = (peer_t *) item->data;
|
|
|
|
if (pp->p_fd < 0)
|
|
|
|
pp->next_action_time -= (1 << G.poll_exp);
|
|
|
|
}
|
|
|
|
VERB4 bb_error_msg("polladj: discipline_jitter:%f --poll_exp=%d",
|
|
|
|
G.discipline_jitter, G.poll_exp);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
VERB4 bb_error_msg("polladj: count:%d", G.polladj_count);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
static NOINLINE void
|
2010-01-01 21:16:17 +05:30
|
|
|
recv_and_process_peer_pkt(peer_t *p)
|
|
|
|
{
|
|
|
|
int rc;
|
|
|
|
ssize_t size;
|
|
|
|
msg_t msg;
|
|
|
|
double T1, T2, T3, T4;
|
2014-04-19 22:30:16 +05:30
|
|
|
double offset;
|
|
|
|
double prev_delay, delay;
|
2010-01-01 21:16:17 +05:30
|
|
|
unsigned interval;
|
|
|
|
datapoint_t *datapoint;
|
|
|
|
peer_t *q;
|
|
|
|
|
2013-12-08 20:41:04 +05:30
|
|
|
offset = 0;
|
|
|
|
|
2020-09-29 17:49:17 +05:30
|
|
|
/* The below can happen as follows:
|
|
|
|
* = we receive two peer rsponses at once.
|
|
|
|
* = recv_and_process_peer_pkt(PEER1) -> update_local_clock()
|
|
|
|
* -> step_time() and it closes all other fds, sets all ->fd to -1.
|
|
|
|
* = recv_and_process_peer_pkt(PEER2) sees PEER2->fd == -1
|
|
|
|
*/
|
|
|
|
if (p->p_fd < 0)
|
|
|
|
return;
|
|
|
|
|
2010-01-01 21:16:17 +05:30
|
|
|
/* We can recvfrom here and check from.IP, but some multihomed
|
|
|
|
* ntp servers reply from their *other IP*.
|
|
|
|
* TODO: maybe we should check at least what we can: from.port == 123?
|
|
|
|
*/
|
2014-10-02 20:48:43 +05:30
|
|
|
recv_again:
|
2010-01-01 21:16:17 +05:30
|
|
|
size = recv(p->p_fd, &msg, sizeof(msg), MSG_DONTWAIT);
|
2014-10-02 20:48:43 +05:30
|
|
|
if (size < 0) {
|
|
|
|
if (errno == EINTR)
|
|
|
|
/* Signal caught */
|
|
|
|
goto recv_again;
|
|
|
|
if (errno == EAGAIN)
|
|
|
|
/* There was no packet after all
|
|
|
|
* (poll() returning POLLIN for a fd
|
|
|
|
* is not a ironclad guarantee that data is there)
|
|
|
|
*/
|
|
|
|
return;
|
|
|
|
/*
|
|
|
|
* If you need a different handling for a specific
|
|
|
|
* errno, always explain it in comment.
|
|
|
|
*/
|
|
|
|
bb_perror_msg_and_die("recv(%s) error", p->p_dotted);
|
2010-01-01 21:16:17 +05:30
|
|
|
}
|
|
|
|
|
2018-10-27 22:25:59 +05:30
|
|
|
#if ENABLE_FEATURE_NTP_AUTH
|
|
|
|
if (size != NTP_MSGSIZE_NOAUTH && size != NTP_MSGSIZE_MD5_AUTH && size != NTP_MSGSIZE_SHA1_AUTH) {
|
2019-07-02 16:40:19 +05:30
|
|
|
bb_error_msg("malformed packet received from %s: size %u", p->p_dotted, (int)size);
|
2018-10-27 22:25:59 +05:30
|
|
|
return;
|
|
|
|
}
|
|
|
|
if (p->key_entry && hashes_differ(p, &msg)) {
|
|
|
|
bb_error_msg("invalid cryptographic hash received from %s", p->p_dotted);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
#else
|
|
|
|
if (size != NTP_MSGSIZE_NOAUTH && size != NTP_MSGSIZE_MD5_AUTH) {
|
2019-07-02 16:40:19 +05:30
|
|
|
bb_error_msg("malformed packet received from %s: size %u", p->p_dotted, (int)size);
|
2012-06-11 15:11:46 +05:30
|
|
|
return;
|
2010-01-01 21:16:17 +05:30
|
|
|
}
|
2018-10-27 22:25:59 +05:30
|
|
|
#endif
|
2010-01-01 21:16:17 +05:30
|
|
|
|
|
|
|
if (msg.m_orgtime.int_partl != p->p_xmt_msg.m_xmttime.int_partl
|
|
|
|
|| msg.m_orgtime.fractionl != p->p_xmt_msg.m_xmttime.fractionl
|
|
|
|
) {
|
2012-06-11 15:11:46 +05:30
|
|
|
/* Somebody else's packet */
|
|
|
|
return;
|
2010-01-01 21:16:17 +05:30
|
|
|
}
|
|
|
|
|
2012-06-11 15:11:46 +05:30
|
|
|
/* We do not expect any more packets from this peer for now.
|
|
|
|
* Closing the socket informs kernel about it.
|
|
|
|
* We open a new socket when we send a new query.
|
|
|
|
*/
|
|
|
|
close(p->p_fd);
|
|
|
|
p->p_fd = -1;
|
|
|
|
|
2010-01-01 21:16:17 +05:30
|
|
|
if ((msg.m_status & LI_ALARM) == LI_ALARM
|
|
|
|
|| msg.m_stratum == 0
|
|
|
|
|| msg.m_stratum > NTP_MAXSTRATUM
|
|
|
|
) {
|
2013-05-22 21:18:19 +05:30
|
|
|
bb_error_msg("reply from %s: peer is unsynced", p->p_dotted);
|
2014-10-02 20:48:43 +05:30
|
|
|
/*
|
|
|
|
* Stratum 0 responses may have commands in 32-bit m_refid field:
|
|
|
|
* "DENY", "RSTR" - peer does not like us at all,
|
|
|
|
* "RATE" - peer is overloaded, reduce polling freq.
|
|
|
|
* If poll interval is small, increase it.
|
|
|
|
*/
|
|
|
|
if (G.poll_exp < BIGPOLL)
|
|
|
|
goto increase_interval;
|
2013-05-22 21:18:19 +05:30
|
|
|
goto pick_normal_interval;
|
2010-01-01 21:16:17 +05:30
|
|
|
}
|
|
|
|
|
|
|
|
// /* Verify valid root distance */
|
|
|
|
// if (msg.m_rootdelay / 2 + msg.m_rootdisp >= MAXDISP || p->lastpkt_reftime > msg.m_xmt)
|
|
|
|
// return; /* invalid header values */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* From RFC 2030 (with a correction to the delay math):
|
|
|
|
*
|
|
|
|
* Timestamp Name ID When Generated
|
|
|
|
* ------------------------------------------------------------
|
|
|
|
* Originate Timestamp T1 time request sent by client
|
|
|
|
* Receive Timestamp T2 time request received by server
|
|
|
|
* Transmit Timestamp T3 time reply sent by server
|
|
|
|
* Destination Timestamp T4 time reply received by client
|
|
|
|
*
|
|
|
|
* The roundtrip delay and local clock offset are defined as
|
|
|
|
*
|
|
|
|
* delay = (T4 - T1) - (T3 - T2); offset = ((T2 - T1) + (T3 - T4)) / 2
|
|
|
|
*/
|
|
|
|
T1 = p->p_xmttime;
|
|
|
|
T2 = lfp_to_d(msg.m_rectime);
|
|
|
|
T3 = lfp_to_d(msg.m_xmttime);
|
2010-01-03 13:29:59 +05:30
|
|
|
T4 = G.cur_time;
|
2014-04-19 22:30:16 +05:30
|
|
|
delay = (T4 - T1) - (T3 - T2);
|
2018-08-03 14:33:55 +05:30
|
|
|
|
2013-05-22 21:18:19 +05:30
|
|
|
/*
|
|
|
|
* If this packet's delay is much bigger than the last one,
|
|
|
|
* it's better to just ignore it than use its much less precise value.
|
|
|
|
*/
|
2014-04-19 22:30:16 +05:30
|
|
|
prev_delay = p->p_raw_delay;
|
2018-08-03 14:33:55 +05:30
|
|
|
p->p_raw_delay = (delay < 0 ? 0.0 : delay);
|
|
|
|
if (p->reachable_bits
|
|
|
|
&& delay > prev_delay * BAD_DELAY_GROWTH
|
|
|
|
&& delay > 1.0 / (8 * 1024) /* larger than ~0.000122 */
|
|
|
|
) {
|
2014-04-20 16:34:23 +05:30
|
|
|
bb_error_msg("reply from %s: delay %f is too high, ignoring", p->p_dotted, delay);
|
2013-05-22 21:18:19 +05:30
|
|
|
goto pick_normal_interval;
|
|
|
|
}
|
|
|
|
|
2018-08-03 14:33:55 +05:30
|
|
|
/* The delay calculation is a special case. In cases where the
|
|
|
|
* server and client clocks are running at different rates and
|
|
|
|
* with very fast networks, the delay can appear negative. In
|
|
|
|
* order to avoid violating the Principle of Least Astonishment,
|
|
|
|
* the delay is clamped not less than the system precision.
|
|
|
|
*/
|
|
|
|
if (delay < G_precision_sec)
|
|
|
|
delay = G_precision_sec;
|
2014-04-19 22:30:16 +05:30
|
|
|
p->lastpkt_delay = delay;
|
|
|
|
p->lastpkt_recv_time = T4;
|
|
|
|
VERB6 bb_error_msg("%s->lastpkt_recv_time=%f", p->p_dotted, p->lastpkt_recv_time);
|
|
|
|
p->lastpkt_status = msg.m_status;
|
|
|
|
p->lastpkt_stratum = msg.m_stratum;
|
|
|
|
p->lastpkt_rootdelay = sfp_to_d(msg.m_rootdelay);
|
|
|
|
p->lastpkt_rootdisp = sfp_to_d(msg.m_rootdisp);
|
|
|
|
p->lastpkt_refid = msg.m_refid;
|
|
|
|
|
2013-05-22 21:18:19 +05:30
|
|
|
p->datapoint_idx = p->reachable_bits ? (p->datapoint_idx + 1) % NUM_DATAPOINTS : 0;
|
|
|
|
datapoint = &p->filter_datapoint[p->datapoint_idx];
|
|
|
|
datapoint->d_recv_time = T4;
|
2013-12-08 20:41:04 +05:30
|
|
|
datapoint->d_offset = offset = ((T2 - T1) + (T3 - T4)) / 2;
|
2010-01-01 21:16:17 +05:30
|
|
|
datapoint->d_dispersion = LOG2D(msg.m_precision_exp) + G_precision_sec;
|
2010-01-03 13:29:59 +05:30
|
|
|
if (!p->reachable_bits) {
|
2010-01-01 21:16:17 +05:30
|
|
|
/* 1st datapoint ever - replicate offset in every element */
|
|
|
|
int i;
|
2012-03-05 05:21:48 +05:30
|
|
|
for (i = 0; i < NUM_DATAPOINTS; i++) {
|
2013-12-08 20:41:04 +05:30
|
|
|
p->filter_datapoint[i].d_offset = offset;
|
2010-01-01 21:16:17 +05:30
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-01-03 13:29:59 +05:30
|
|
|
p->reachable_bits |= 1;
|
2010-01-05 04:28:13 +05:30
|
|
|
if ((MAX_VERBOSE && G.verbose) || (option_mask32 & OPT_w)) {
|
2019-04-12 22:31:51 +05:30
|
|
|
bb_info_msg("reply from %s: offset:%+f delay:%f status:0x%02x strat:%d refid:0x%08x rootdelay:%f reach:0x%02x",
|
2010-01-01 21:16:17 +05:30
|
|
|
p->p_dotted,
|
2013-12-08 20:41:04 +05:30
|
|
|
offset,
|
2018-08-03 14:33:55 +05:30
|
|
|
p->p_raw_delay,
|
2010-01-04 04:49:13 +05:30
|
|
|
p->lastpkt_status,
|
|
|
|
p->lastpkt_stratum,
|
|
|
|
p->lastpkt_refid,
|
2012-03-08 07:57:49 +05:30
|
|
|
p->lastpkt_rootdelay,
|
|
|
|
p->reachable_bits
|
2010-01-04 04:49:13 +05:30
|
|
|
/* not shown: m_ppoll, m_precision_exp, m_rootdisp,
|
|
|
|
* m_reftime, m_orgtime, m_rectime, m_xmttime
|
|
|
|
*/
|
|
|
|
);
|
2010-01-01 21:16:17 +05:30
|
|
|
}
|
|
|
|
|
|
|
|
/* Muck with statictics and update the clock */
|
2010-01-03 13:29:59 +05:30
|
|
|
filter_datapoints(p);
|
|
|
|
q = select_and_cluster();
|
2014-10-02 20:48:43 +05:30
|
|
|
rc = 0;
|
2010-01-04 04:49:13 +05:30
|
|
|
if (q) {
|
2010-01-11 06:01:59 +05:30
|
|
|
if (!(option_mask32 & OPT_w)) {
|
2010-01-04 04:49:13 +05:30
|
|
|
rc = update_local_clock(q);
|
2014-10-02 20:48:43 +05:30
|
|
|
#if 0
|
|
|
|
//Disabled this because there is a case where largish offsets
|
|
|
|
//are unavoidable: if network round-trip delay is, say, ~0.6s,
|
|
|
|
//error in offset estimation would be ~delay/2 ~= 0.3s.
|
|
|
|
//Thus, offsets will be usually in -0.3...0.3s range.
|
|
|
|
//In this case, this code would keep poll interval small,
|
|
|
|
//but it won't be helping.
|
|
|
|
//BIGOFF check below deals with a case of seeing multi-second offsets.
|
|
|
|
|
2010-01-11 06:01:59 +05:30
|
|
|
/* If drift is dangerously large, immediately
|
|
|
|
* drop poll interval one step down.
|
|
|
|
*/
|
2010-01-17 05:35:58 +05:30
|
|
|
if (fabs(q->filter_offset) >= POLLDOWN_OFFSET) {
|
2013-12-04 21:02:09 +05:30
|
|
|
VERB4 bb_error_msg("offset:%+f > POLLDOWN_OFFSET", q->filter_offset);
|
2014-09-18 19:49:05 +05:30
|
|
|
adjust_poll(-POLLADJ_LIMIT * 3);
|
|
|
|
rc = 0;
|
2010-01-11 06:01:59 +05:30
|
|
|
}
|
2014-10-02 20:48:43 +05:30
|
|
|
#endif
|
2010-01-11 06:01:59 +05:30
|
|
|
}
|
2014-10-02 20:48:43 +05:30
|
|
|
} else {
|
|
|
|
/* No peer selected.
|
|
|
|
* If poll interval is small, increase it.
|
|
|
|
*/
|
|
|
|
if (G.poll_exp < BIGPOLL)
|
|
|
|
goto increase_interval;
|
2010-01-04 04:49:13 +05:30
|
|
|
}
|
2010-01-01 21:16:17 +05:30
|
|
|
|
|
|
|
if (rc != 0) {
|
|
|
|
/* Adjust the poll interval by comparing the current offset
|
|
|
|
* with the clock jitter. If the offset is less than
|
|
|
|
* the clock jitter times a constant, then the averaging interval
|
|
|
|
* is increased, otherwise it is decreased. A bit of hysteresis
|
|
|
|
* helps calm the dance. Works best using burst mode.
|
|
|
|
*/
|
2012-03-05 14:48:00 +05:30
|
|
|
if (rc > 0 && G.offset_to_jitter_ratio <= POLLADJ_GATE) {
|
2010-01-01 22:42:06 +05:30
|
|
|
/* was += G.poll_exp but it is a bit
|
|
|
|
* too optimistic for my taste at high poll_exp's */
|
2014-10-02 20:48:43 +05:30
|
|
|
increase_interval:
|
2014-09-18 19:49:05 +05:30
|
|
|
adjust_poll(MINPOLL);
|
2010-01-01 21:16:17 +05:30
|
|
|
} else {
|
2016-02-10 11:25:07 +05:30
|
|
|
VERB3 if (rc > 0)
|
2016-03-04 02:31:23 +05:30
|
|
|
bb_error_msg("want smaller interval: offset/jitter = %u",
|
|
|
|
G.offset_to_jitter_ratio);
|
2014-09-18 19:49:05 +05:30
|
|
|
adjust_poll(-G.poll_exp * 2);
|
2010-01-01 21:16:17 +05:30
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Decide when to send new query for this peer */
|
2013-05-22 21:18:19 +05:30
|
|
|
pick_normal_interval:
|
2014-09-28 02:26:09 +05:30
|
|
|
interval = poll_interval(INT_MAX);
|
|
|
|
if (fabs(offset) >= BIGOFF && interval > BIGOFF_INTERVAL) {
|
2016-02-10 11:25:07 +05:30
|
|
|
/* If we are synced, offsets are less than SLEW_THRESHOLD,
|
2013-12-08 20:41:04 +05:30
|
|
|
* or at the very least not much larger than it.
|
|
|
|
* Now we see a largish one.
|
|
|
|
* Either this peer is feeling bad, or packet got corrupted,
|
|
|
|
* or _our_ clock is wrong now and _all_ peers will show similar
|
|
|
|
* largish offsets too.
|
|
|
|
* I observed this with laptop suspend stopping clock.
|
|
|
|
* In any case, it makes sense to make next request soonish:
|
|
|
|
* cases 1 and 2: get a better datapoint,
|
|
|
|
* case 3: allows to resync faster.
|
|
|
|
*/
|
|
|
|
interval = BIGOFF_INTERVAL;
|
|
|
|
}
|
2010-01-01 21:16:17 +05:30
|
|
|
|
2010-01-04 04:49:13 +05:30
|
|
|
set_next(p, interval);
|
2010-01-01 21:16:17 +05:30
|
|
|
}
|
|
|
|
|
|
|
|
#if ENABLE_FEATURE_NTPD_SERVER
|
2010-01-03 13:29:59 +05:30
|
|
|
static NOINLINE void
|
2010-01-01 21:16:17 +05:30
|
|
|
recv_and_process_client_pkt(void /*int fd*/)
|
|
|
|
{
|
|
|
|
ssize_t size;
|
2011-05-16 07:23:00 +05:30
|
|
|
//uint8_t version;
|
2010-01-01 21:16:17 +05:30
|
|
|
len_and_sockaddr *to;
|
|
|
|
struct sockaddr *from;
|
|
|
|
msg_t msg;
|
|
|
|
uint8_t query_status;
|
|
|
|
l_fixedpt_t query_xmttime;
|
|
|
|
|
2012-04-01 20:01:04 +05:30
|
|
|
to = get_sock_lsa(G_listen_fd);
|
2010-01-01 21:16:17 +05:30
|
|
|
from = xzalloc(to->len);
|
|
|
|
|
2012-04-01 20:01:04 +05:30
|
|
|
size = recv_from_to(G_listen_fd, &msg, sizeof(msg), MSG_DONTWAIT, from, &to->u.sa, to->len);
|
2019-07-02 18:33:47 +05:30
|
|
|
|
|
|
|
/* "ntpq -p" (4.2.8p13) sends a 12-byte NTPv2 request:
|
|
|
|
* m_status is 0x16: leap:0 version:2 mode:6(reserved1)
|
|
|
|
* https://docs.ntpsec.org/latest/mode6.html
|
|
|
|
* We don't support this.
|
|
|
|
*/
|
|
|
|
|
2020-07-20 03:34:33 +05:30
|
|
|
# if ENABLE_FEATURE_NTP_AUTH
|
2018-10-27 22:25:59 +05:30
|
|
|
if (size != NTP_MSGSIZE_NOAUTH && size != NTP_MSGSIZE_MD5_AUTH && size != NTP_MSGSIZE_SHA1_AUTH)
|
2020-07-20 03:34:33 +05:30
|
|
|
# else
|
2018-10-27 22:25:59 +05:30
|
|
|
if (size != NTP_MSGSIZE_NOAUTH && size != NTP_MSGSIZE_MD5_AUTH)
|
2020-07-20 03:34:33 +05:30
|
|
|
# endif
|
2018-10-27 22:25:59 +05:30
|
|
|
{
|
2010-01-01 21:16:17 +05:30
|
|
|
char *addr;
|
|
|
|
if (size < 0) {
|
|
|
|
if (errno == EAGAIN)
|
|
|
|
goto bail;
|
libbb: reduce the overhead of single parameter bb_error_msg() calls
Back in 2007, commit 0c97c9d43707 ("'simple' error message functions by
Loic Grenie") introduced bb_simple_perror_msg() to allow for a lower
overhead call to bb_perror_msg() when only a string was being printed
with no parameters. This saves space for some CPU architectures because
it avoids the overhead of a call to a variadic function. However there
has never been a simple version of bb_error_msg(), and since 2007 many
new calls to bb_perror_msg() have been added that only take a single
parameter and so could have been using bb_simple_perror_message().
This changeset introduces 'simple' versions of bb_info_msg(),
bb_error_msg(), bb_error_msg_and_die(), bb_herror_msg() and
bb_herror_msg_and_die(), and replaces all calls that only take a
single parameter, or use something like ("%s", arg), with calls to the
corresponding 'simple' version.
Since it is likely that single parameter calls to the variadic functions
may be accidentally reintroduced in the future a new debugging config
option WARN_SIMPLE_MSG has been introduced. This uses some macro magic
which will cause any such calls to generate a warning, but this is
turned off by default to avoid use of the unpleasant macros in normal
circumstances.
This is a large changeset due to the number of calls that have been
replaced. The only files that contain changes other than simple
substitution of function calls are libbb.h, libbb/herror_msg.c,
libbb/verror_msg.c and libbb/xfuncs_printf.c. In miscutils/devfsd.c,
networking/udhcp/common.h and util-linux/mdev.c additonal macros have
been added for logging so that single parameter and multiple parameter
logging variants exist.
The amount of space saved varies considerably by architecture, and was
found to be as follows (for 'defconfig' using GCC 7.4):
Arm: -92 bytes
MIPS: -52 bytes
PPC: -1836 bytes
x86_64: -938 bytes
Note that for the MIPS architecture only an exception had to be made
disabling the 'simple' calls for 'udhcp' (in networking/udhcp/common.h)
because it made these files larger on MIPS.
Signed-off-by: James Byrne <james.byrne@origamienergy.com>
Signed-off-by: Denys Vlasenko <vda.linux@googlemail.com>
2019-07-02 15:05:03 +05:30
|
|
|
bb_simple_perror_msg_and_die("recv");
|
2010-01-01 21:16:17 +05:30
|
|
|
}
|
|
|
|
addr = xmalloc_sockaddr2dotted_noport(from);
|
|
|
|
bb_error_msg("malformed packet received from %s: size %u", addr, (int)size);
|
|
|
|
free(addr);
|
|
|
|
goto bail;
|
|
|
|
}
|
|
|
|
|
2016-08-01 23:54:24 +05:30
|
|
|
/* Respond only to client and symmetric active packets */
|
|
|
|
if ((msg.m_status & MODE_MASK) != MODE_CLIENT
|
|
|
|
&& (msg.m_status & MODE_MASK) != MODE_SYM_ACT
|
|
|
|
) {
|
|
|
|
goto bail;
|
|
|
|
}
|
|
|
|
|
2010-01-01 21:16:17 +05:30
|
|
|
query_status = msg.m_status;
|
|
|
|
query_xmttime = msg.m_xmttime;
|
|
|
|
|
|
|
|
/* Build a reply packet */
|
|
|
|
memset(&msg, 0, sizeof(msg));
|
2013-01-14 07:09:10 +05:30
|
|
|
msg.m_status = G.stratum < MAXSTRAT ? (G.ntp_status & LI_MASK) : LI_ALARM;
|
2010-01-01 21:16:17 +05:30
|
|
|
msg.m_status |= (query_status & VERSION_MASK);
|
|
|
|
msg.m_status |= ((query_status & MODE_MASK) == MODE_CLIENT) ?
|
2013-01-14 06:04:48 +05:30
|
|
|
MODE_SERVER : MODE_SYM_PAS;
|
2010-01-01 21:16:17 +05:30
|
|
|
msg.m_stratum = G.stratum;
|
|
|
|
msg.m_ppoll = G.poll_exp;
|
|
|
|
msg.m_precision_exp = G_precision_exp;
|
2010-01-03 13:29:59 +05:30
|
|
|
/* this time was obtained between poll() and recv() */
|
2021-03-26 16:32:08 +05:30
|
|
|
d_to_lfp(&msg.m_rectime, G.cur_time);
|
|
|
|
d_to_lfp(&msg.m_xmttime, gettime1900d()); /* this instant */
|
2010-10-04 04:50:44 +05:30
|
|
|
if (G.peer_cnt == 0) {
|
|
|
|
/* we have no peers: "stratum 1 server" mode. reftime = our own time */
|
|
|
|
G.reftime = G.cur_time;
|
|
|
|
}
|
2021-03-26 16:32:08 +05:30
|
|
|
d_to_lfp(&msg.m_reftime, G.reftime);
|
2010-01-01 21:16:17 +05:30
|
|
|
msg.m_orgtime = query_xmttime;
|
2021-03-26 16:32:08 +05:30
|
|
|
d_to_sfp(&msg.m_rootdelay, G.rootdelay);
|
2010-01-01 21:16:17 +05:30
|
|
|
//simple code does not do this, fix simple code!
|
2021-03-26 16:32:08 +05:30
|
|
|
d_to_sfp(&msg.m_rootdisp, G.rootdisp);
|
2011-05-16 07:23:00 +05:30
|
|
|
//version = (query_status & VERSION_MASK); /* ... >> VERSION_SHIFT - done below instead */
|
2010-01-01 21:16:17 +05:30
|
|
|
msg.m_refid = G.refid; // (version > (3 << VERSION_SHIFT)) ? G.refid : G.refid3;
|
|
|
|
|
|
|
|
/* We reply from the local address packet was sent to,
|
|
|
|
* this makes to/from look swapped here: */
|
2012-04-01 20:01:04 +05:30
|
|
|
do_sendto(G_listen_fd,
|
2010-01-01 21:16:17 +05:30
|
|
|
/*from:*/ &to->u.sa, /*to:*/ from, /*addrlen:*/ to->len,
|
|
|
|
&msg, size);
|
2021-02-21 14:17:34 +05:30
|
|
|
VERB3 {
|
|
|
|
char *addr;
|
|
|
|
addr = xmalloc_sockaddr2dotted_noport(from);
|
|
|
|
bb_error_msg("responded to query from %s", addr);
|
|
|
|
free(addr);
|
|
|
|
}
|
2010-01-01 21:16:17 +05:30
|
|
|
|
|
|
|
bail:
|
|
|
|
free(to);
|
|
|
|
free(from);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/* Upstream ntpd's options:
|
|
|
|
*
|
|
|
|
* -4 Force DNS resolution of host names to the IPv4 namespace.
|
|
|
|
* -6 Force DNS resolution of host names to the IPv6 namespace.
|
|
|
|
* -a Require cryptographic authentication for broadcast client,
|
|
|
|
* multicast client and symmetric passive associations.
|
|
|
|
* This is the default.
|
|
|
|
* -A Do not require cryptographic authentication for broadcast client,
|
|
|
|
* multicast client and symmetric passive associations.
|
|
|
|
* This is almost never a good idea.
|
|
|
|
* -b Enable the client to synchronize to broadcast servers.
|
|
|
|
* -c conffile
|
|
|
|
* Specify the name and path of the configuration file,
|
|
|
|
* default /etc/ntp.conf
|
|
|
|
* -d Specify debugging mode. This option may occur more than once,
|
|
|
|
* with each occurrence indicating greater detail of display.
|
|
|
|
* -D level
|
|
|
|
* Specify debugging level directly.
|
|
|
|
* -f driftfile
|
|
|
|
* Specify the name and path of the frequency file.
|
|
|
|
* This is the same operation as the "driftfile FILE"
|
|
|
|
* configuration command.
|
|
|
|
* -g Normally, ntpd exits with a message to the system log
|
|
|
|
* if the offset exceeds the panic threshold, which is 1000 s
|
|
|
|
* by default. This option allows the time to be set to any value
|
|
|
|
* without restriction; however, this can happen only once.
|
|
|
|
* If the threshold is exceeded after that, ntpd will exit
|
|
|
|
* with a message to the system log. This option can be used
|
|
|
|
* with the -q and -x options. See the tinker command for other options.
|
|
|
|
* -i jaildir
|
|
|
|
* Chroot the server to the directory jaildir. This option also implies
|
|
|
|
* that the server attempts to drop root privileges at startup
|
|
|
|
* (otherwise, chroot gives very little additional security).
|
|
|
|
* You may need to also specify a -u option.
|
|
|
|
* -k keyfile
|
|
|
|
* Specify the name and path of the symmetric key file,
|
|
|
|
* default /etc/ntp/keys. This is the same operation
|
|
|
|
* as the "keys FILE" configuration command.
|
|
|
|
* -l logfile
|
|
|
|
* Specify the name and path of the log file. The default
|
|
|
|
* is the system log file. This is the same operation as
|
|
|
|
* the "logfile FILE" configuration command.
|
|
|
|
* -L Do not listen to virtual IPs. The default is to listen.
|
|
|
|
* -n Don't fork.
|
|
|
|
* -N To the extent permitted by the operating system,
|
|
|
|
* run the ntpd at the highest priority.
|
|
|
|
* -p pidfile
|
|
|
|
* Specify the name and path of the file used to record the ntpd
|
|
|
|
* process ID. This is the same operation as the "pidfile FILE"
|
|
|
|
* configuration command.
|
|
|
|
* -P priority
|
|
|
|
* To the extent permitted by the operating system,
|
|
|
|
* run the ntpd at the specified priority.
|
|
|
|
* -q Exit the ntpd just after the first time the clock is set.
|
|
|
|
* This behavior mimics that of the ntpdate program, which is
|
|
|
|
* to be retired. The -g and -x options can be used with this option.
|
|
|
|
* Note: The kernel time discipline is disabled with this option.
|
|
|
|
* -r broadcastdelay
|
|
|
|
* Specify the default propagation delay from the broadcast/multicast
|
|
|
|
* server to this client. This is necessary only if the delay
|
|
|
|
* cannot be computed automatically by the protocol.
|
|
|
|
* -s statsdir
|
|
|
|
* Specify the directory path for files created by the statistics
|
|
|
|
* facility. This is the same operation as the "statsdir DIR"
|
|
|
|
* configuration command.
|
|
|
|
* -t key
|
|
|
|
* Add a key number to the trusted key list. This option can occur
|
|
|
|
* more than once.
|
|
|
|
* -u user[:group]
|
|
|
|
* Specify a user, and optionally a group, to switch to.
|
|
|
|
* -v variable
|
|
|
|
* -V variable
|
|
|
|
* Add a system variable listed by default.
|
|
|
|
* -x Normally, the time is slewed if the offset is less than the step
|
|
|
|
* threshold, which is 128 ms by default, and stepped if above
|
|
|
|
* the threshold. This option sets the threshold to 600 s, which is
|
|
|
|
* well within the accuracy window to set the clock manually.
|
|
|
|
* Note: since the slew rate of typical Unix kernels is limited
|
|
|
|
* to 0.5 ms/s, each second of adjustment requires an amortization
|
|
|
|
* interval of 2000 s. Thus, an adjustment as much as 600 s
|
|
|
|
* will take almost 14 days to complete. This option can be used
|
|
|
|
* with the -g and -q options. See the tinker command for other options.
|
|
|
|
* Note: The kernel time discipline is disabled with this option.
|
|
|
|
*/
|
2018-10-27 22:25:59 +05:30
|
|
|
#if ENABLE_FEATURE_NTP_AUTH
|
|
|
|
static key_entry_t *
|
|
|
|
find_key_entry(llist_t *key_entries, unsigned id)
|
|
|
|
{
|
|
|
|
while (key_entries) {
|
|
|
|
key_entry_t *cur = (key_entry_t*) key_entries->data;
|
|
|
|
if (cur->id == id)
|
|
|
|
return cur;
|
|
|
|
key_entries = key_entries->link;
|
|
|
|
}
|
|
|
|
bb_error_msg_and_die("key %u is not defined", id);
|
|
|
|
}
|
|
|
|
#endif
|
2010-01-01 21:16:17 +05:30
|
|
|
|
|
|
|
/* By doing init in a separate function we decrease stack usage
|
|
|
|
* in main loop.
|
|
|
|
*/
|
|
|
|
static NOINLINE void ntp_init(char **argv)
|
|
|
|
{
|
|
|
|
unsigned opts;
|
|
|
|
llist_t *peers;
|
2018-10-27 22:25:59 +05:30
|
|
|
#if ENABLE_FEATURE_NTP_AUTH
|
|
|
|
llist_t *key_entries;
|
|
|
|
char *key_file_path;
|
|
|
|
#endif
|
2010-01-01 21:16:17 +05:30
|
|
|
|
2014-03-05 23:28:15 +05:30
|
|
|
srand(getpid());
|
2010-01-01 21:16:17 +05:30
|
|
|
|
|
|
|
/* Set some globals */
|
2014-10-05 06:40:15 +05:30
|
|
|
G.discipline_jitter = G_precision_sec;
|
2010-01-01 21:16:17 +05:30
|
|
|
G.stratum = MAXSTRAT;
|
2010-01-03 13:29:59 +05:30
|
|
|
if (BURSTPOLL != 0)
|
|
|
|
G.poll_exp = BURSTPOLL; /* speeds up initial sync */
|
2010-01-06 16:57:47 +05:30
|
|
|
G.last_script_run = G.reftime = G.last_update_recv_time = gettime1900d(); /* sets G.cur_time too */
|
2018-03-11 21:38:02 +05:30
|
|
|
G.FREQHOLD_cnt = -1;
|
2010-01-01 21:16:17 +05:30
|
|
|
|
|
|
|
/* Parse options */
|
|
|
|
peers = NULL;
|
2018-10-27 22:25:59 +05:30
|
|
|
IF_FEATURE_NTP_AUTH(key_entries = NULL;)
|
2017-08-09 01:25:02 +05:30
|
|
|
opts = getopt32(argv, "^"
|
2010-01-01 21:16:17 +05:30
|
|
|
"nqNx" /* compat */
|
2018-10-27 22:25:59 +05:30
|
|
|
IF_FEATURE_NTP_AUTH("k:") /* compat */
|
2016-07-07 01:28:02 +05:30
|
|
|
"wp:*S:"IF_FEATURE_NTPD_SERVER("l") /* NOT compat */
|
2014-07-15 18:36:54 +05:30
|
|
|
IF_FEATURE_NTPD_SERVER("I:") /* compat */
|
2010-01-01 21:16:17 +05:30
|
|
|
"d" /* compat */
|
2017-08-23 18:38:25 +05:30
|
|
|
"46aAbgL" /* compat, ignored */
|
2017-08-09 01:25:02 +05:30
|
|
|
"\0"
|
2019-07-02 14:58:18 +05:30
|
|
|
"=0" /* should have no arguments */
|
|
|
|
":dd:wn" /* -d: counter; -p: list; -w implies -n */
|
2017-08-09 01:25:02 +05:30
|
|
|
IF_FEATURE_NTPD_SERVER(":Il") /* -I implies -l */
|
2018-10-27 22:25:59 +05:30
|
|
|
IF_FEATURE_NTP_AUTH(, &key_file_path)
|
|
|
|
, &peers, &G.script_name
|
|
|
|
IF_FEATURE_NTPD_SERVER(, &G.if_name)
|
|
|
|
, &G.verbose
|
|
|
|
);
|
2014-03-23 19:36:38 +05:30
|
|
|
|
2010-01-01 21:16:17 +05:30
|
|
|
// if (opts & OPT_x) /* disable stepping, only slew is allowed */
|
|
|
|
// G.time_was_stepped = 1;
|
2016-07-03 21:28:54 +05:30
|
|
|
|
|
|
|
#if ENABLE_FEATURE_NTPD_SERVER
|
|
|
|
G_listen_fd = -1;
|
|
|
|
if (opts & OPT_l) {
|
|
|
|
G_listen_fd = create_and_bind_dgram_or_die(NULL, 123);
|
|
|
|
if (G.if_name) {
|
|
|
|
if (setsockopt_bindtodevice(G_listen_fd, G.if_name))
|
|
|
|
xfunc_die();
|
|
|
|
}
|
|
|
|
socket_want_pktinfo(G_listen_fd);
|
2018-04-15 22:07:50 +05:30
|
|
|
setsockopt_int(G_listen_fd, IPPROTO_IP, IP_TOS, IPTOS_DSCP_AF21);
|
2016-07-03 21:28:54 +05:30
|
|
|
}
|
|
|
|
#endif
|
|
|
|
/* I hesitate to set -20 prio. -15 should be high enough for timekeeping */
|
|
|
|
if (opts & OPT_N)
|
|
|
|
setpriority(PRIO_PROCESS, 0, -15);
|
|
|
|
|
|
|
|
if (!(opts & OPT_n)) {
|
|
|
|
bb_daemonize_or_rexec(DAEMON_DEVNULL_STDIO, argv);
|
|
|
|
logmode = LOGMODE_NONE;
|
|
|
|
}
|
|
|
|
|
2018-10-27 22:25:59 +05:30
|
|
|
#if ENABLE_FEATURE_NTP_AUTH
|
|
|
|
if (opts & OPT_k) {
|
|
|
|
char *tokens[4];
|
|
|
|
parser_t *parser;
|
|
|
|
|
|
|
|
parser = config_open(key_file_path);
|
|
|
|
while (config_read(parser, tokens, 4, 3, "# \t", PARSE_NORMAL | PARSE_MIN_DIE) == 3) {
|
|
|
|
key_entry_t *key_entry;
|
|
|
|
char buffer[40];
|
|
|
|
smalluint hash_type;
|
|
|
|
smalluint msg_size;
|
|
|
|
smalluint key_length;
|
|
|
|
char *key;
|
|
|
|
|
|
|
|
if ((tokens[1][0] | 0x20) == 'm')
|
|
|
|
/* supports 'M' and 'md5' formats */
|
|
|
|
hash_type = HASH_MD5;
|
|
|
|
else
|
|
|
|
if (strncasecmp(tokens[1], "sha", 3) == 0)
|
|
|
|
/* supports 'sha' and 'sha1' formats */
|
|
|
|
hash_type = HASH_SHA1;
|
|
|
|
else
|
libbb: reduce the overhead of single parameter bb_error_msg() calls
Back in 2007, commit 0c97c9d43707 ("'simple' error message functions by
Loic Grenie") introduced bb_simple_perror_msg() to allow for a lower
overhead call to bb_perror_msg() when only a string was being printed
with no parameters. This saves space for some CPU architectures because
it avoids the overhead of a call to a variadic function. However there
has never been a simple version of bb_error_msg(), and since 2007 many
new calls to bb_perror_msg() have been added that only take a single
parameter and so could have been using bb_simple_perror_message().
This changeset introduces 'simple' versions of bb_info_msg(),
bb_error_msg(), bb_error_msg_and_die(), bb_herror_msg() and
bb_herror_msg_and_die(), and replaces all calls that only take a
single parameter, or use something like ("%s", arg), with calls to the
corresponding 'simple' version.
Since it is likely that single parameter calls to the variadic functions
may be accidentally reintroduced in the future a new debugging config
option WARN_SIMPLE_MSG has been introduced. This uses some macro magic
which will cause any such calls to generate a warning, but this is
turned off by default to avoid use of the unpleasant macros in normal
circumstances.
This is a large changeset due to the number of calls that have been
replaced. The only files that contain changes other than simple
substitution of function calls are libbb.h, libbb/herror_msg.c,
libbb/verror_msg.c and libbb/xfuncs_printf.c. In miscutils/devfsd.c,
networking/udhcp/common.h and util-linux/mdev.c additonal macros have
been added for logging so that single parameter and multiple parameter
logging variants exist.
The amount of space saved varies considerably by architecture, and was
found to be as follows (for 'defconfig' using GCC 7.4):
Arm: -92 bytes
MIPS: -52 bytes
PPC: -1836 bytes
x86_64: -938 bytes
Note that for the MIPS architecture only an exception had to be made
disabling the 'simple' calls for 'udhcp' (in networking/udhcp/common.h)
because it made these files larger on MIPS.
Signed-off-by: James Byrne <james.byrne@origamienergy.com>
Signed-off-by: Denys Vlasenko <vda.linux@googlemail.com>
2019-07-02 15:05:03 +05:30
|
|
|
bb_simple_error_msg_and_die("only MD5 and SHA1 keys supported");
|
2018-10-27 22:25:59 +05:30
|
|
|
/* man ntp.keys:
|
|
|
|
* MD5 The key is 1 to 16 printable characters terminated by an EOL,
|
|
|
|
* whitespace, or a # (which is the "start of comment" character).
|
|
|
|
* SHA
|
|
|
|
* SHA1
|
|
|
|
* RMD160 The key is a hex-encoded ASCII string of 40 characters, which
|
|
|
|
* is truncated as necessary.
|
|
|
|
*/
|
|
|
|
key_length = strnlen(tokens[2], sizeof(buffer)+1);
|
|
|
|
if (key_length >= sizeof(buffer)+1) {
|
|
|
|
err:
|
|
|
|
bb_error_msg_and_die("malformed key at line %u", parser->lineno);
|
|
|
|
}
|
|
|
|
if (hash_type == HASH_MD5) {
|
|
|
|
key = tokens[2];
|
|
|
|
msg_size = NTP_MSGSIZE_MD5_AUTH;
|
|
|
|
} else /* it's hash_type == HASH_SHA1 */
|
|
|
|
if (!(key_length & 1)) {
|
|
|
|
key_length >>= 1;
|
|
|
|
if (!hex2bin(buffer, tokens[2], key_length))
|
|
|
|
goto err;
|
|
|
|
key = buffer;
|
|
|
|
msg_size = NTP_MSGSIZE_SHA1_AUTH;
|
|
|
|
} else {
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
key_entry = xzalloc(sizeof(*key_entry) + key_length);
|
|
|
|
key_entry->type = hash_type;
|
|
|
|
key_entry->msg_size = msg_size;
|
|
|
|
key_entry->key_length = key_length;
|
|
|
|
memcpy(key_entry->key, key, key_length);
|
|
|
|
key_entry->id = xatou_range(tokens[0], 1, MAX_KEY_NUMBER);
|
|
|
|
llist_add_to(&key_entries, key_entry);
|
|
|
|
}
|
|
|
|
config_close(parser);
|
|
|
|
}
|
|
|
|
#endif
|
2010-10-04 04:50:44 +05:30
|
|
|
if (peers) {
|
2018-10-27 22:25:59 +05:30
|
|
|
#if ENABLE_FEATURE_NTP_AUTH
|
|
|
|
while (peers) {
|
|
|
|
char *peer = llist_pop(&peers);
|
|
|
|
key_entry_t *key_entry = NULL;
|
|
|
|
if (strncmp(peer, "keyno:", 6) == 0) {
|
|
|
|
char *end;
|
|
|
|
int key_id;
|
|
|
|
peer += 6;
|
|
|
|
end = strchr(peer, ':');
|
2018-11-25 05:12:56 +05:30
|
|
|
if (!end) bb_show_usage();
|
2018-10-27 22:25:59 +05:30
|
|
|
*end = '\0';
|
|
|
|
key_id = xatou_range(peer, 1, MAX_KEY_NUMBER);
|
|
|
|
*end = ':';
|
|
|
|
key_entry = find_key_entry(key_entries, key_id);
|
|
|
|
peer = end + 1;
|
|
|
|
}
|
|
|
|
add_peers(peer, key_entry);
|
|
|
|
}
|
|
|
|
#else
|
2010-10-04 04:50:44 +05:30
|
|
|
while (peers)
|
2018-10-27 22:25:59 +05:30
|
|
|
add_peers(llist_pop(&peers), NULL);
|
|
|
|
#endif
|
2014-03-23 19:36:38 +05:30
|
|
|
}
|
|
|
|
#if ENABLE_FEATURE_NTPD_CONF
|
|
|
|
else {
|
|
|
|
parser_t *parser;
|
2018-10-27 22:25:59 +05:30
|
|
|
char *token[3 + 2*ENABLE_FEATURE_NTP_AUTH];
|
2014-03-23 19:36:38 +05:30
|
|
|
|
|
|
|
parser = config_open("/etc/ntp.conf");
|
2018-10-27 22:25:59 +05:30
|
|
|
while (config_read(parser, token, 3 + 2*ENABLE_FEATURE_NTP_AUTH, 1, "# \t", PARSE_NORMAL)) {
|
2014-03-23 19:36:38 +05:30
|
|
|
if (strcmp(token[0], "server") == 0 && token[1]) {
|
2018-10-27 22:25:59 +05:30
|
|
|
# if ENABLE_FEATURE_NTP_AUTH
|
|
|
|
key_entry_t *key_entry = NULL;
|
|
|
|
if (token[2] && token[3] && strcmp(token[2], "key") == 0) {
|
|
|
|
unsigned key_id = xatou_range(token[3], 1, MAX_KEY_NUMBER);
|
|
|
|
key_entry = find_key_entry(key_entries, key_id);
|
|
|
|
}
|
|
|
|
add_peers(token[1], key_entry);
|
|
|
|
# else
|
|
|
|
add_peers(token[1], NULL);
|
|
|
|
# endif
|
2014-03-23 19:36:38 +05:30
|
|
|
continue;
|
|
|
|
}
|
|
|
|
bb_error_msg("skipping %s:%u: unimplemented command '%s'",
|
|
|
|
"/etc/ntp.conf", parser->lineno, token[0]
|
|
|
|
);
|
|
|
|
}
|
|
|
|
config_close(parser);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
if (G.peer_cnt == 0) {
|
|
|
|
if (!(opts & OPT_l))
|
|
|
|
bb_show_usage();
|
2010-10-04 04:50:44 +05:30
|
|
|
/* -l but no peers: "stratum 1 server" mode */
|
|
|
|
G.stratum = 1;
|
|
|
|
}
|
2019-03-27 18:07:40 +05:30
|
|
|
|
|
|
|
if (!(opts & OPT_n)) /* only if backgrounded: */
|
|
|
|
write_pidfile_std_path_and_ext("ntpd");
|
|
|
|
|
2010-08-27 05:45:01 +05:30
|
|
|
/* If network is up, syncronization occurs in ~10 seconds.
|
2011-04-07 05:15:20 +05:30
|
|
|
* We give "ntpd -q" 10 seconds to get first reply,
|
|
|
|
* then another 50 seconds to finish syncing.
|
2010-08-27 05:45:01 +05:30
|
|
|
*
|
|
|
|
* I tested ntpd 4.2.6p1 and apparently it never exits
|
|
|
|
* (will try forever), but it does not feel right.
|
|
|
|
* The goal of -q is to act like ntpdate: set time
|
|
|
|
* after a reasonably small period of polling, or fail.
|
|
|
|
*/
|
2011-04-07 05:15:20 +05:30
|
|
|
if (opts & OPT_q) {
|
|
|
|
option_mask32 |= OPT_qq;
|
|
|
|
alarm(10);
|
|
|
|
}
|
2010-08-27 05:45:01 +05:30
|
|
|
|
|
|
|
bb_signals(0
|
|
|
|
| (1 << SIGTERM)
|
|
|
|
| (1 << SIGINT)
|
|
|
|
| (1 << SIGALRM)
|
|
|
|
, record_signo
|
|
|
|
);
|
|
|
|
bb_signals(0
|
|
|
|
| (1 << SIGPIPE)
|
|
|
|
| (1 << SIGCHLD)
|
|
|
|
, SIG_IGN
|
|
|
|
);
|
2018-10-27 22:25:59 +05:30
|
|
|
//TODO: free unused elements of key_entries?
|
2010-01-01 21:16:17 +05:30
|
|
|
}
|
|
|
|
|
|
|
|
int ntpd_main(int argc UNUSED_PARAM, char **argv) MAIN_EXTERNALLY_VISIBLE;
|
|
|
|
int ntpd_main(int argc UNUSED_PARAM, char **argv)
|
|
|
|
{
|
2010-01-03 13:29:59 +05:30
|
|
|
#undef G
|
|
|
|
struct globals G;
|
2010-01-01 21:16:17 +05:30
|
|
|
struct pollfd *pfd;
|
|
|
|
peer_t **idx2peer;
|
2010-01-03 13:29:59 +05:30
|
|
|
unsigned cnt;
|
2010-01-01 21:16:17 +05:30
|
|
|
|
2010-01-03 13:29:59 +05:30
|
|
|
memset(&G, 0, sizeof(G));
|
|
|
|
SET_PTR_TO_GLOBALS(&G);
|
2010-01-01 21:16:17 +05:30
|
|
|
|
|
|
|
ntp_init(argv);
|
|
|
|
|
2010-01-03 13:29:59 +05:30
|
|
|
/* If ENABLE_FEATURE_NTPD_SERVER, + 1 for listen_fd: */
|
|
|
|
cnt = G.peer_cnt + ENABLE_FEATURE_NTPD_SERVER;
|
|
|
|
idx2peer = xzalloc(sizeof(idx2peer[0]) * cnt);
|
|
|
|
pfd = xzalloc(sizeof(pfd[0]) * cnt);
|
|
|
|
|
2010-10-21 02:06:51 +05:30
|
|
|
/* Countdown: we never sync before we sent INITIAL_SAMPLES+1
|
2010-01-11 06:44:04 +05:30
|
|
|
* packets to each peer.
|
2010-01-03 13:29:59 +05:30
|
|
|
* NB: if some peer is not responding, we may end up sending
|
|
|
|
* fewer packets to it and more to other peers.
|
2010-10-21 02:06:51 +05:30
|
|
|
* NB2: sync usually happens using INITIAL_SAMPLES packets,
|
2010-01-11 06:44:04 +05:30
|
|
|
* since last reply does not come back instantaneously.
|
2010-01-03 13:29:59 +05:30
|
|
|
*/
|
2010-10-21 02:06:51 +05:30
|
|
|
cnt = G.peer_cnt * (INITIAL_SAMPLES + 1);
|
2010-01-01 21:16:17 +05:30
|
|
|
|
|
|
|
while (!bb_got_signal) {
|
|
|
|
llist_t *item;
|
|
|
|
unsigned i, j;
|
|
|
|
int nfds, timeout;
|
2010-01-03 13:29:59 +05:30
|
|
|
double nextaction;
|
2010-01-01 21:16:17 +05:30
|
|
|
|
|
|
|
/* Nothing between here and poll() blocks for any significant time */
|
|
|
|
|
2017-12-27 00:49:37 +05:30
|
|
|
nextaction = G.last_script_run + (11*60);
|
|
|
|
if (nextaction < G.cur_time + 1)
|
|
|
|
nextaction = G.cur_time + 1;
|
2010-01-01 21:16:17 +05:30
|
|
|
|
|
|
|
i = 0;
|
|
|
|
#if ENABLE_FEATURE_NTPD_SERVER
|
2012-04-01 20:01:04 +05:30
|
|
|
if (G_listen_fd != -1) {
|
|
|
|
pfd[0].fd = G_listen_fd;
|
2010-01-01 21:16:17 +05:30
|
|
|
pfd[0].events = POLLIN;
|
|
|
|
i++;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
/* Pass over peer list, send requests, time out on receives */
|
2010-01-03 13:29:59 +05:30
|
|
|
for (item = G.ntp_peers; item != NULL; item = item->link) {
|
2010-01-01 21:16:17 +05:30
|
|
|
peer_t *p = (peer_t *) item->data;
|
|
|
|
|
2010-01-03 13:29:59 +05:30
|
|
|
if (p->next_action_time <= G.cur_time) {
|
2010-01-01 21:16:17 +05:30
|
|
|
if (p->p_fd == -1) {
|
|
|
|
/* Time to send new req */
|
2010-01-03 13:29:59 +05:30
|
|
|
if (--cnt == 0) {
|
libbb: reduce the overhead of single parameter bb_error_msg() calls
Back in 2007, commit 0c97c9d43707 ("'simple' error message functions by
Loic Grenie") introduced bb_simple_perror_msg() to allow for a lower
overhead call to bb_perror_msg() when only a string was being printed
with no parameters. This saves space for some CPU architectures because
it avoids the overhead of a call to a variadic function. However there
has never been a simple version of bb_error_msg(), and since 2007 many
new calls to bb_perror_msg() have been added that only take a single
parameter and so could have been using bb_simple_perror_message().
This changeset introduces 'simple' versions of bb_info_msg(),
bb_error_msg(), bb_error_msg_and_die(), bb_herror_msg() and
bb_herror_msg_and_die(), and replaces all calls that only take a
single parameter, or use something like ("%s", arg), with calls to the
corresponding 'simple' version.
Since it is likely that single parameter calls to the variadic functions
may be accidentally reintroduced in the future a new debugging config
option WARN_SIMPLE_MSG has been introduced. This uses some macro magic
which will cause any such calls to generate a warning, but this is
turned off by default to avoid use of the unpleasant macros in normal
circumstances.
This is a large changeset due to the number of calls that have been
replaced. The only files that contain changes other than simple
substitution of function calls are libbb.h, libbb/herror_msg.c,
libbb/verror_msg.c and libbb/xfuncs_printf.c. In miscutils/devfsd.c,
networking/udhcp/common.h and util-linux/mdev.c additonal macros have
been added for logging so that single parameter and multiple parameter
logging variants exist.
The amount of space saved varies considerably by architecture, and was
found to be as follows (for 'defconfig' using GCC 7.4):
Arm: -92 bytes
MIPS: -52 bytes
PPC: -1836 bytes
x86_64: -938 bytes
Note that for the MIPS architecture only an exception had to be made
disabling the 'simple' calls for 'udhcp' (in networking/udhcp/common.h)
because it made these files larger on MIPS.
Signed-off-by: James Byrne <james.byrne@origamienergy.com>
Signed-off-by: Denys Vlasenko <vda.linux@googlemail.com>
2019-07-02 15:05:03 +05:30
|
|
|
VERB4 bb_simple_error_msg("disabling burst mode");
|
2014-09-18 19:49:03 +05:30
|
|
|
G.polladj_count = 0;
|
|
|
|
G.poll_exp = MINPOLL;
|
2010-01-03 13:29:59 +05:30
|
|
|
}
|
|
|
|
send_query_to_peer(p);
|
2010-01-01 21:16:17 +05:30
|
|
|
} else {
|
|
|
|
/* Timed out waiting for reply */
|
|
|
|
close(p->p_fd);
|
|
|
|
p->p_fd = -1;
|
2014-10-02 20:48:43 +05:30
|
|
|
/* If poll interval is small, increase it */
|
|
|
|
if (G.poll_exp < BIGPOLL)
|
|
|
|
adjust_poll(MINPOLL);
|
2014-09-28 02:26:09 +05:30
|
|
|
timeout = poll_interval(NOREPLY_INTERVAL);
|
2010-01-01 21:16:17 +05:30
|
|
|
bb_error_msg("timed out waiting for %s, reach 0x%02x, next query in %us",
|
2010-01-03 13:29:59 +05:30
|
|
|
p->p_dotted, p->reachable_bits, timeout);
|
2016-03-04 11:56:08 +05:30
|
|
|
|
|
|
|
/* What if don't see it because it changed its IP? */
|
2016-06-06 05:56:49 +05:30
|
|
|
if (p->reachable_bits == 0)
|
2017-01-06 20:48:45 +05:30
|
|
|
resolve_peer_hostname(p);
|
2016-03-04 11:56:08 +05:30
|
|
|
|
2010-01-01 21:16:17 +05:30
|
|
|
set_next(p, timeout);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (p->next_action_time < nextaction)
|
|
|
|
nextaction = p->next_action_time;
|
|
|
|
|
|
|
|
if (p->p_fd >= 0) {
|
|
|
|
/* Wait for reply from this peer */
|
|
|
|
pfd[i].fd = p->p_fd;
|
|
|
|
pfd[i].events = POLLIN;
|
|
|
|
idx2peer[i] = p;
|
|
|
|
i++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-01-03 13:29:59 +05:30
|
|
|
timeout = nextaction - G.cur_time;
|
|
|
|
if (timeout < 0)
|
|
|
|
timeout = 0;
|
|
|
|
timeout++; /* (nextaction - G.cur_time) rounds down, compensating */
|
2010-01-01 21:16:17 +05:30
|
|
|
|
|
|
|
/* Here we may block */
|
2021-02-21 13:24:08 +05:30
|
|
|
VERB3 {
|
2012-04-01 20:01:04 +05:30
|
|
|
if (i > (ENABLE_FEATURE_NTPD_SERVER && G_listen_fd != -1)) {
|
2012-03-03 16:45:46 +05:30
|
|
|
/* We wait for at least one reply.
|
|
|
|
* Poll for it, without wasting time for message.
|
|
|
|
* Since replies often come under 1 second, this also
|
|
|
|
* reduces clutter in logs.
|
|
|
|
*/
|
|
|
|
nfds = poll(pfd, i, 1000);
|
|
|
|
if (nfds != 0)
|
|
|
|
goto did_poll;
|
|
|
|
if (--timeout <= 0)
|
|
|
|
goto did_poll;
|
|
|
|
}
|
2012-03-06 23:46:50 +05:30
|
|
|
bb_error_msg("poll:%us sockets:%u interval:%us", timeout, i, 1 << G.poll_exp);
|
2012-03-03 16:45:46 +05:30
|
|
|
}
|
2010-01-01 21:16:17 +05:30
|
|
|
nfds = poll(pfd, i, timeout * 1000);
|
2012-03-03 16:45:46 +05:30
|
|
|
did_poll:
|
2010-01-03 13:29:59 +05:30
|
|
|
gettime1900d(); /* sets G.cur_time */
|
2010-01-06 16:57:47 +05:30
|
|
|
if (nfds <= 0) {
|
2017-10-31 17:14:37 +05:30
|
|
|
double ct;
|
2017-10-31 21:23:23 +05:30
|
|
|
int dns_error;
|
2017-10-31 17:14:37 +05:30
|
|
|
|
|
|
|
if (bb_got_signal)
|
|
|
|
break; /* poll was interrupted by a signal */
|
|
|
|
|
|
|
|
if (G.cur_time - G.last_script_run > 11*60) {
|
2010-01-06 16:57:47 +05:30
|
|
|
/* Useful for updating battery-backed RTC and such */
|
2010-01-11 06:01:59 +05:30
|
|
|
run_script("periodic", G.last_update_offset);
|
2010-01-06 17:35:08 +05:30
|
|
|
gettime1900d(); /* sets G.cur_time */
|
2010-01-06 16:57:47 +05:30
|
|
|
}
|
2017-10-31 17:14:37 +05:30
|
|
|
|
|
|
|
/* Resolve peer names to IPs, if not resolved yet.
|
|
|
|
* We do it only when poll timed out:
|
|
|
|
* this way, we almost never overlap DNS resolution with
|
|
|
|
* "request-reply" packet round trip.
|
|
|
|
*/
|
2017-10-31 21:23:23 +05:30
|
|
|
dns_error = 0;
|
2017-10-31 17:14:37 +05:30
|
|
|
ct = G.cur_time;
|
|
|
|
for (item = G.ntp_peers; item != NULL; item = item->link) {
|
|
|
|
peer_t *p = (peer_t *) item->data;
|
|
|
|
if (p->next_action_time <= ct && !p->p_lsa) {
|
|
|
|
/* This can take up to ~10 sec per each DNS query */
|
2017-10-31 21:23:23 +05:30
|
|
|
dns_error |= (!resolve_peer_hostname(p));
|
2017-10-31 17:14:37 +05:30
|
|
|
}
|
|
|
|
}
|
2017-10-31 21:23:23 +05:30
|
|
|
if (!dns_error)
|
|
|
|
goto check_unsync;
|
2017-10-31 17:14:37 +05:30
|
|
|
/* Set next time for those which are still not resolved */
|
2017-10-31 21:23:23 +05:30
|
|
|
gettime1900d(); /* sets G.cur_time (needed for set_next()) */
|
2017-10-31 17:14:37 +05:30
|
|
|
for (item = G.ntp_peers; item != NULL; item = item->link) {
|
|
|
|
peer_t *p = (peer_t *) item->data;
|
|
|
|
if (p->next_action_time <= ct && !p->p_lsa) {
|
|
|
|
set_next(p, HOSTNAME_INTERVAL * p->dns_errors);
|
|
|
|
}
|
|
|
|
}
|
2013-05-22 21:46:34 +05:30
|
|
|
goto check_unsync;
|
2010-01-06 16:57:47 +05:30
|
|
|
}
|
2010-01-01 21:16:17 +05:30
|
|
|
|
|
|
|
/* Process any received packets */
|
|
|
|
j = 0;
|
|
|
|
#if ENABLE_FEATURE_NTPD_SERVER
|
2010-01-03 13:29:59 +05:30
|
|
|
if (G.listen_fd != -1) {
|
2010-01-01 21:16:17 +05:30
|
|
|
if (pfd[0].revents /* & (POLLIN|POLLERR)*/) {
|
|
|
|
nfds--;
|
2010-01-03 13:29:59 +05:30
|
|
|
recv_and_process_client_pkt(/*G.listen_fd*/);
|
|
|
|
gettime1900d(); /* sets G.cur_time */
|
2010-01-01 21:16:17 +05:30
|
|
|
}
|
|
|
|
j = 1;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
for (; nfds != 0 && j < i; j++) {
|
|
|
|
if (pfd[j].revents /* & (POLLIN|POLLERR)*/) {
|
2011-04-07 05:15:20 +05:30
|
|
|
/*
|
|
|
|
* At init, alarm was set to 10 sec.
|
|
|
|
* Now we did get a reply.
|
|
|
|
* Increase timeout to 50 seconds to finish syncing.
|
|
|
|
*/
|
|
|
|
if (option_mask32 & OPT_qq) {
|
|
|
|
option_mask32 &= ~OPT_qq;
|
|
|
|
alarm(50);
|
|
|
|
}
|
2010-01-01 21:16:17 +05:30
|
|
|
nfds--;
|
|
|
|
recv_and_process_peer_pkt(idx2peer[j]);
|
2010-01-03 13:29:59 +05:30
|
|
|
gettime1900d(); /* sets G.cur_time */
|
2010-01-01 21:16:17 +05:30
|
|
|
}
|
|
|
|
}
|
2013-05-22 21:18:19 +05:30
|
|
|
|
2013-05-22 21:46:34 +05:30
|
|
|
check_unsync:
|
2013-05-22 21:18:19 +05:30
|
|
|
if (G.ntp_peers && G.stratum != MAXSTRAT) {
|
|
|
|
for (item = G.ntp_peers; item != NULL; item = item->link) {
|
|
|
|
peer_t *p = (peer_t *) item->data;
|
|
|
|
if (p->reachable_bits)
|
|
|
|
goto have_reachable_peer;
|
|
|
|
}
|
|
|
|
/* No peer responded for last 8 packets, panic */
|
2014-09-28 02:26:09 +05:30
|
|
|
clamp_pollexp_and_set_MAXSTRAT();
|
2013-05-23 19:36:59 +05:30
|
|
|
run_script("unsync", 0.0);
|
2013-05-22 21:18:19 +05:30
|
|
|
have_reachable_peer: ;
|
|
|
|
}
|
2010-01-01 21:16:17 +05:30
|
|
|
} /* while (!bb_got_signal) */
|
|
|
|
|
2019-03-18 00:17:52 +05:30
|
|
|
remove_pidfile_std_path_and_ext("ntpd");
|
2010-01-01 21:16:17 +05:30
|
|
|
kill_myself_with_sig(bb_got_signal);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/*** openntpd-4.6 uses only adjtime, not adjtimex ***/
|
|
|
|
|
|
|
|
/*** ntp-4.2.6/ntpd/ntp_loopfilter.c - adjtimex usage ***/
|
|
|
|
|
|
|
|
#if 0
|
|
|
|
static double
|
|
|
|
direct_freq(double fp_offset)
|
|
|
|
{
|
|
|
|
#ifdef KERNEL_PLL
|
|
|
|
/*
|
|
|
|
* If the kernel is enabled, we need the residual offset to
|
|
|
|
* calculate the frequency correction.
|
|
|
|
*/
|
|
|
|
if (pll_control && kern_enable) {
|
|
|
|
memset(&ntv, 0, sizeof(ntv));
|
|
|
|
ntp_adjtime(&ntv);
|
|
|
|
#ifdef STA_NANO
|
|
|
|
clock_offset = ntv.offset / 1e9;
|
|
|
|
#else /* STA_NANO */
|
|
|
|
clock_offset = ntv.offset / 1e6;
|
|
|
|
#endif /* STA_NANO */
|
|
|
|
drift_comp = FREQTOD(ntv.freq);
|
|
|
|
}
|
|
|
|
#endif /* KERNEL_PLL */
|
|
|
|
set_freq((fp_offset - clock_offset) / (current_time - clock_epoch) + drift_comp);
|
|
|
|
wander_resid = 0;
|
|
|
|
return drift_comp;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2010-10-29 15:16:52 +05:30
|
|
|
set_freq(double freq) /* frequency update */
|
2010-01-01 21:16:17 +05:30
|
|
|
{
|
|
|
|
char tbuf[80];
|
|
|
|
|
|
|
|
drift_comp = freq;
|
|
|
|
|
|
|
|
#ifdef KERNEL_PLL
|
|
|
|
/*
|
|
|
|
* If the kernel is enabled, update the kernel frequency.
|
|
|
|
*/
|
|
|
|
if (pll_control && kern_enable) {
|
|
|
|
memset(&ntv, 0, sizeof(ntv));
|
|
|
|
ntv.modes = MOD_FREQUENCY;
|
|
|
|
ntv.freq = DTOFREQ(drift_comp);
|
|
|
|
ntp_adjtime(&ntv);
|
|
|
|
snprintf(tbuf, sizeof(tbuf), "kernel %.3f PPM", drift_comp * 1e6);
|
|
|
|
report_event(EVNT_FSET, NULL, tbuf);
|
|
|
|
} else {
|
|
|
|
snprintf(tbuf, sizeof(tbuf), "ntpd %.3f PPM", drift_comp * 1e6);
|
|
|
|
report_event(EVNT_FSET, NULL, tbuf);
|
|
|
|
}
|
|
|
|
#else /* KERNEL_PLL */
|
|
|
|
snprintf(tbuf, sizeof(tbuf), "ntpd %.3f PPM", drift_comp * 1e6);
|
|
|
|
report_event(EVNT_FSET, NULL, tbuf);
|
|
|
|
#endif /* KERNEL_PLL */
|
|
|
|
}
|
|
|
|
|
|
|
|
...
|
|
|
|
...
|
|
|
|
...
|
|
|
|
|
|
|
|
#ifdef KERNEL_PLL
|
|
|
|
/*
|
|
|
|
* This code segment works when clock adjustments are made using
|
|
|
|
* precision time kernel support and the ntp_adjtime() system
|
|
|
|
* call. This support is available in Solaris 2.6 and later,
|
|
|
|
* Digital Unix 4.0 and later, FreeBSD, Linux and specially
|
|
|
|
* modified kernels for HP-UX 9 and Ultrix 4. In the case of the
|
|
|
|
* DECstation 5000/240 and Alpha AXP, additional kernel
|
|
|
|
* modifications provide a true microsecond clock and nanosecond
|
|
|
|
* clock, respectively.
|
|
|
|
*
|
|
|
|
* Important note: The kernel discipline is used only if the
|
|
|
|
* step threshold is less than 0.5 s, as anything higher can
|
|
|
|
* lead to overflow problems. This might occur if some misguided
|
|
|
|
* lad set the step threshold to something ridiculous.
|
|
|
|
*/
|
|
|
|
if (pll_control && kern_enable) {
|
|
|
|
|
|
|
|
#define MOD_BITS (MOD_OFFSET | MOD_MAXERROR | MOD_ESTERROR | MOD_STATUS | MOD_TIMECONST)
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We initialize the structure for the ntp_adjtime()
|
|
|
|
* system call. We have to convert everything to
|
|
|
|
* microseconds or nanoseconds first. Do not update the
|
|
|
|
* system variables if the ext_enable flag is set. In
|
|
|
|
* this case, the external clock driver will update the
|
|
|
|
* variables, which will be read later by the local
|
|
|
|
* clock driver. Afterwards, remember the time and
|
|
|
|
* frequency offsets for jitter and stability values and
|
|
|
|
* to update the frequency file.
|
|
|
|
*/
|
|
|
|
memset(&ntv, 0, sizeof(ntv));
|
|
|
|
if (ext_enable) {
|
|
|
|
ntv.modes = MOD_STATUS;
|
|
|
|
} else {
|
|
|
|
#ifdef STA_NANO
|
|
|
|
ntv.modes = MOD_BITS | MOD_NANO;
|
|
|
|
#else /* STA_NANO */
|
|
|
|
ntv.modes = MOD_BITS;
|
|
|
|
#endif /* STA_NANO */
|
|
|
|
if (clock_offset < 0)
|
|
|
|
dtemp = -.5;
|
|
|
|
else
|
|
|
|
dtemp = .5;
|
|
|
|
#ifdef STA_NANO
|
|
|
|
ntv.offset = (int32)(clock_offset * 1e9 + dtemp);
|
|
|
|
ntv.constant = sys_poll;
|
|
|
|
#else /* STA_NANO */
|
|
|
|
ntv.offset = (int32)(clock_offset * 1e6 + dtemp);
|
|
|
|
ntv.constant = sys_poll - 4;
|
|
|
|
#endif /* STA_NANO */
|
|
|
|
ntv.esterror = (u_int32)(clock_jitter * 1e6);
|
|
|
|
ntv.maxerror = (u_int32)((sys_rootdelay / 2 + sys_rootdisp) * 1e6);
|
|
|
|
ntv.status = STA_PLL;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Enable/disable the PPS if requested.
|
|
|
|
*/
|
|
|
|
if (pps_enable) {
|
|
|
|
if (!(pll_status & STA_PPSTIME))
|
|
|
|
report_event(EVNT_KERN,
|
2013-01-14 06:04:48 +05:30
|
|
|
NULL, "PPS enabled");
|
2010-01-01 21:16:17 +05:30
|
|
|
ntv.status |= STA_PPSTIME | STA_PPSFREQ;
|
|
|
|
} else {
|
|
|
|
if (pll_status & STA_PPSTIME)
|
|
|
|
report_event(EVNT_KERN,
|
2013-01-14 06:04:48 +05:30
|
|
|
NULL, "PPS disabled");
|
|
|
|
ntv.status &= ~(STA_PPSTIME | STA_PPSFREQ);
|
2010-01-01 21:16:17 +05:30
|
|
|
}
|
|
|
|
if (sys_leap == LEAP_ADDSECOND)
|
|
|
|
ntv.status |= STA_INS;
|
|
|
|
else if (sys_leap == LEAP_DELSECOND)
|
|
|
|
ntv.status |= STA_DEL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Pass the stuff to the kernel. If it squeals, turn off
|
|
|
|
* the pps. In any case, fetch the kernel offset,
|
|
|
|
* frequency and jitter.
|
|
|
|
*/
|
|
|
|
if (ntp_adjtime(&ntv) == TIME_ERROR) {
|
|
|
|
if (!(ntv.status & STA_PPSSIGNAL))
|
|
|
|
report_event(EVNT_KERN, NULL,
|
2013-01-14 06:04:48 +05:30
|
|
|
"PPS no signal");
|
2010-01-01 21:16:17 +05:30
|
|
|
}
|
|
|
|
pll_status = ntv.status;
|
|
|
|
#ifdef STA_NANO
|
|
|
|
clock_offset = ntv.offset / 1e9;
|
|
|
|
#else /* STA_NANO */
|
|
|
|
clock_offset = ntv.offset / 1e6;
|
|
|
|
#endif /* STA_NANO */
|
|
|
|
clock_frequency = FREQTOD(ntv.freq);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If the kernel PPS is lit, monitor its performance.
|
|
|
|
*/
|
|
|
|
if (ntv.status & STA_PPSTIME) {
|
|
|
|
#ifdef STA_NANO
|
|
|
|
clock_jitter = ntv.jitter / 1e9;
|
|
|
|
#else /* STA_NANO */
|
|
|
|
clock_jitter = ntv.jitter / 1e6;
|
|
|
|
#endif /* STA_NANO */
|
|
|
|
}
|
|
|
|
|
|
|
|
#if defined(STA_NANO) && NTP_API == 4
|
|
|
|
/*
|
|
|
|
* If the TAI changes, update the kernel TAI.
|
|
|
|
*/
|
|
|
|
if (loop_tai != sys_tai) {
|
|
|
|
loop_tai = sys_tai;
|
|
|
|
ntv.modes = MOD_TAI;
|
|
|
|
ntv.constant = sys_tai;
|
|
|
|
ntp_adjtime(&ntv);
|
|
|
|
}
|
|
|
|
#endif /* STA_NANO */
|
|
|
|
}
|
|
|
|
#endif /* KERNEL_PLL */
|
|
|
|
#endif
|