// SPDX-License-Identifier: GPL-2.0
/*
 * NTP state machine interfaces and logic.
 *
 * This code was mainly moved from kernel/timer.c and kernel/time.c.
 * Please see those files for relevant copyright info and historical
 * changelogs.
 */
#include <linux/capability.h>
#include <linux/clocksource.h>
#include <linux/workqueue.h>
#include <linux/hrtimer.h>
#include <linux/jiffies.h>
#include <linux/math64.h>
#include <linux/timex.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/rtc.h>
#include <linux/audit.h>
#include <linux/timekeeper_internal.h>

#include "ntp_internal.h"
#include "timekeeping_internal.h"

/**
 * struct ntp_data - Structure holding all NTP related state
 * @tick_usec:		USER_HZ period in microseconds
 * @tick_length:	Adjusted tick length
 * @tick_length_base:	Base value for @tick_length
 * @time_state:		State of the clock synchronization
 * @time_status:	Clock status bits
 * @time_offset:	Time adjustment in nanoseconds
 * @time_constant:	PLL time constant
 * @time_maxerror:	Maximum error in microseconds holding the NTP sync distance
 *			(NTP dispersion + delay / 2)
 * @time_esterror:	Estimated error in microseconds holding NTP dispersion
 * @time_freq:		Frequency offset scaled nsecs/secs
 * @time_reftime:	Time at last adjustment in seconds
 * @time_adjust:	Adjustment value
 * @ntp_tick_adj:	Constant boot-param configurable NTP tick adjustment (upscaled)
 * @ntp_next_leap_sec:	Second value of the next pending leapsecond, or TIME64_MAX if no leap
 *
 * @pps_valid:		PPS signal watchdog counter
 * @pps_tf:		PPS phase median filter
 * @pps_jitter:		PPS current jitter in nanoseconds
 * @pps_fbase:		PPS beginning of the last freq interval
 * @pps_shift:		PPS current interval duration in seconds (shift value)
 * @pps_intcnt:		PPS interval counter
 * @pps_freq:		PPS frequency offset in scaled ns/s
 * @pps_stabil:		PPS current stability in scaled ns/s
 * @pps_calcnt:		PPS monitor: calibration intervals
 * @pps_jitcnt:		PPS monitor: jitter limit exceeded
 * @pps_stbcnt:		PPS monitor: stability limit exceeded
 * @pps_errcnt:		PPS monitor: calibration errors
 *
 * Protected by the timekeeping locks.
 */
struct ntp_data {
        unsigned long           tick_usec;
        u64                     tick_length;
        u64                     tick_length_base;
        int                     time_state;
        int                     time_status;
        s64                     time_offset;
        long                    time_constant;
        long                    time_maxerror;
        long                    time_esterror;
        s64                     time_freq;
        time64_t                time_reftime;
        long                    time_adjust;
        s64                     ntp_tick_adj;
        time64_t                ntp_next_leap_sec;
#ifdef CONFIG_NTP_PPS
        int                     pps_valid;
        long                    pps_tf[3];
        long                    pps_jitter;
        struct timespec64       pps_fbase;
        int                     pps_shift;
        int                     pps_intcnt;
        s64                     pps_freq;
        long                    pps_stabil;
        long                    pps_calcnt;
        long                    pps_jitcnt;
        long                    pps_stbcnt;
        long                    pps_errcnt;
#endif
};

static struct ntp_data tk_ntp_data[TIMEKEEPERS_MAX] = {
        [ 0 ... TIMEKEEPERS_MAX - 1 ] = {
                .tick_usec              = USER_TICK_USEC,
                .time_state             = TIME_OK,
                .time_status            = STA_UNSYNC,
                .time_constant          = 2,
                .time_maxerror          = NTP_PHASE_LIMIT,
                .time_esterror          = NTP_PHASE_LIMIT,
                .ntp_next_leap_sec      = TIME64_MAX,
        },
};

#define SECS_PER_DAY            86400
#define MAX_TICKADJ             500LL           /* usecs */
#define MAX_TICKADJ_SCALED \
        (((MAX_TICKADJ * NSEC_PER_USEC) << NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ)
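/*
 * Informally: MAX_TICKADJ_SCALED spreads the 500 usec/sec adjtime() slew
 * limit evenly over the NTP_INTERVAL_FREQ accumulation intervals of one
 * second, expressed in the same (nanoseconds << NTP_SCALE_SHIFT) fixed
 * point format as @tick_length.
 */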
#define MAX_TAI_OFFSET          100000

#ifdef CONFIG_NTP_PPS

/*
 * The following variables are used when a pulse-per-second (PPS) signal
 * is available. They establish the engineering parameters of the clock
 * discipline loop when controlled by the PPS signal.
 */
#define PPS_VALID       10      /* PPS signal watchdog max (s) */
#define PPS_POPCORN     4       /* popcorn spike threshold (shift) */
#define PPS_INTMIN      2       /* min freq interval (s) (shift) */
#define PPS_INTMAX      8       /* max freq interval (s) (shift) */
#define PPS_INTCOUNT    4       /* number of consecutive good intervals to
                                   increase pps_shift or consecutive bad
                                   intervals to decrease it */
#define PPS_MAXWANDER   100000  /* max PPS freq wander (ns/s) */

/*
 * PPS kernel consumer compensates the whole phase error immediately.
 * Otherwise, reduce the offset by a fixed factor times the time constant.
 */
static inline s64 ntp_offset_chunk(struct ntp_data *ntpdata, s64 offset)
{
        if (ntpdata->time_status & STA_PPSTIME && ntpdata->time_status & STA_PPSSIGNAL)
                return offset;
        else
                return shift_right(offset, SHIFT_PLL + ntpdata->time_constant);
}

static inline void pps_reset_freq_interval(struct ntp_data *ntpdata)
{
        /* The PPS calibration interval may end surprisingly early */
        ntpdata->pps_shift = PPS_INTMIN;
        ntpdata->pps_intcnt = 0;
}

/**
 * pps_clear - Clears the PPS state variables
 * @ntpdata:	Pointer to ntp data
 */
static inline void pps_clear(struct ntp_data *ntpdata)
{
        pps_reset_freq_interval(ntpdata);
        ntpdata->pps_tf[0] = 0;
        ntpdata->pps_tf[1] = 0;
        ntpdata->pps_tf[2] = 0;
        ntpdata->pps_fbase.tv_sec = ntpdata->pps_fbase.tv_nsec = 0;
        ntpdata->pps_freq = 0;
}

/*
 * Decrease pps_valid to indicate that another second has passed since the
 * last PPS signal. When it reaches 0, indicate that PPS signal is missing.
 */
static inline void pps_dec_valid(struct ntp_data *ntpdata)
{
        if (ntpdata->pps_valid > 0) {
                ntpdata->pps_valid--;
        } else {
                ntpdata->time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
                                          STA_PPSWANDER | STA_PPSERROR);
                pps_clear(ntpdata);
        }
}

static inline void pps_set_freq(struct ntp_data *ntpdata)
{
        ntpdata->pps_freq = ntpdata->time_freq;
}

static inline bool is_error_status(int status)
{
        return (status & (STA_UNSYNC|STA_CLOCKERR))
                /*
                 * PPS signal lost when either PPS time or PPS frequency
                 * synchronization requested
                 */
                || ((status & (STA_PPSFREQ|STA_PPSTIME))
                        && !(status & STA_PPSSIGNAL))
                /*
                 * PPS jitter exceeded when PPS time synchronization
                 * requested
                 */
                || ((status & (STA_PPSTIME|STA_PPSJITTER))
                        == (STA_PPSTIME|STA_PPSJITTER))
                /*
                 * PPS wander exceeded or calibration error when PPS
                 * frequency synchronization requested
                 */
                || ((status & STA_PPSFREQ)
                        && (status & (STA_PPSWANDER|STA_PPSERROR)));
}

static inline void pps_fill_timex(struct ntp_data *ntpdata, struct __kernel_timex *txc)
{
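        /*
         * Convert the internal (nanoseconds/second << NTP_SCALE_SHIFT)
         * frequency value into the scaled-ppm format that the timex ABI
         * exposes to userspace.
         */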
        txc->ppsfreq       = shift_right((ntpdata->pps_freq >> PPM_SCALE_INV_SHIFT) *
                                         PPM_SCALE_INV, NTP_SCALE_SHIFT);
        txc->jitter        = ntpdata->pps_jitter;
        if (!(ntpdata->time_status & STA_NANO))
                txc->jitter = ntpdata->pps_jitter / NSEC_PER_USEC;
        txc->shift         = ntpdata->pps_shift;
        txc->stabil        = ntpdata->pps_stabil;
        txc->jitcnt        = ntpdata->pps_jitcnt;
        txc->calcnt        = ntpdata->pps_calcnt;
        txc->errcnt        = ntpdata->pps_errcnt;
        txc->stbcnt        = ntpdata->pps_stbcnt;
}

#else /* !CONFIG_NTP_PPS */

static inline s64 ntp_offset_chunk(struct ntp_data *ntpdata, s64 offset)
{
        return shift_right(offset, SHIFT_PLL + ntpdata->time_constant);
}

static inline void pps_reset_freq_interval(struct ntp_data *ntpdata) {}
static inline void pps_clear(struct ntp_data *ntpdata) {}
static inline void pps_dec_valid(struct ntp_data *ntpdata) {}
static inline void pps_set_freq(struct ntp_data *ntpdata) {}

static inline bool is_error_status(int status)
{
        return status & (STA_UNSYNC|STA_CLOCKERR);
}

static inline void pps_fill_timex(struct ntp_data *ntpdata, struct __kernel_timex *txc)
{
        /* PPS is not implemented, so these are zero */
        txc->ppsfreq       = 0;
        txc->jitter        = 0;
        txc->shift         = 0;
        txc->stabil        = 0;
        txc->jitcnt        = 0;
        txc->calcnt        = 0;
        txc->errcnt        = 0;
        txc->stbcnt        = 0;
}

#endif /* CONFIG_NTP_PPS */

/*
 * Update tick_length and tick_length_base, based on tick_usec, ntp_tick_adj and
 * time_freq:
 */
static void ntp_update_frequency(struct ntp_data *ntpdata)
{
        u64 second_length, new_base, tick_usec = (u64)ntpdata->tick_usec;

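        /*
         * Nominal length of one second in (nanoseconds << NTP_SCALE_SHIFT)
         * fixed point: tick_usec covers one USER_HZ tick, so multiplying by
         * USER_HZ yields a full second. The boot-time tick adjustment and
         * the NTP frequency offset are accumulated in the same format
         * before the sum is divided down to the per-interval tick length.
         */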
        second_length            = (u64)(tick_usec * NSEC_PER_USEC * USER_HZ) << NTP_SCALE_SHIFT;

        second_length           += ntpdata->ntp_tick_adj;
        second_length           += ntpdata->time_freq;

        new_base                 = div_u64(second_length, NTP_INTERVAL_FREQ);

        /*
         * Don't wait for the next second_overflow, apply the change to the
         * tick length immediately:
         */
        ntpdata->tick_length            += new_base - ntpdata->tick_length_base;
        ntpdata->tick_length_base        = new_base;
}

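/*
 * Frequency-locked-loop (FLL) contribution to the frequency adjustment:
 * it is skipped for very short update intervals (secs < MINSEC) and only
 * used for long intervals (secs > MAXSEC) or when STA_FLL is set. STA_MODE
 * reports whether the FLL path was taken for the last update.
 */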
static inline s64 ntp_update_offset_fll(struct ntp_data *ntpdata, s64 offset64, long secs)
{
        ntpdata->time_status &= ~STA_MODE;

        if (secs < MINSEC)
                return 0;

        if (!(ntpdata->time_status & STA_FLL) && (secs <= MAXSEC))
                return 0;

        ntpdata->time_status |= STA_MODE;

        return div64_long(offset64 << (NTP_SCALE_SHIFT - SHIFT_FLL), secs);
}

static void ntp_update_offset(struct ntp_data *ntpdata, long offset)
{
        s64 freq_adj, offset64;
        long secs, real_secs;

        if (!(ntpdata->time_status & STA_PLL))
                return;

        if (!(ntpdata->time_status & STA_NANO)) {
                /* Make sure the multiplication below won't overflow */
                offset = clamp(offset, -USEC_PER_SEC, USEC_PER_SEC);
                offset *= NSEC_PER_USEC;
        }

        /* Scale the phase adjustment and clamp to the operating range. */
        offset = clamp(offset, -MAXPHASE, MAXPHASE);

        /*
         * Select how the frequency is to be controlled
         * and in which mode (PLL or FLL).
         */
        real_secs = ktime_get_ntp_seconds(ntpdata - tk_ntp_data);
        secs = (long)(real_secs - ntpdata->time_reftime);
        if (unlikely(ntpdata->time_status & STA_FREQHOLD))
                secs = 0;

        ntpdata->time_reftime = real_secs;

        offset64    = offset;
        freq_adj    = ntp_update_offset_fll(ntpdata, offset64, secs);

        /*
         * Clamp update interval to reduce PLL gain with low
         * sampling rate (e.g. intermittent network connection)
         * to avoid instability.
         */
        if (unlikely(secs > 1 << (SHIFT_PLL + 1 + ntpdata->time_constant)))
                secs = 1 << (SHIFT_PLL + 1 + ntpdata->time_constant);

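        /*
         * PLL contribution: the phase error weighted by the update interval,
         * attenuated quadratically by the loop time constant
         * (SHIFT_PLL + 2 + time_constant). The shift keeps the result in the
         * scaled nsecs/secs format of time_freq.
         */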
        freq_adj    += (offset64 * secs) <<
                        (NTP_SCALE_SHIFT - 2 * (SHIFT_PLL + 2 + ntpdata->time_constant));

        freq_adj    = min(freq_adj + ntpdata->time_freq, MAXFREQ_SCALED);

        ntpdata->time_freq   = max(freq_adj, -MAXFREQ_SCALED);

        ntpdata->time_offset = div_s64(offset64 << NTP_SCALE_SHIFT, NTP_INTERVAL_FREQ);
}

static void __ntp_clear(struct ntp_data *ntpdata)
{
        /* Stop active adjtime() */
        ntpdata->time_adjust    = 0;
        ntpdata->time_status    |= STA_UNSYNC;
        ntpdata->time_maxerror  = NTP_PHASE_LIMIT;
        ntpdata->time_esterror  = NTP_PHASE_LIMIT;

        ntp_update_frequency(ntpdata);

        ntpdata->tick_length    = ntpdata->tick_length_base;
        ntpdata->time_offset    = 0;

        ntpdata->ntp_next_leap_sec = TIME64_MAX;
        /* Clear PPS state variables */
        pps_clear(ntpdata);
}

/**
 * ntp_clear - Clears the NTP state variables
 * @tkid:	Timekeeper ID to be able to select proper ntp data array member
 */
void ntp_clear(unsigned int tkid)
{
        __ntp_clear(&tk_ntp_data[tkid]);
}

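/**
 * ntp_tick_length - Read the current NTP adjusted tick length
 * @tkid:	Timekeeper ID to select the proper ntp data array member
 *
 * Returns: The adjusted tick length in (nanoseconds << NTP_SCALE_SHIFT) format
 */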
u64 ntp_tick_length(unsigned int tkid)
{
        return tk_ntp_data[tkid].tick_length;
}

/**
 * ntp_get_next_leap - Returns the next leapsecond in CLOCK_REALTIME ktime_t
 * @tkid:	Timekeeper ID
 *
 * Returns: For @tkid == TIMEKEEPER_CORE this provides the time of the next
 *	    leap second against CLOCK_REALTIME in a ktime_t format if a
 *	    leap second is pending. KTIME_MAX otherwise.
 */
ktime_t ntp_get_next_leap(unsigned int tkid)
{
        struct ntp_data *ntpdata = &tk_ntp_data[TIMEKEEPER_CORE];

        if (tkid != TIMEKEEPER_CORE)
                return KTIME_MAX;

        if ((ntpdata->time_state == TIME_INS) && (ntpdata->time_status & STA_INS))
                return ktime_set(ntpdata->ntp_next_leap_sec, 0);

        return KTIME_MAX;
}

/*
 * This routine handles the overflow of the microsecond field
 *
 * The tricky bits of code to handle the accurate clock support
 * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
 * They were originally developed for SUN and DEC kernels.
 * All the kudos should go to Dave for this stuff.
 *
 * Also handles leap second processing, and returns leap offset
 */
int second_overflow(unsigned int tkid, time64_t secs)
{
        struct ntp_data *ntpdata = &tk_ntp_data[tkid];
        s64 delta;
        int leap = 0;
        s32 rem;

        /*
         * Leap second processing. If in leap-insert state at the end of the
         * day, the system clock is set back one second; if in leap-delete
         * state, the system clock is set ahead one second.
         */
        switch (ntpdata->time_state) {
        case TIME_OK:
                if (ntpdata->time_status & STA_INS) {
                        ntpdata->time_state = TIME_INS;
                        div_s64_rem(secs, SECS_PER_DAY, &rem);
                        ntpdata->ntp_next_leap_sec = secs + SECS_PER_DAY - rem;
                } else if (ntpdata->time_status & STA_DEL) {
                        ntpdata->time_state = TIME_DEL;
                        div_s64_rem(secs + 1, SECS_PER_DAY, &rem);
                        ntpdata->ntp_next_leap_sec = secs + SECS_PER_DAY - rem;
                }
                break;
        case TIME_INS:
                if (!(ntpdata->time_status & STA_INS)) {
                        ntpdata->ntp_next_leap_sec = TIME64_MAX;
                        ntpdata->time_state = TIME_OK;
                } else if (secs == ntpdata->ntp_next_leap_sec) {
                        leap = -1;
                        ntpdata->time_state = TIME_OOP;
                        pr_notice("Clock: inserting leap second 23:59:60 UTC\n");
                }
                break;
        case TIME_DEL:
                if (!(ntpdata->time_status & STA_DEL)) {
                        ntpdata->ntp_next_leap_sec = TIME64_MAX;
                        ntpdata->time_state = TIME_OK;
                } else if (secs == ntpdata->ntp_next_leap_sec) {
                        leap = 1;
                        ntpdata->ntp_next_leap_sec = TIME64_MAX;
                        ntpdata->time_state = TIME_WAIT;
                        pr_notice("Clock: deleting leap second 23:59:59 UTC\n");
                }
                break;
        case TIME_OOP:
                ntpdata->ntp_next_leap_sec = TIME64_MAX;
                ntpdata->time_state = TIME_WAIT;
                break;
        case TIME_WAIT:
                if (!(ntpdata->time_status & (STA_INS | STA_DEL)))
                        ntpdata->time_state = TIME_OK;
                break;
        }

        /* Bump the maxerror field */
        ntpdata->time_maxerror += MAXFREQ / NSEC_PER_USEC;
        if (ntpdata->time_maxerror > NTP_PHASE_LIMIT) {
                ntpdata->time_maxerror = NTP_PHASE_LIMIT;
                ntpdata->time_status |= STA_UNSYNC;
        }

        /* Compute the phase adjustment for the next second */
        ntpdata->tick_length     = ntpdata->tick_length_base;

        delta                    = ntp_offset_chunk(ntpdata, ntpdata->time_offset);
        ntpdata->time_offset    -= delta;
        ntpdata->tick_length    += delta;

        /* Check PPS signal */
        pps_dec_valid(ntpdata);

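        /*
         * Apply any outstanding adjtime() adjustment: at most MAX_TICKADJ
         * microseconds are slewed per second; a smaller remainder is folded
         * into the tick length in one go, which completes the adjustment.
         */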
        if (!ntpdata->time_adjust)
                goto out;

        if (ntpdata->time_adjust > MAX_TICKADJ) {
                ntpdata->time_adjust -= MAX_TICKADJ;
                ntpdata->tick_length += MAX_TICKADJ_SCALED;
                goto out;
        }

        if (ntpdata->time_adjust < -MAX_TICKADJ) {
                ntpdata->time_adjust += MAX_TICKADJ;
                ntpdata->tick_length -= MAX_TICKADJ_SCALED;
                goto out;
        }

        ntpdata->tick_length += (s64)(ntpdata->time_adjust * NSEC_PER_USEC / NTP_INTERVAL_FREQ)
                                                         << NTP_SCALE_SHIFT;
        ntpdata->time_adjust = 0;

out:
        return leap;
}

#if defined(CONFIG_GENERIC_CMOS_UPDATE) || defined(CONFIG_RTC_SYSTOHC)
static void sync_hw_clock(struct work_struct *work);
static DECLARE_WORK(sync_work, sync_hw_clock);
static struct hrtimer sync_hrtimer;
#define SYNC_PERIOD_NS (11ULL * 60 * NSEC_PER_SEC)

static enum hrtimer_restart sync_timer_callback(struct hrtimer *timer)
{
        queue_work(system_freezable_power_efficient_wq, &sync_work);

        return HRTIMER_NORESTART;
}

static void sched_sync_hw_clock(unsigned long offset_nsec, bool retry)
{
        ktime_t exp = ktime_set(ktime_get_real_seconds(), 0);

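        /*
         * Expire either roughly two seconds from now (retry) or after the
         * ~11 minute period. Subtracting offset_nsec aligns the expiry so
         * that the subsequent RTC write lands just ahead of a second
         * boundary, as expected by rtc_tv_nsec_ok().
         */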
        if (retry)
                exp = ktime_add_ns(exp, 2ULL * NSEC_PER_SEC - offset_nsec);
        else
                exp = ktime_add_ns(exp, SYNC_PERIOD_NS - offset_nsec);

        hrtimer_start(&sync_hrtimer, exp, HRTIMER_MODE_ABS);
}

/*
 * Check whether @now is correct versus the required time to update the RTC
 * and calculate the value which needs to be written to the RTC so that the
 * next seconds increment of the RTC after the write is aligned with the next
 * seconds increment of clock REALTIME.
 *
 * tsched     t1 write(t2.tv_sec - 1sec)	t2 RTC increments seconds
 *
 * t2.tv_nsec == 0
 * tsched = t2 - set_offset_nsec
 * newval = t2 - NSEC_PER_SEC
 *
 * ==> newval = tsched + set_offset_nsec - NSEC_PER_SEC
 *
 * As the execution of this code is not guaranteed to happen exactly at
 * tsched this allows it to happen within a fuzzy region:
 *
 *	abs(now - tsched) < FUZZ
 *
 * If @now is not inside the allowed window the function returns false.
 */
static inline bool rtc_tv_nsec_ok(unsigned long set_offset_nsec,
                                  struct timespec64 *to_set,
                                  const struct timespec64 *now)
{
        /* Allowed error in tv_nsec, arbitrarily set to 5 jiffies in ns. */
        const unsigned long TIME_SET_NSEC_FUZZ = TICK_NSEC * 5;
        struct timespec64 delay = {.tv_sec = -1,
                                   .tv_nsec = set_offset_nsec};
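        /* Effectively: *to_set = *now + set_offset_nsec - 1 second */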

        *to_set = timespec64_add(*now, delay);

        if (to_set->tv_nsec < TIME_SET_NSEC_FUZZ) {
                to_set->tv_nsec = 0;
                return true;
        }

        if (to_set->tv_nsec > NSEC_PER_SEC - TIME_SET_NSEC_FUZZ) {
                to_set->tv_sec++;
                to_set->tv_nsec = 0;
                return true;
        }
        return false;
}

#ifdef CONFIG_GENERIC_CMOS_UPDATE
int __weak update_persistent_clock64(struct timespec64 now64)
{
        return -ENODEV;
}
#else
static inline int update_persistent_clock64(struct timespec64 now64)
{
        return -ENODEV;
}
#endif

#ifdef CONFIG_RTC_SYSTOHC
/* Save NTP synchronized time to the RTC */
static int update_rtc(struct timespec64 *to_set, unsigned long *offset_nsec)
{
        struct rtc_device *rtc;
        struct rtc_time tm;
        int err = -ENODEV;

        rtc = rtc_class_open(CONFIG_RTC_SYSTOHC_DEVICE);
        if (!rtc)
                return -ENODEV;

        if (!rtc->ops || !rtc->ops->set_time)
                goto out_close;

        /* First call might not have the correct offset */
        if (*offset_nsec == rtc->set_offset_nsec) {
                rtc_time64_to_tm(to_set->tv_sec, &tm);
                err = rtc_set_time(rtc, &tm);
        } else {
                /* Store the update offset and let the caller try again */
                *offset_nsec = rtc->set_offset_nsec;
                err = -EAGAIN;
        }
out_close:
        rtc_class_close(rtc);
        return err;
}
#else
static inline int update_rtc(struct timespec64 *to_set, unsigned long *offset_nsec)
{
        return -ENODEV;
}
#endif

/**
 * ntp_synced - Tells whether the NTP status is not UNSYNC
 * Returns:	true if not UNSYNC, false otherwise
 */
static inline bool ntp_synced(void)
{
        return !(tk_ntp_data[TIMEKEEPER_CORE].time_status & STA_UNSYNC);
}

/*
 * If we have an externally synchronized Linux clock, then update the RTC
 * accordingly every ~11 minutes. Generally RTCs can only store second
 * precision, but many RTCs will adjust the phase of their second tick to
 * match the moment of update. This infrastructure arranges for the RTC to
 * be set at the correct moment so that its second tick is phase
 * synchronized with the kernel clock.
 */
static void sync_hw_clock(struct work_struct *work)
{
        /*
         * The default synchronization offset is 500ms for the deprecated
         * update_persistent_clock64() under the assumption that it uses
         * the infamous CMOS clock (MC146818).
         */
        static unsigned long offset_nsec = NSEC_PER_SEC / 2;
        struct timespec64 now, to_set;
        int res = -EAGAIN;

        /*
         * Don't update if STA_UNSYNC is set and if ntp_notify_cmos_timer()
         * managed to schedule the work between the timer firing and the
         * work being able to rearm the timer. Wait for the timer to expire.
         */
        if (!ntp_synced() || hrtimer_is_queued(&sync_hrtimer))
                return;

        ktime_get_real_ts64(&now);
        /* If @now is not in the allowed window, try again */
        if (!rtc_tv_nsec_ok(offset_nsec, &to_set, &now))
                goto rearm;

        /* Take timezone adjusted RTCs into account */
        if (persistent_clock_is_local)
                to_set.tv_sec -= (sys_tz.tz_minuteswest * 60);

        /* Try the legacy RTC first. */
        res = update_persistent_clock64(to_set);
        if (res != -ENODEV)
                goto rearm;

        /* Try the RTC class */
        res = update_rtc(&to_set, &offset_nsec);
        if (res == -ENODEV)
                return;
rearm:
        sched_sync_hw_clock(offset_nsec, res != 0);
}

void ntp_notify_cmos_timer(bool offset_set)
{
        /*
         * If the time jumped (using ADJ_SETOFFSET), cancel the sync timer,
         * which may have been running if the time was synchronized
         * prior to the ADJ_SETOFFSET call.
         */
        if (offset_set)
                hrtimer_cancel(&sync_hrtimer);

        /*
         * If the work is currently being executed and has not yet rearmed
         * the timer, this queues the work again immediately. Not a big
         * issue, just a pointlessly scheduled work item.
         */
        if (ntp_synced() && !hrtimer_is_queued(&sync_hrtimer))
                queue_work(system_freezable_power_efficient_wq, &sync_work);
}

static void __init ntp_init_cmos_sync(void)
{
        hrtimer_setup(&sync_hrtimer, sync_timer_callback, CLOCK_REALTIME, HRTIMER_MODE_ABS);
}
#else /* !(CONFIG_GENERIC_CMOS_UPDATE || CONFIG_RTC_SYSTOHC) */
static inline void __init ntp_init_cmos_sync(void) { }
#endif /* CONFIG_GENERIC_CMOS_UPDATE || CONFIG_RTC_SYSTOHC */

/*
 * Propagate a new txc->status value into the NTP state:
 */
static inline void process_adj_status(struct ntp_data *ntpdata, const struct __kernel_timex *txc)
{
        if ((ntpdata->time_status & STA_PLL) && !(txc->status & STA_PLL)) {
                ntpdata->time_state = TIME_OK;
                ntpdata->time_status = STA_UNSYNC;
                ntpdata->ntp_next_leap_sec = TIME64_MAX;
                /* Restart PPS frequency calibration */
                pps_reset_freq_interval(ntpdata);
        }

        /*
         * If we turn on PLL adjustments then reset the
         * reference time to current time.
         */
        if (!(ntpdata->time_status & STA_PLL) && (txc->status & STA_PLL))
                ntpdata->time_reftime = ktime_get_ntp_seconds(ntpdata - tk_ntp_data);

        /* only set allowed bits */
        ntpdata->time_status &= STA_RONLY;
        ntpdata->time_status |= txc->status & ~STA_RONLY;
}

static inline void process_adjtimex_modes(struct ntp_data *ntpdata, const struct __kernel_timex *txc,
                                          s32 *time_tai)
{
        if (txc->modes & ADJ_STATUS)
                process_adj_status(ntpdata, txc);

        if (txc->modes & ADJ_NANO)
                ntpdata->time_status |= STA_NANO;

        if (txc->modes & ADJ_MICRO)
                ntpdata->time_status &= ~STA_NANO;

        if (txc->modes & ADJ_FREQUENCY) {
                ntpdata->time_freq = txc->freq * PPM_SCALE;
                ntpdata->time_freq = min(ntpdata->time_freq, MAXFREQ_SCALED);
                ntpdata->time_freq = max(ntpdata->time_freq, -MAXFREQ_SCALED);
                /* Update pps_freq */
                pps_set_freq(ntpdata);
        }

        if (txc->modes & ADJ_MAXERROR)
                ntpdata->time_maxerror = clamp(txc->maxerror, 0, NTP_PHASE_LIMIT);

        if (txc->modes & ADJ_ESTERROR)
                ntpdata->time_esterror = clamp(txc->esterror, 0, NTP_PHASE_LIMIT);

        if (txc->modes & ADJ_TIMECONST) {
                ntpdata->time_constant = clamp(txc->constant, 0, MAXTC);
                if (!(ntpdata->time_status & STA_NANO))
                        ntpdata->time_constant += 4;
                ntpdata->time_constant = clamp(ntpdata->time_constant, 0, MAXTC);
        }

        if (txc->modes & ADJ_TAI && txc->constant >= 0 && txc->constant <= MAX_TAI_OFFSET)
                *time_tai = txc->constant;

        if (txc->modes & ADJ_OFFSET)
                ntp_update_offset(ntpdata, txc->offset);

        if (txc->modes & ADJ_TICK)
                ntpdata->tick_usec = txc->tick;

        if (txc->modes & (ADJ_TICK|ADJ_FREQUENCY|ADJ_OFFSET))
                ntp_update_frequency(ntpdata);
}

/*
 * adjtimex() mainly allows reading (and writing, if superuser) of
 * kernel time-keeping variables. Used by xntpd.
 */
int ntp_adjtimex(unsigned int tkid, struct __kernel_timex *txc, const struct timespec64 *ts,
                 s32 *time_tai, struct audit_ntp_data *ad)
{
        struct ntp_data *ntpdata = &tk_ntp_data[tkid];
        int result;

        if (txc->modes & ADJ_ADJTIME) {
                long save_adjust = ntpdata->time_adjust;

                if (!(txc->modes & ADJ_OFFSET_READONLY)) {
                        /* adjtime() is independent from ntp_adjtime() */
                        ntpdata->time_adjust = txc->offset;
                        ntp_update_frequency(ntpdata);

                        audit_ntp_set_old(ad, AUDIT_NTP_ADJUST, save_adjust);
                        audit_ntp_set_new(ad, AUDIT_NTP_ADJUST, ntpdata->time_adjust);
                }
                txc->offset = save_adjust;
        } else {
                /* If there are input parameters, then process them: */
                if (txc->modes) {
                        audit_ntp_set_old(ad, AUDIT_NTP_OFFSET, ntpdata->time_offset);
                        audit_ntp_set_old(ad, AUDIT_NTP_FREQ,   ntpdata->time_freq);
                        audit_ntp_set_old(ad, AUDIT_NTP_STATUS, ntpdata->time_status);
                        audit_ntp_set_old(ad, AUDIT_NTP_TAI,    *time_tai);
                        audit_ntp_set_old(ad, AUDIT_NTP_TICK,   ntpdata->tick_usec);

                        process_adjtimex_modes(ntpdata, txc, time_tai);

                        audit_ntp_set_new(ad, AUDIT_NTP_OFFSET, ntpdata->time_offset);
                        audit_ntp_set_new(ad, AUDIT_NTP_FREQ,   ntpdata->time_freq);
                        audit_ntp_set_new(ad, AUDIT_NTP_STATUS, ntpdata->time_status);
                        audit_ntp_set_new(ad, AUDIT_NTP_TAI,    *time_tai);
                        audit_ntp_set_new(ad, AUDIT_NTP_TICK,   ntpdata->tick_usec);
                }

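                /*
                 * Report the remaining offset, converted from the internal
                 * per-tick (<< NTP_SCALE_SHIFT) representation back to
                 * nanoseconds, or to microseconds when STA_NANO is not set.
                 */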
                txc->offset = shift_right(ntpdata->time_offset * NTP_INTERVAL_FREQ, NTP_SCALE_SHIFT);
                if (!(ntpdata->time_status & STA_NANO))
                        txc->offset = div_s64(txc->offset, NSEC_PER_USEC);
        }

        result = ntpdata->time_state;
        if (is_error_status(ntpdata->time_status))
                result = TIME_ERROR;

        txc->freq          = shift_right((ntpdata->time_freq >> PPM_SCALE_INV_SHIFT) *
                                         PPM_SCALE_INV, NTP_SCALE_SHIFT);
        txc->maxerror      = ntpdata->time_maxerror;
        txc->esterror      = ntpdata->time_esterror;
        txc->status        = ntpdata->time_status;
        txc->constant      = ntpdata->time_constant;
        txc->precision     = 1;
        txc->tolerance     = MAXFREQ_SCALED / PPM_SCALE;
        txc->tick          = ntpdata->tick_usec;
        txc->tai           = *time_tai;

        /* Fill PPS status fields */
        pps_fill_timex(ntpdata, txc);

        txc->time.tv_sec = ts->tv_sec;
        txc->time.tv_usec = ts->tv_nsec;
        if (!(ntpdata->time_status & STA_NANO))
                txc->time.tv_usec = ts->tv_nsec / NSEC_PER_USEC;

        /* Handle leapsec adjustments */
        if (unlikely(ts->tv_sec >= ntpdata->ntp_next_leap_sec)) {
                if ((ntpdata->time_state == TIME_INS) && (ntpdata->time_status & STA_INS)) {
                        result = TIME_OOP;
                        txc->tai++;
                        txc->time.tv_sec--;
                }
                if ((ntpdata->time_state == TIME_DEL) && (ntpdata->time_status & STA_DEL)) {
                        result = TIME_WAIT;
                        txc->tai--;
                        txc->time.tv_sec++;
                }
                if ((ntpdata->time_state == TIME_OOP) && (ts->tv_sec == ntpdata->ntp_next_leap_sec))
                        result = TIME_WAIT;
        }

        return result;
}

#ifdef CONFIG_NTP_PPS

/*
 * struct pps_normtime is basically a struct timespec, but it is
 * semantically different (and it is the reason why it was invented):
 * pps_normtime.nsec has a range of ( -NSEC_PER_SEC / 2, NSEC_PER_SEC / 2 ]
 * while timespec.tv_nsec has a range of [0, NSEC_PER_SEC)
 */
struct pps_normtime {
        s64             sec;    /* seconds */
        long            nsec;   /* nanoseconds */
};

/*
 * Normalize the timestamp so that nsec is in the
 * [ -NSEC_PER_SEC / 2, NSEC_PER_SEC / 2 ] interval
 */
static inline struct pps_normtime pps_normalize_ts(struct timespec64 ts)
{
        struct pps_normtime norm = {
                .sec = ts.tv_sec,
                .nsec = ts.tv_nsec
        };

        if (norm.nsec > (NSEC_PER_SEC >> 1)) {
                norm.nsec -= NSEC_PER_SEC;
                norm.sec++;
        }

        return norm;
}

/* Get current phase correction and jitter */
static inline long pps_phase_filter_get(struct ntp_data *ntpdata, long *jitter)
{
        *jitter = ntpdata->pps_tf[0] - ntpdata->pps_tf[1];
        if (*jitter < 0)
                *jitter = -*jitter;

        /* TODO: test various filters */
        return ntpdata->pps_tf[0];
}

/* Add the sample to the phase filter */
static inline void pps_phase_filter_add(struct ntp_data *ntpdata, long err)
{
        ntpdata->pps_tf[2] = ntpdata->pps_tf[1];
        ntpdata->pps_tf[1] = ntpdata->pps_tf[0];
        ntpdata->pps_tf[0] = err;
}

/*
 * Decrease frequency calibration interval length. It is halved after four
 * consecutive unstable intervals.
 */
static inline void pps_dec_freq_interval(struct ntp_data *ntpdata)
{
        if (--ntpdata->pps_intcnt <= -PPS_INTCOUNT) {
                ntpdata->pps_intcnt = -PPS_INTCOUNT;
                if (ntpdata->pps_shift > PPS_INTMIN) {
                        ntpdata->pps_shift--;
                        ntpdata->pps_intcnt = 0;
                }
        }
}

/*
 * Increase frequency calibration interval length. It is doubled after
 * four consecutive stable intervals.
 */
static inline void pps_inc_freq_interval(struct ntp_data *ntpdata)
{
        if (++ntpdata->pps_intcnt >= PPS_INTCOUNT) {
                ntpdata->pps_intcnt = PPS_INTCOUNT;
                if (ntpdata->pps_shift < PPS_INTMAX) {
                        ntpdata->pps_shift++;
                        ntpdata->pps_intcnt = 0;
                }
        }
}

/*
 * Update clock frequency based on MONOTONIC_RAW clock PPS signal
 * timestamps
 *
 * At the end of the calibration interval the difference between the
 * first and last MONOTONIC_RAW clock timestamps divided by the length
 * of the interval becomes the frequency update. If the interval was
 * too long, the data are discarded.
 * Returns the difference between old and new frequency values.
 */
static long hardpps_update_freq(struct ntp_data *ntpdata, struct pps_normtime freq_norm)
{
        long delta, delta_mod;
        s64 ftemp;

        /* Check if the frequency interval was too long */
        if (freq_norm.sec > (2 << ntpdata->pps_shift)) {
                ntpdata->time_status |= STA_PPSERROR;
                ntpdata->pps_errcnt++;
                pps_dec_freq_interval(ntpdata);
                printk_deferred(KERN_ERR "hardpps: PPSERROR: interval too long - %lld s\n",
                                freq_norm.sec);
                return 0;
        }

        /*
         * Here the raw frequency offset and wander (stability) is
         * calculated. If the wander is less than the wander threshold the
         * interval is increased; otherwise it is decreased.
         */
        ftemp = div_s64(((s64)(-freq_norm.nsec)) << NTP_SCALE_SHIFT,
                        freq_norm.sec);
        delta = shift_right(ftemp - ntpdata->pps_freq, NTP_SCALE_SHIFT);
        ntpdata->pps_freq = ftemp;
        if (delta > PPS_MAXWANDER || delta < -PPS_MAXWANDER) {
                printk_deferred(KERN_WARNING "hardpps: PPSWANDER: change=%ld\n", delta);
                ntpdata->time_status |= STA_PPSWANDER;
                ntpdata->pps_stbcnt++;
                pps_dec_freq_interval(ntpdata);
        } else {
                /* Good sample */
                pps_inc_freq_interval(ntpdata);
        }

        /*
         * The stability metric is calculated as the average of recent
         * frequency changes, but is used only for performance monitoring
         */
        delta_mod = delta;
        if (delta_mod < 0)
                delta_mod = -delta_mod;
        ntpdata->pps_stabil += (div_s64(((s64)delta_mod) << (NTP_SCALE_SHIFT - SHIFT_USEC),
                                        NSEC_PER_USEC) - ntpdata->pps_stabil) >> PPS_INTMIN;

        /* If enabled, the system clock frequency is updated */
        if ((ntpdata->time_status & STA_PPSFREQ) && !(ntpdata->time_status & STA_FREQHOLD)) {
                ntpdata->time_freq = ntpdata->pps_freq;
                ntp_update_frequency(ntpdata);
        }

        return delta;
}

/* Correct REALTIME clock phase error against PPS signal */
static void hardpps_update_phase(struct ntp_data *ntpdata, long error)
{
        long correction = -error;
        long jitter;

        /* Add the sample to the median filter */
        pps_phase_filter_add(ntpdata, correction);
        correction = pps_phase_filter_get(ntpdata, &jitter);

        /*
         * Nominal jitter is due to PPS signal noise. If it exceeds the
         * threshold, the sample is discarded; otherwise, if so enabled,
         * the time offset is updated.
         */
        if (jitter > (ntpdata->pps_jitter << PPS_POPCORN)) {
                printk_deferred(KERN_WARNING "hardpps: PPSJITTER: jitter=%ld, limit=%ld\n",
                                jitter, (ntpdata->pps_jitter << PPS_POPCORN));
                ntpdata->time_status |= STA_PPSJITTER;
                ntpdata->pps_jitcnt++;
        } else if (ntpdata->time_status & STA_PPSTIME) {
                /* Correct the time using the phase offset */
                ntpdata->time_offset = div_s64(((s64)correction) << NTP_SCALE_SHIFT,
                                               NTP_INTERVAL_FREQ);
                /* Cancel running adjtime() */
                ntpdata->time_adjust = 0;
        }
        /* Update jitter */
        ntpdata->pps_jitter += (jitter - ntpdata->pps_jitter) >> PPS_INTMIN;
}

/*
 * __hardpps() - discipline CPU clock oscillator to external PPS signal
 *
 * This routine is called at each PPS signal arrival in order to
 * discipline the CPU clock oscillator to the PPS signal. It takes two
 * parameters: REALTIME and MONOTONIC_RAW clock timestamps. The former
 * is used to correct clock phase error and the latter is used to
 * correct the frequency.
 *
 * This code is based on David Mills's reference nanokernel
 * implementation. It was mostly rewritten but keeps the same idea.
 */
void __hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts)
{
        struct ntp_data *ntpdata = &tk_ntp_data[TIMEKEEPER_CORE];
        struct pps_normtime pts_norm, freq_norm;

        pts_norm = pps_normalize_ts(*phase_ts);

        /* Clear the error bits, they will be set again if needed */
        ntpdata->time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);

        /* indicate signal presence */
        ntpdata->time_status |= STA_PPSSIGNAL;
        ntpdata->pps_valid = PPS_VALID;

        /*
         * When called for the first time, just start the frequency
         * interval
         */
        if (unlikely(ntpdata->pps_fbase.tv_sec == 0)) {
                ntpdata->pps_fbase = *raw_ts;
                return;
        }

        /* Ok, now we have a base for frequency calculation */
        freq_norm = pps_normalize_ts(timespec64_sub(*raw_ts, ntpdata->pps_fbase));

        /*
         * Check that the signal is in the range
         * [1s - MAXFREQ us, 1s + MAXFREQ us], otherwise reject it
         */
        if ((freq_norm.sec == 0) || (freq_norm.nsec > MAXFREQ * freq_norm.sec) ||
            (freq_norm.nsec < -MAXFREQ * freq_norm.sec)) {
                ntpdata->time_status |= STA_PPSJITTER;
                /* Restart the frequency calibration interval */
                ntpdata->pps_fbase = *raw_ts;
                printk_deferred(KERN_ERR "hardpps: PPSJITTER: bad pulse\n");
                return;
        }

        /* Signal is ok. Check if the current frequency interval is finished */
        if (freq_norm.sec >= (1 << ntpdata->pps_shift)) {
                ntpdata->pps_calcnt++;
                /* Restart the frequency calibration interval */
                ntpdata->pps_fbase = *raw_ts;
                hardpps_update_freq(ntpdata, freq_norm);
        }

        hardpps_update_phase(ntpdata, pts_norm.nsec);
}
#endif  /* CONFIG_NTP_PPS */

static int __init ntp_tick_adj_setup(char *str)
{
        int rc = kstrtos64(str, 0, &tk_ntp_data[TIMEKEEPER_CORE].ntp_tick_adj);
        if (rc)
                return rc;

        tk_ntp_data[TIMEKEEPER_CORE].ntp_tick_adj <<= NTP_SCALE_SHIFT;
        return 1;
}
__setup("ntp_tick_adj=", ntp_tick_adj_setup);

void __init ntp_init(void)
{
        for (int id = 0; id < TIMEKEEPERS_MAX; id++)
                __ntp_clear(tk_ntp_data + id);
        ntp_init_cmos_sync();
}