| 1 | // SPDX-License-Identifier: GPL-2.0+ | 
|---|
| 2 | /* | 
|---|
| 3 | * 2002-10-15  Posix Clocks & timers | 
|---|
| 4 | *                           by George Anzinger george@mvista.com | 
|---|
| 5 | *			     Copyright (C) 2002 2003 by MontaVista Software. | 
|---|
| 6 | * | 
|---|
| 7 | * 2004-06-01  Fix CLOCK_REALTIME clock/timer TIMER_ABSTIME bug. | 
|---|
| 8 | *			     Copyright (C) 2004 Boris Hu | 
|---|
| 9 | * | 
|---|
| 10 | * These are all the functions necessary to implement POSIX clocks & timers | 
|---|
| 11 | */ | 
|---|
| 12 | #include <linux/compat.h> | 
|---|
| 13 | #include <linux/compiler.h> | 
|---|
| 14 | #include <linux/init.h> | 
|---|
| 15 | #include <linux/jhash.h> | 
|---|
| 16 | #include <linux/interrupt.h> | 
|---|
| 17 | #include <linux/list.h> | 
|---|
| 18 | #include <linux/memblock.h> | 
|---|
| 19 | #include <linux/nospec.h> | 
|---|
| 20 | #include <linux/posix-clock.h> | 
|---|
| 21 | #include <linux/posix-timers.h> | 
|---|
| 22 | #include <linux/prctl.h> | 
|---|
| 23 | #include <linux/sched/task.h> | 
|---|
| 24 | #include <linux/slab.h> | 
|---|
| 25 | #include <linux/syscalls.h> | 
|---|
| 26 | #include <linux/time.h> | 
|---|
| 27 | #include <linux/time_namespace.h> | 
|---|
| 28 | #include <linux/uaccess.h> | 
|---|
| 29 |  | 
|---|
| 30 | #include "timekeeping.h" | 
|---|
| 31 | #include "posix-timers.h" | 
|---|
| 32 |  | 
|---|
| 33 | /* | 
|---|
| 34 | * Timers are managed in a hash table for lockless lookup. The hash key is | 
|---|
| 35 | * constructed from current::signal and the timer ID and the timer is | 
|---|
| 36 | * matched against current::signal and the timer ID when walking the hash | 
|---|
| 37 | * bucket list. | 
|---|
| 38 | * | 
|---|
| 39 | * This allows checkpoint/restore to reconstruct the exact timer IDs for | 
|---|
| 40 | * a process. | 
|---|
| 41 | */ | 
|---|
/*
 * One bucket of the global timer hash: @lock serializes insertion/removal,
 * @head chains the timers whose hash maps to this bucket (RCU traversed).
 */
struct timer_hash_bucket {
	spinlock_t		lock;
	struct hlist_head	head;
};
|---|
| 46 |  | 
|---|
/*
 * Global timer hash state, set up once at boot (__ro_after_init) and
 * aligned to keep it within as few cache lines as possible.
 */
static struct {
	struct timer_hash_bucket	*buckets;
	unsigned long			mask;
	struct kmem_cache		*cache;
} __timer_data __ro_after_init __aligned(4*sizeof(long));

/* Convenience accessors for the members of __timer_data */
#define timer_buckets		(__timer_data.buckets)
#define timer_hashmask		(__timer_data.mask)
#define posix_timers_cache	(__timer_data.cache)
|---|
| 56 |  | 
|---|
| 57 | static const struct k_clock * const posix_clocks[]; | 
|---|
| 58 | static const struct k_clock *clockid_to_kclock(const clockid_t id); | 
|---|
| 59 | static const struct k_clock clock_realtime, clock_monotonic; | 
|---|
| 60 |  | 
|---|
| 61 | #define TIMER_ANY_ID		INT_MIN | 
|---|
| 62 |  | 
|---|
| 63 | /* SIGEV_THREAD_ID cannot share a bit with the other SIGEV values. */ | 
|---|
| 64 | #if SIGEV_THREAD_ID != (SIGEV_THREAD_ID & \ | 
|---|
| 65 | ~(SIGEV_SIGNAL | SIGEV_NONE | SIGEV_THREAD)) | 
|---|
| 66 | #error "SIGEV_THREAD_ID must not share bit with other SIGEV values!" | 
|---|
| 67 | #endif | 
|---|
| 68 |  | 
|---|
static struct k_itimer *__lock_timer(timer_t timer_id);

/*
 * Look up a timer by ID and lock it. Returns the locked timer or NULL.
 * The __cond_lock() annotation teaches sparse that the lock is only
 * held when a non-NULL timer is returned.
 */
#define lock_timer(tid)							\
({	struct k_itimer *__timr;					\
	__cond_lock(&__timr->it_lock, __timr = __lock_timer(tid));	\
	__timr;								\
})
|---|
| 76 |  | 
|---|
| 77 | static inline void unlock_timer(struct k_itimer *timr) | 
|---|
| 78 | { | 
|---|
| 79 | if (likely((timr))) | 
|---|
| 80 | spin_unlock_irq(lock: &timr->it_lock); | 
|---|
| 81 | } | 
|---|
| 82 |  | 
|---|
/*
 * Scope-based timer lookup: lock the timer for the duration of the scope
 * and return -EINVAL from the enclosing function when the lookup fails.
 */
#define scoped_timer_get_or_fail(_id)					\
	scoped_cond_guard(lock_timer, return -EINVAL, _id)

/* Name of the locked timer inside a scoped_timer_get_or_fail() scope */
#define scoped_timer				(scope)

DEFINE_CLASS(lock_timer, struct k_itimer *, unlock_timer(_T), __lock_timer(id), timer_t id);
DEFINE_CLASS_IS_COND_GUARD(lock_timer);
|---|
| 90 |  | 
|---|
/*
 * Map (signal_struct pointer, timer ID) to a hash bucket. jhash2() mixes
 * the pointer bits with @nr so IDs of different processes spread out.
 */
static struct timer_hash_bucket *hash_bucket(struct signal_struct *sig, unsigned int nr)
{
	return &timer_buckets[jhash2((u32 *)&sig, sizeof(sig) / sizeof(u32), nr) & timer_hashmask];
}
|---|
| 95 |  | 
|---|
/*
 * Lockless (RCU protected) lookup of a timer by ID, restricted to timers
 * owned by the current process. Returns NULL when no fully initialized
 * timer matches: it_signal has bit 0 set until initialization completes,
 * so a half-created timer never compares equal to @sig here.
 */
static struct k_itimer *posix_timer_by_id(timer_t id)
{
	struct signal_struct *sig = current->signal;
	struct timer_hash_bucket *bucket = hash_bucket(sig, id);
	struct k_itimer *timer;

	hlist_for_each_entry_rcu(timer, &bucket->head, t_hash) {
		/* timer->it_signal can be set concurrently */
		if ((READ_ONCE(timer->it_signal) == sig) && (timer->it_id == id))
			return timer;
	}
	return NULL;
}
|---|
| 109 |  | 
|---|
/*
 * Return the owning signal_struct of @timer regardless of whether the
 * timer is fully initialized yet.
 */
static inline struct signal_struct *posix_sig_owner(const struct k_itimer *timer)
{
	unsigned long val = (unsigned long)timer->it_signal;

	/*
	 * Mask out bit 0, which acts as invalid marker to prevent
	 * posix_timer_by_id() detecting it as valid.
	 */
	return (struct signal_struct *)(val & ~1UL);
}
|---|
| 120 |  | 
|---|
/*
 * Check whether a timer with (@sig, @id) already exists in @bucket.
 * Unlike posix_timer_by_id() this also matches not yet fully initialized
 * timers (posix_sig_owner() masks the "invalid" bit), so a reserved ID
 * cannot be handed out twice. Caller holds bucket->lock.
 */
static bool posix_timer_hashed(struct timer_hash_bucket *bucket, struct signal_struct *sig,
			       timer_t id)
{
	struct hlist_head *head = &bucket->head;
	struct k_itimer *timer;

	hlist_for_each_entry_rcu(timer, head, t_hash, lockdep_is_held(&bucket->lock)) {
		if ((posix_sig_owner(timer) == sig) && (timer->it_id == id))
			return true;
	}
	return false;
}
|---|
| 133 |  | 
|---|
/*
 * Try to insert @timer into the hash with the given @id on behalf of
 * @sig. Returns true on success, false when the ID is already taken.
 */
static bool posix_timer_add_at(struct k_itimer *timer, struct signal_struct *sig, unsigned int id)
{
	struct timer_hash_bucket *bucket = hash_bucket(sig, id);

	scoped_guard (spinlock, &bucket->lock) {
		/*
		 * Validate under the lock as this could have raced against
		 * another thread ending up with the same ID, which is
		 * highly unlikely, but possible.
		 */
		if (!posix_timer_hashed(bucket, sig, id)) {
			/*
			 * Set the timer ID and the signal pointer to make
			 * it identifiable in the hash table. The signal
			 * pointer has bit 0 set to indicate that it is not
			 * yet fully initialized. posix_timer_hashed()
			 * masks this bit out, but the syscall lookup fails
			 * to match due to it being set. This guarantees
			 * that there can't be duplicate timer IDs handed
			 * out.
			 */
			timer->it_id = (timer_t)id;
			timer->it_signal = (struct signal_struct *)((unsigned long)sig | 1UL);
			hlist_add_head_rcu(&timer->t_hash, &bucket->head);
			return true;
		}
	}
	return false;
}
|---|
| 163 |  | 
|---|
/*
 * Allocate a timer ID and insert @timer into the hash.
 *
 * @req_id == TIMER_ANY_ID selects normal allocation from the per-process
 * ID counter; any other value is an exact ID request (CRIU restore mode).
 * Returns the allocated ID (>= 0) or a negative error code.
 */
static int posix_timer_add(struct k_itimer *timer, int req_id)
{
	struct signal_struct *sig = current->signal;

	if (unlikely(req_id != TIMER_ANY_ID)) {
		if (!posix_timer_add_at(timer, sig, req_id))
			return -EBUSY;

		/*
		 * Move the ID counter past the requested ID, so that after
		 * switching back to normal mode the IDs are outside of the
		 * exact allocated region. That avoids ID collisions on the
		 * next regular timer_create() invocations.
		 */
		atomic_set(&sig->next_posix_timer_id, req_id + 1);
		return req_id;
	}

	/* Try every ID in positive int space once before giving up */
	for (unsigned int cnt = 0; cnt <= INT_MAX; cnt++) {
		/* Get the next timer ID and clamp it to positive space */
		unsigned int id = atomic_fetch_inc(&sig->next_posix_timer_id) & INT_MAX;

		if (posix_timer_add_at(timer, sig, id))
			return id;
		cond_resched();
	}
	/* POSIX return code when no timer ID could be allocated */
	return -EAGAIN;
}
|---|
| 193 |  | 
|---|
/* CLOCK_REALTIME: read the current wall clock time */
static int posix_get_realtime_timespec(clockid_t which_clock, struct timespec64 *tp)
{
	ktime_get_real_ts64(tp);
	return 0;
}
|---|
| 199 |  | 
|---|
/* CLOCK_REALTIME: current wall clock time as ktime_t */
static ktime_t posix_get_realtime_ktime(clockid_t which_clock)
{
	return ktime_get_real();
}
|---|
| 204 |  | 
|---|
/* CLOCK_REALTIME: set the wall clock (permission checked by callee) */
static int posix_clock_realtime_set(const clockid_t which_clock,
				    const struct timespec64 *tp)
{
	return do_sys_settimeofday64(tp, NULL);
}
|---|
| 210 |  | 
|---|
/* CLOCK_REALTIME: clock_adjtime() maps to the NTP adjtimex machinery */
static int posix_clock_realtime_adj(const clockid_t which_clock,
				    struct __kernel_timex *t)
{
	return do_adjtimex(t);
}
|---|
| 216 |  | 
|---|
/* CLOCK_MONOTONIC: monotonic time, shifted by the time namespace offset */
static int posix_get_monotonic_timespec(clockid_t which_clock, struct timespec64 *tp)
{
	ktime_get_ts64(tp);
	timens_add_monotonic(tp);
	return 0;
}
|---|
| 223 |  | 
|---|
/* CLOCK_MONOTONIC: monotonic time as ktime_t (no namespace adjustment) */
static ktime_t posix_get_monotonic_ktime(clockid_t which_clock)
{
	return ktime_get();
}
|---|
| 228 |  | 
|---|
/* CLOCK_MONOTONIC_RAW: hardware-based monotonic time without NTP slewing */
static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec64 *tp)
{
	ktime_get_raw_ts64(tp);
	timens_add_monotonic(tp);
	return 0;
}
|---|
| 235 |  | 
|---|
/* CLOCK_REALTIME_COARSE: tick-granular wall clock time (cheap read) */
static int posix_get_realtime_coarse(clockid_t which_clock, struct timespec64 *tp)
{
	ktime_get_coarse_real_ts64(tp);
	return 0;
}
|---|
| 241 |  | 
|---|
/* CLOCK_MONOTONIC_COARSE: tick-granular monotonic time (cheap read) */
static int posix_get_monotonic_coarse(clockid_t which_clock,
				      struct timespec64 *tp)
{
	ktime_get_coarse_ts64(tp);
	timens_add_monotonic(tp);
	return 0;
}
|---|
| 249 |  | 
|---|
/* Resolution of the coarse clocks: one scheduler tick */
static int posix_get_coarse_res(const clockid_t which_clock, struct timespec64 *tp)
{
	*tp = ktime_to_timespec64(KTIME_LOW_RES);
	return 0;
}
|---|
| 255 |  | 
|---|
/* CLOCK_BOOTTIME: monotonic time including suspend, namespace adjusted */
static int posix_get_boottime_timespec(const clockid_t which_clock, struct timespec64 *tp)
{
	ktime_get_boottime_ts64(tp);
	timens_add_boottime(tp);
	return 0;
}
|---|
| 262 |  | 
|---|
/* CLOCK_BOOTTIME as ktime_t (no namespace adjustment) */
static ktime_t posix_get_boottime_ktime(const clockid_t which_clock)
{
	return ktime_get_boottime();
}
|---|
| 267 |  | 
|---|
/* CLOCK_TAI: international atomic time (wall clock + TAI offset) */
static int posix_get_tai_timespec(clockid_t which_clock, struct timespec64 *tp)
{
	ktime_get_clocktai_ts64(tp);
	return 0;
}
|---|
| 273 |  | 
|---|
/* CLOCK_TAI as ktime_t */
static ktime_t posix_get_tai_ktime(clockid_t which_clock)
{
	return ktime_get_clocktai();
}
|---|
| 278 |  | 
|---|
/* Resolution of the high resolution clocks, in nanoseconds */
static int posix_get_hrtimer_res(clockid_t which_clock, struct timespec64 *tp)
{
	tp->tv_sec = 0;
	tp->tv_nsec = hrtimer_resolution;
	return 0;
}
|---|
| 285 |  | 
|---|
| 286 | /* | 
|---|
| 287 | * The siginfo si_overrun field and the return value of timer_getoverrun(2) | 
|---|
| 288 | * are of type int. Clamp the overrun value to INT_MAX | 
|---|
| 289 | */ | 
|---|
| 290 | static inline int timer_overrun_to_int(struct k_itimer *timr) | 
|---|
| 291 | { | 
|---|
| 292 | if (timr->it_overrun_last > (s64)INT_MAX) | 
|---|
| 293 | return INT_MAX; | 
|---|
| 294 |  | 
|---|
| 295 | return (int)timr->it_overrun_last; | 
|---|
| 296 | } | 
|---|
| 297 |  | 
|---|
/*
 * Rearm an interval timer for the next period: forward the expiry past
 * now, accumulating missed periods into it_overrun, then restart it.
 */
static void common_hrtimer_rearm(struct k_itimer *timr)
{
	struct hrtimer *timer = &timr->it.real.timer;

	timr->it_overrun += hrtimer_forward_now(timer, timr->it_interval);
	hrtimer_restart(timer);
}
|---|
| 305 |  | 
|---|
/*
 * Decide under timr::it_lock whether the queued signal is still valid
 * and rearm interval timers. Returns false if the signal must be dropped.
 */
static bool __posixtimer_deliver_signal(struct kernel_siginfo *info, struct k_itimer *timr)
{
	guard(spinlock)(&timr->it_lock);

	/*
	 * Check if the timer is still alive or whether it got modified
	 * since the signal was queued. In either case, don't rearm and
	 * drop the signal.
	 */
	if (timr->it_signal_seq != timr->it_sigqueue_seq || WARN_ON_ONCE(!posixtimer_valid(timr)))
		return false;

	/* One-shot timers deliver as-is; nothing to rearm */
	if (!timr->it_interval || WARN_ON_ONCE(timr->it_status != POSIX_TIMER_REQUEUE_PENDING))
		return true;

	/* Rearm via the clock specific callback and reset overrun tracking */
	timr->kclock->timer_rearm(timr);
	timr->it_status = POSIX_TIMER_ARMED;
	timr->it_overrun_last = timr->it_overrun;
	timr->it_overrun = -1LL;
	++timr->it_signal_seq;
	info->si_overrun = timer_overrun_to_int(timr);
	return true;
}
|---|
| 329 |  | 
|---|
| 330 | /* | 
|---|
| 331 | * This function is called from the signal delivery code. It decides | 
|---|
| 332 | * whether the signal should be dropped and rearms interval timers.  The | 
|---|
| 333 | * timer can be unconditionally accessed as there is a reference held on | 
|---|
| 334 | * it. | 
|---|
| 335 | */ | 
|---|
bool posixtimer_deliver_signal(struct kernel_siginfo *info, struct sigqueue *timer_sigq)
{
	struct k_itimer *timr = container_of(timer_sigq, struct k_itimer, sigq);
	bool ret;

	/*
	 * Release siglock to ensure proper locking order versus
	 * timr::it_lock. Keep interrupts disabled.
	 */
	spin_unlock(&current->sighand->siglock);

	ret = __posixtimer_deliver_signal(info, timr);

	/* Drop the reference which was acquired when the signal was queued */
	posixtimer_putref(timr);

	/* Reacquire siglock for the caller; interrupts stayed disabled */
	spin_lock(&current->sighand->siglock);
	return ret;
}
|---|
| 355 |  | 
|---|
| 356 | void posix_timer_queue_signal(struct k_itimer *timr) | 
|---|
| 357 | { | 
|---|
| 358 | lockdep_assert_held(&timr->it_lock); | 
|---|
| 359 |  | 
|---|
| 360 | if (!posixtimer_valid(timer: timr)) | 
|---|
| 361 | return; | 
|---|
| 362 |  | 
|---|
| 363 | timr->it_status = timr->it_interval ? POSIX_TIMER_REQUEUE_PENDING : POSIX_TIMER_DISARMED; | 
|---|
| 364 | posixtimer_send_sigqueue(tmr: timr); | 
|---|
| 365 | } | 
|---|
| 366 |  | 
|---|
| 367 | /* | 
|---|
| 368 | * This function gets called when a POSIX.1b interval timer expires from | 
|---|
| 369 | * the HRTIMER interrupt (soft interrupt on RT kernels). | 
|---|
| 370 | * | 
|---|
| 371 | * Handles CLOCK_REALTIME, CLOCK_MONOTONIC, CLOCK_BOOTTIME and CLOCK_TAI | 
|---|
| 372 | * based timers. | 
|---|
| 373 | */ | 
|---|
static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
{
	struct k_itimer *timr = container_of(timer, struct k_itimer, it.real.timer);

	guard(spinlock_irqsave)(&timr->it_lock);
	posix_timer_queue_signal(timr);
	/* Interval timers are rearmed at signal delivery, not here */
	return HRTIMER_NORESTART;
}
|---|
| 382 |  | 
|---|
| 383 | long posixtimer_create_prctl(unsigned long ctrl) | 
|---|
| 384 | { | 
|---|
| 385 | switch (ctrl) { | 
|---|
| 386 | case PR_TIMER_CREATE_RESTORE_IDS_OFF: | 
|---|
| 387 | current->signal->timer_create_restore_ids = 0; | 
|---|
| 388 | return 0; | 
|---|
| 389 | case PR_TIMER_CREATE_RESTORE_IDS_ON: | 
|---|
| 390 | current->signal->timer_create_restore_ids = 1; | 
|---|
| 391 | return 0; | 
|---|
| 392 | case PR_TIMER_CREATE_RESTORE_IDS_GET: | 
|---|
| 393 | return current->signal->timer_create_restore_ids; | 
|---|
| 394 | } | 
|---|
| 395 | return -EINVAL; | 
|---|
| 396 | } | 
|---|
| 397 |  | 
|---|
/*
 * Validate a sigevent and return the target PID for signal delivery,
 * or NULL when the notification spec is invalid. Called under RCU.
 */
static struct pid *good_sigevent(sigevent_t * event)
{
	struct pid *pid = task_tgid(current);
	struct task_struct *rtn;

	switch (event->sigev_notify) {
	case SIGEV_SIGNAL | SIGEV_THREAD_ID:
		/* Target thread must exist and belong to the caller's group */
		pid = find_vpid(event->sigev_notify_thread_id);
		rtn = pid_task(pid, PIDTYPE_PID);
		if (!rtn || !same_thread_group(rtn, current))
			return NULL;
		fallthrough;
	case SIGEV_SIGNAL:
	case SIGEV_THREAD:
		/* These variants deliver a signal, so it must be valid */
		if (event->sigev_signo <= 0 || event->sigev_signo > SIGRTMAX)
			return NULL;
		fallthrough;
	case SIGEV_NONE:
		return pid;
	default:
		return NULL;
	}
}
|---|
| 421 |  | 
|---|
/*
 * Allocate a zeroed k_itimer with an initialized sigqueue and a
 * reference count of one. Returns NULL on allocation failure or when
 * the cache was never set up.
 */
static struct k_itimer *alloc_posix_timer(void)
{
	struct k_itimer *tmr;

	if (unlikely(!posix_timers_cache))
		return NULL;

	tmr = kmem_cache_zalloc(posix_timers_cache, GFP_KERNEL);
	if (!tmr)
		return tmr;

	/* Charges RLIMIT_SIGPENDING; undo the allocation if that fails */
	if (unlikely(!posixtimer_init_sigqueue(&tmr->sigq))) {
		kmem_cache_free(posix_timers_cache, tmr);
		return NULL;
	}
	rcuref_init(&tmr->rcuref, 1);
	return tmr;
}
|---|
| 440 |  | 
|---|
/*
 * Final release of a timer: drop the PID and sigpending rlimit
 * references and free the timer after an RCU grace period.
 */
void posixtimer_free_timer(struct k_itimer *tmr)
{
	put_pid(tmr->it_pid);
	if (tmr->sigq.ucounts)
		dec_rlimit_put_ucounts(tmr->sigq.ucounts, UCOUNT_RLIMIT_SIGPENDING);
	kfree_rcu(tmr, rcu);
}
|---|
| 448 |  | 
|---|
/*
 * Remove @tmr from the hash table and drop the hash's reference.
 * The actual free happens when the last reference goes away.
 */
static void posix_timer_unhash_and_free(struct k_itimer *tmr)
{
	struct timer_hash_bucket *bucket = hash_bucket(posix_sig_owner(tmr), tmr->it_id);

	scoped_guard (spinlock, &bucket->lock)
		hlist_del_rcu(&tmr->t_hash);
	posixtimer_putref(tmr);
}
|---|
| 457 |  | 
|---|
/* Clock specific create callback for the hrtimer based clocks */
static int common_timer_create(struct k_itimer *new_timer)
{
	hrtimer_setup(&new_timer->it.real.timer, posix_timer_fn, new_timer->it_clock, 0);
	return 0;
}
|---|
| 463 |  | 
|---|
| 464 | /* Create a POSIX.1b interval timer. */ | 
|---|
| 465 | static int do_timer_create(clockid_t which_clock, struct sigevent *event, | 
|---|
| 466 | timer_t __user *created_timer_id) | 
|---|
| 467 | { | 
|---|
| 468 | const struct k_clock *kc = clockid_to_kclock(id: which_clock); | 
|---|
| 469 | timer_t req_id = TIMER_ANY_ID; | 
|---|
| 470 | struct k_itimer *new_timer; | 
|---|
| 471 | int error, new_timer_id; | 
|---|
| 472 |  | 
|---|
| 473 | if (!kc) | 
|---|
| 474 | return -EINVAL; | 
|---|
| 475 | if (!kc->timer_create) | 
|---|
| 476 | return -EOPNOTSUPP; | 
|---|
| 477 |  | 
|---|
| 478 | new_timer = alloc_posix_timer(); | 
|---|
| 479 | if (unlikely(!new_timer)) | 
|---|
| 480 | return -EAGAIN; | 
|---|
| 481 |  | 
|---|
| 482 | spin_lock_init(&new_timer->it_lock); | 
|---|
| 483 |  | 
|---|
| 484 | /* Special case for CRIU to restore timers with a given timer ID. */ | 
|---|
| 485 | if (unlikely(current->signal->timer_create_restore_ids)) { | 
|---|
| 486 | if (copy_from_user(to: &req_id, from: created_timer_id, n: sizeof(req_id))) | 
|---|
| 487 | return -EFAULT; | 
|---|
| 488 | /* Valid IDs are 0..INT_MAX */ | 
|---|
| 489 | if ((unsigned int)req_id > INT_MAX) | 
|---|
| 490 | return -EINVAL; | 
|---|
| 491 | } | 
|---|
| 492 |  | 
|---|
| 493 | /* | 
|---|
| 494 | * Add the timer to the hash table. The timer is not yet valid | 
|---|
| 495 | * after insertion, but has a unique ID allocated. | 
|---|
| 496 | */ | 
|---|
| 497 | new_timer_id = posix_timer_add(timer: new_timer, req_id); | 
|---|
| 498 | if (new_timer_id < 0) { | 
|---|
| 499 | posixtimer_free_timer(tmr: new_timer); | 
|---|
| 500 | return new_timer_id; | 
|---|
| 501 | } | 
|---|
| 502 |  | 
|---|
| 503 | new_timer->it_clock = which_clock; | 
|---|
| 504 | new_timer->kclock = kc; | 
|---|
| 505 | new_timer->it_overrun = -1LL; | 
|---|
| 506 |  | 
|---|
| 507 | if (event) { | 
|---|
| 508 | scoped_guard (rcu) | 
|---|
| 509 | new_timer->it_pid = get_pid(pid: good_sigevent(event)); | 
|---|
| 510 | if (!new_timer->it_pid) { | 
|---|
| 511 | error = -EINVAL; | 
|---|
| 512 | goto out; | 
|---|
| 513 | } | 
|---|
| 514 | new_timer->it_sigev_notify     = event->sigev_notify; | 
|---|
| 515 | new_timer->sigq.info.si_signo = event->sigev_signo; | 
|---|
| 516 | new_timer->sigq.info.si_value = event->sigev_value; | 
|---|
| 517 | } else { | 
|---|
| 518 | new_timer->it_sigev_notify     = SIGEV_SIGNAL; | 
|---|
| 519 | new_timer->sigq.info.si_signo = SIGALRM; | 
|---|
| 520 | new_timer->sigq.info.si_value.sival_int = new_timer->it_id; | 
|---|
| 521 | new_timer->it_pid = get_pid(pid: task_tgid(current)); | 
|---|
| 522 | } | 
|---|
| 523 |  | 
|---|
| 524 | if (new_timer->it_sigev_notify & SIGEV_THREAD_ID) | 
|---|
| 525 | new_timer->it_pid_type = PIDTYPE_PID; | 
|---|
| 526 | else | 
|---|
| 527 | new_timer->it_pid_type = PIDTYPE_TGID; | 
|---|
| 528 |  | 
|---|
| 529 | new_timer->sigq.info.si_tid = new_timer->it_id; | 
|---|
| 530 | new_timer->sigq.info.si_code = SI_TIMER; | 
|---|
| 531 |  | 
|---|
| 532 | if (copy_to_user(to: created_timer_id, from: &new_timer_id, n: sizeof (new_timer_id))) { | 
|---|
| 533 | error = -EFAULT; | 
|---|
| 534 | goto out; | 
|---|
| 535 | } | 
|---|
| 536 | /* | 
|---|
| 537 | * After successful copy out, the timer ID is visible to user space | 
|---|
| 538 | * now but not yet valid because new_timer::signal low order bit is 1. | 
|---|
| 539 | * | 
|---|
| 540 | * Complete the initialization with the clock specific create | 
|---|
| 541 | * callback. | 
|---|
| 542 | */ | 
|---|
| 543 | error = kc->timer_create(new_timer); | 
|---|
| 544 | if (error) | 
|---|
| 545 | goto out; | 
|---|
| 546 |  | 
|---|
| 547 | /* | 
|---|
| 548 | * timer::it_lock ensures that __lock_timer() observes a fully | 
|---|
| 549 | * initialized timer when it observes a valid timer::it_signal. | 
|---|
| 550 | * | 
|---|
| 551 | * sighand::siglock is required to protect signal::posix_timers. | 
|---|
| 552 | */ | 
|---|
| 553 | scoped_guard (spinlock_irq, &new_timer->it_lock) { | 
|---|
| 554 | guard(spinlock)(l: ¤t->sighand->siglock); | 
|---|
| 555 | /* | 
|---|
| 556 | * new_timer::it_signal contains the signal pointer with | 
|---|
| 557 | * bit 0 set, which makes it invalid for syscall operations. | 
|---|
| 558 | * Store the unmodified signal pointer to make it valid. | 
|---|
| 559 | */ | 
|---|
| 560 | WRITE_ONCE(new_timer->it_signal, current->signal); | 
|---|
| 561 | hlist_add_head_rcu(n: &new_timer->list, h: ¤t->signal->posix_timers); | 
|---|
| 562 | } | 
|---|
| 563 | /* | 
|---|
| 564 | * After unlocking @new_timer is subject to concurrent removal and | 
|---|
| 565 | * cannot be touched anymore | 
|---|
| 566 | */ | 
|---|
| 567 | return 0; | 
|---|
| 568 | out: | 
|---|
| 569 | posix_timer_unhash_and_free(tmr: new_timer); | 
|---|
| 570 | return error; | 
|---|
| 571 | } | 
|---|
| 572 |  | 
|---|
| 573 | SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock, | 
|---|
| 574 | struct sigevent __user *, timer_event_spec, | 
|---|
| 575 | timer_t __user *, created_timer_id) | 
|---|
| 576 | { | 
|---|
| 577 | if (timer_event_spec) { | 
|---|
| 578 | sigevent_t event; | 
|---|
| 579 |  | 
|---|
| 580 | if (copy_from_user(to: &event, from: timer_event_spec, n: sizeof (event))) | 
|---|
| 581 | return -EFAULT; | 
|---|
| 582 | return do_timer_create(which_clock, event: &event, created_timer_id); | 
|---|
| 583 | } | 
|---|
| 584 | return do_timer_create(which_clock, NULL, created_timer_id); | 
|---|
| 585 | } | 
|---|
| 586 |  | 
|---|
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE3(timer_create, clockid_t, which_clock,
		       struct compat_sigevent __user *, timer_event_spec,
		       timer_t __user *, created_timer_id)
{
	sigevent_t event;

	/* No sigevent supplied: use the SIGALRM default notification */
	if (!timer_event_spec)
		return do_timer_create(which_clock, NULL, created_timer_id);

	if (get_compat_sigevent(&event, timer_event_spec))
		return -EFAULT;

	return do_timer_create(which_clock, &event, created_timer_id);
}
#endif
|---|
| 602 |  | 
|---|
/*
 * Look up a timer by ID for the current process and return it with
 * timr::it_lock held (interrupts disabled), or NULL when no valid
 * matching timer exists.
 */
static struct k_itimer *__lock_timer(timer_t timer_id)
{
	struct k_itimer *timr;

	/*
	 * timer_t could be any type >= int and we want to make sure any
	 * @timer_id outside positive int range fails lookup.
	 */
	if ((unsigned long long)timer_id > INT_MAX)
		return NULL;

	/*
	 * The hash lookup and the timers are RCU protected.
	 *
	 * Timers are added to the hash in invalid state where
	 * timr::it_signal is marked invalid. timer::it_signal is only set
	 * after the rest of the initialization succeeded.
	 *
	 * Timer destruction happens in steps:
	 *  1) Set timr::it_signal marked invalid with timr::it_lock held
	 *  2) Release timr::it_lock
	 *  3) Remove from the hash under hash_lock
	 *  4) Put the reference count.
	 *
	 * The reference count might not drop to zero if timr::sigq is
	 * queued. In that case the signal delivery or flush will put the
	 * last reference count.
	 *
	 * When the reference count reaches zero, the timer is scheduled
	 * for RCU removal after the grace period.
	 *
	 * Holding rcu_read_lock() across the lookup ensures that
	 * the timer cannot be freed.
	 *
	 * The lookup validates locklessly that timr::it_signal ==
	 * current::it_signal and timr::it_id == @timer_id. timr::it_id
	 * can't change, but timr::it_signal can become invalid during
	 * destruction, which makes the locked check fail.
	 */
	guard(rcu)();
	timr = posix_timer_by_id(timer_id);
	if (timr) {
		spin_lock_irq(&timr->it_lock);
		/*
		 * Validate under timr::it_lock that timr::it_signal is
		 * still valid. Pairs with #1 above.
		 */
		if (timr->it_signal == current->signal)
			return timr;
		spin_unlock_irq(&timr->it_lock);
	}
	return NULL;
}
|---|
| 656 |  | 
|---|
/* Remaining time until expiry, relative to @now, slack adjusted */
static ktime_t common_hrtimer_remaining(struct k_itimer *timr, ktime_t now)
{
	struct hrtimer *timer = &timr->it.real.timer;

	return __hrtimer_expires_remaining_adjusted(timer, now);
}
|---|
| 663 |  | 
|---|
/* Forward expiry past @now by whole intervals; returns missed periods */
static s64 common_hrtimer_forward(struct k_itimer *timr, ktime_t now)
{
	struct hrtimer *timer = &timr->it.real.timer;

	return hrtimer_forward(timer, now, timr->it_interval);
}
|---|
| 670 |  | 
|---|
| 671 | /* | 
|---|
| 672 | * Get the time remaining on a POSIX.1b interval timer. | 
|---|
| 673 | * | 
|---|
| 674 | * Two issues to handle here: | 
|---|
| 675 | * | 
|---|
| 676 | *  1) The timer has a requeue pending. The return value must appear as | 
|---|
| 677 | *     if the timer has been requeued right now. | 
|---|
| 678 | * | 
|---|
| 679 | *  2) The timer is a SIGEV_NONE timer. These timers are never enqueued | 
|---|
| 680 | *     into the hrtimer queue and therefore never expired. Emulate expiry | 
|---|
| 681 | *     here taking #1 into account. | 
|---|
| 682 | */ | 
|---|
void common_timer_get(struct k_itimer *timr, struct itimerspec64 *cur_setting)
{
	const struct k_clock *kc = timr->kclock;
	ktime_t now, remaining, iv;
	bool sig_none;

	/* SIGEV_NONE timers are never queued, expiry is emulated below */
	sig_none = timr->it_sigev_notify == SIGEV_NONE;
	iv = timr->it_interval;

	/* interval timer ? */
	if (iv) {
		cur_setting->it_interval = ktime_to_timespec64(iv);
	} else if (timr->it_status == POSIX_TIMER_DISARMED) {
		/*
		 * SIGEV_NONE oneshot timers are never queued and therefore
		 * timr->it_status is always DISARMED. The check below
		 * vs. remaining time will handle this case.
		 *
		 * For all other timers there is nothing to update here, so
		 * return.
		 */
		if (!sig_none)
			return;
	}

	now = kc->clock_get_ktime(timr->it_clock);

	/*
	 * If this is an interval timer and either has requeue pending or
	 * is a SIGEV_NONE timer move the expiry time forward by intervals,
	 * so expiry is > now.
	 */
	if (iv && timr->it_status != POSIX_TIMER_ARMED)
		timr->it_overrun += kc->timer_forward(timr, now);

	remaining = kc->timer_remaining(timr, now);
	/*
	 * As @now is retrieved before a possible timer_forward() and
	 * cannot be reevaluated by the compiler @remaining is based on the
	 * same @now value. Therefore @remaining is consistent vs. @now.
	 *
	 * Consequently all interval timers, i.e. @iv > 0, cannot have a
	 * remaining time <= 0 because timer_forward() guarantees to move
	 * them forward so that the next timer expiry is > @now.
	 */
	if (remaining <= 0) {
		/*
		 * A single shot SIGEV_NONE timer must return 0, when it is
		 * expired! Timers which have a real signal delivery mode
		 * must return a remaining time greater than 0 because the
		 * signal has not yet been delivered.
		 */
		if (!sig_none)
			cur_setting->it_value.tv_nsec = 1;
	} else {
		cur_setting->it_value = ktime_to_timespec64(remaining);
	}
}
|---|
| 741 |  | 
|---|
/*
 * Common implementation of timer_gettime() for native and 32bit syscalls.
 * Zeroes @setting first as the clock specific timer_get() callbacks only
 * fill in the relevant fields. Returns 0 or -EINVAL for an invalid
 * timer ID (via scoped_timer_get_or_fail()).
 */
static int do_timer_gettime(timer_t timer_id,  struct itimerspec64 *setting)
{
	memset(setting, 0, sizeof(*setting));
	scoped_timer_get_or_fail(timer_id)
		scoped_timer->kclock->timer_get(scoped_timer, setting);
	return 0;
}
|---|
| 749 |  | 
|---|
/* Get the time remaining on a POSIX.1b interval timer. */
SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
		struct __kernel_itimerspec __user *, setting)
{
	struct itimerspec64 cur_setting;

	int ret = do_timer_gettime(timer_id, &cur_setting);
	if (!ret) {
		/* Copy the result out; report -EFAULT on a bad user pointer */
		if (put_itimerspec64(&cur_setting, setting))
			ret = -EFAULT;
	}
	return ret;
}
|---|
| 763 |  | 
|---|
| 764 | #ifdef CONFIG_COMPAT_32BIT_TIME | 
|---|
| 765 |  | 
|---|
/* 32bit time_t variant of sys_timer_gettime() */
SYSCALL_DEFINE2(timer_gettime32, timer_t, timer_id,
		struct old_itimerspec32 __user *, setting)
{
	struct itimerspec64 cur_setting;

	int ret = do_timer_gettime(timer_id, &cur_setting);
	if (!ret) {
		/* Convert to the legacy 32bit layout on the way out */
		if (put_old_itimerspec32(&cur_setting, setting))
			ret = -EFAULT;
	}
	return ret;
}
|---|
| 778 |  | 
|---|
| 779 | #endif | 
|---|
| 780 |  | 
|---|
/**
 * sys_timer_getoverrun - Get the number of overruns of a POSIX.1b interval timer
 * @timer_id:	The timer ID which identifies the timer
 *
 * The "overrun count" of a timer is one plus the number of expiration
 * intervals which have elapsed between the first expiry, which queues the
 * signal and the actual signal delivery. On signal delivery the "overrun
 * count" is calculated and cached, so it can be returned directly here.
 *
 * As this is relative to the last queued signal the returned overrun count
 * is meaningless outside of the signal delivery path and even there it
 * does not accurately reflect the current state when user space evaluates
 * it.
 *
 * Returns:
 *	-EINVAL		@timer_id is invalid
 *	1..INT_MAX	The number of overruns related to the last delivered signal
 */
SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
{
	/* Lookup, lock and read out the cached overrun count */
	scoped_timer_get_or_fail(timer_id)
		return timer_overrun_to_int(scoped_timer);
}
|---|
| 804 |  | 
|---|
/*
 * common_hrtimer_arm - Arm the underlying hrtimer of a posix timer
 * @timr:	The timer, locked by the caller
 * @expires:	Expiry time (absolute or relative depending on @absolute)
 * @absolute:	True if @expires is an absolute time (TIMER_ABSTIME)
 * @sigev_none:	True for SIGEV_NONE timers, which store the expiry but are
 *		never queued on the hrtimer base
 */
static void common_hrtimer_arm(struct k_itimer *timr, ktime_t expires,
			       bool absolute, bool sigev_none)
{
	struct hrtimer *timer = &timr->it.real.timer;
	enum hrtimer_mode mode;

	mode = absolute ? HRTIMER_MODE_ABS : HRTIMER_MODE_REL;
	/*
	 * Posix magic: Relative CLOCK_REALTIME timers are not affected by
	 * clock modifications, so they become CLOCK_MONOTONIC based under the
	 * hood. See hrtimer_setup(). Update timr->kclock, so the generic
	 * functions which use timr->kclock->clock_get_*() work.
	 *
	 * Note: it_clock stays unmodified, because the next timer_set() might
	 * use ABSTIME, so it needs to switch back.
	 */
	if (timr->it_clock == CLOCK_REALTIME)
		timr->kclock = absolute ? &clock_realtime : &clock_monotonic;

	hrtimer_setup(&timr->it.real.timer, posix_timer_fn, timr->it_clock, mode);

	/* Convert a relative expiry to absolute, guarding against overflow */
	if (!absolute)
		expires = ktime_add_safe(expires, hrtimer_cb_get_time(timer));
	hrtimer_set_expires(timer, expires);

	/* SIGEV_NONE timers are never queued; only the expiry is recorded */
	if (!sigev_none)
		hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}
|---|
| 833 |  | 
|---|
/*
 * Try to cancel the underlying hrtimer. Returns a negative value when the
 * callback is currently running, which makes the caller retry.
 */
static int common_hrtimer_try_to_cancel(struct k_itimer *timr)
{
	return hrtimer_try_to_cancel(&timr->it.real.timer);
}
|---|
| 838 |  | 
|---|
/* Wait until a concurrently running hrtimer callback of @timer has finished */
static void common_timer_wait_running(struct k_itimer *timer)
{
	hrtimer_cancel_wait_running(&timer->it.real.timer);
}
|---|
| 843 |  | 
|---|
/*
 * On PREEMPT_RT this prevents priority inversion and a potential livelock
 * against the ksoftirqd thread in case that ksoftirqd gets preempted while
 * executing a hrtimer callback.
 *
 * See the comments in hrtimer_cancel_wait_running(). For PREEMPT_RT=n this
 * just results in a cpu_relax().
 *
 * For POSIX CPU timers with CONFIG_POSIX_CPU_TIMERS_TASK_WORK=n this is
 * just a cpu_relax(). With CONFIG_POSIX_CPU_TIMERS_TASK_WORK=y this
 * prevents spinning on an eventually scheduled out task and a livelock
 * when the task which tries to delete or disarm the timer has preempted
 * the task which runs the expiry in task work context.
 */
static void timer_wait_running(struct k_itimer *timer)
{
	/*
	 * kc->timer_wait_running() might drop RCU lock. So @timer
	 * cannot be touched anymore after the function returns!
	 */
	timer->kclock->timer_wait_running(timer);
}
|---|
| 866 |  | 
|---|
/*
 * Set up the new interval and reset the signal delivery data
 *
 * Note: The interval is only stored when it_value is non-zero. A zero
 * it_value disarms the timer, in which case POSIX mandates the interval
 * is irrelevant, so it is cleared.
 */
void posix_timer_set_common(struct k_itimer *timer, struct itimerspec64 *new_setting)
{
	if (new_setting->it_value.tv_sec || new_setting->it_value.tv_nsec)
		timer->it_interval = timespec64_to_ktime(new_setting->it_interval);
	else
		timer->it_interval = 0;

	/* Reset overrun accounting */
	timer->it_overrun_last = 0;
	timer->it_overrun = -1LL;
}
|---|
| 881 |  | 
|---|
/*
 * common_timer_set - Generic timer_set() for hrtimer based clocks
 * @timr:	The timer, locked by the caller
 * @flags:	TIMER_ABSTIME or 0
 * @new_setting:	The new interval and expiry time
 * @old_setting:	Optional storage for the previous setting
 *
 * Returns 0 on success or TIMER_RETRY when the timer callback is
 * concurrently running and could not be canceled; the caller drops the
 * lock, waits and retries.
 */
int common_timer_set(struct k_itimer *timr, int flags,
		     struct itimerspec64 *new_setting,
		     struct itimerspec64 *old_setting)
{
	const struct k_clock *kc = timr->kclock;
	bool sigev_none;
	ktime_t expires;

	if (old_setting)
		common_timer_get(timr, old_setting);

	/*
	 * Careful here. On SMP systems the timer expiry function could be
	 * active and spinning on timr->it_lock.
	 */
	if (kc->timer_try_to_cancel(timr) < 0)
		return TIMER_RETRY;

	timr->it_status = POSIX_TIMER_DISARMED;
	posix_timer_set_common(timr, new_setting);

	/* Keep timer disarmed when it_value is zero */
	if (!new_setting->it_value.tv_sec && !new_setting->it_value.tv_nsec)
		return 0;

	expires = timespec64_to_ktime(new_setting->it_value);
	/* Absolute expiry values are adjusted for time namespace offsets */
	if (flags & TIMER_ABSTIME)
		expires = timens_ktime_to_host(timr->it_clock, expires);
	sigev_none = timr->it_sigev_notify == SIGEV_NONE;

	kc->timer_arm(timr, expires, flags & TIMER_ABSTIME, sigev_none);
	if (!sigev_none)
		timr->it_status = POSIX_TIMER_ARMED;
	return 0;
}
|---|
| 918 |  | 
|---|
/*
 * Common implementation of timer_settime() for native and 32bit syscalls.
 *
 * Loops because the clock specific timer_set() callback can return
 * TIMER_RETRY when the expiry function is concurrently running. In that
 * case the timer lock is dropped, the running callback is waited for and
 * the whole operation is retried. @old_spec64 is invalidated after the
 * first iteration so a retry does not overwrite the captured old value.
 */
static int do_timer_settime(timer_t timer_id, int tmr_flags, struct itimerspec64 *new_spec64,
			    struct itimerspec64 *old_spec64)
{
	if (!timespec64_valid(&new_spec64->it_interval) ||
	    !timespec64_valid(&new_spec64->it_value))
		return -EINVAL;

	if (old_spec64)
		memset(old_spec64, 0, sizeof(*old_spec64));

	for (; ; old_spec64 = NULL) {
		struct k_itimer *timr;

		scoped_timer_get_or_fail(timer_id) {
			timr = scoped_timer;

			if (old_spec64)
				old_spec64->it_interval = ktime_to_timespec64(timr->it_interval);

			/* Prevent signal delivery and rearming. */
			timr->it_signal_seq++;

			int ret = timr->kclock->timer_set(timr, tmr_flags, new_spec64, old_spec64);
			if (ret != TIMER_RETRY)
				return ret;

			/* Protect the timer from being freed when leaving the lock scope */
			rcu_read_lock();
		}
		timer_wait_running(timr);
		rcu_read_unlock();
	}
}
|---|
| 952 |  | 
|---|
/* Set a POSIX.1b interval timer */
SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
		const struct __kernel_itimerspec __user *, new_setting,
		struct __kernel_itimerspec __user *, old_setting)
{
	struct itimerspec64 new_spec, old_spec, *rtn;
	int error = 0;

	/* POSIX: the new setting is mandatory, the old one is optional */
	if (!new_setting)
		return -EINVAL;

	if (get_itimerspec64(&new_spec, new_setting))
		return -EFAULT;

	rtn = old_setting ? &old_spec : NULL;
	error = do_timer_settime(timer_id, flags, &new_spec, rtn);
	if (!error && old_setting) {
		if (put_itimerspec64(&old_spec, old_setting))
			error = -EFAULT;
	}
	return error;
}
|---|
| 975 |  | 
|---|
| 976 | #ifdef CONFIG_COMPAT_32BIT_TIME | 
|---|
/* 32bit time_t variant of sys_timer_settime() */
SYSCALL_DEFINE4(timer_settime32, timer_t, timer_id, int, flags,
		struct old_itimerspec32 __user *, new,
		struct old_itimerspec32 __user *, old)
{
	struct itimerspec64 new_spec, old_spec;
	struct itimerspec64 *rtn = old ? &old_spec : NULL;
	int error = 0;

	/* POSIX: the new setting is mandatory, the old one is optional */
	if (!new)
		return -EINVAL;
	if (get_old_itimerspec32(&new_spec, new))
		return -EFAULT;

	error = do_timer_settime(timer_id, flags, &new_spec, rtn);
	if (!error && old) {
		if (put_old_itimerspec32(&old_spec, old))
			error = -EFAULT;
	}
	return error;
}
|---|
| 997 | #endif | 
|---|
| 998 |  | 
|---|
/*
 * common_timer_del - Generic timer_del() for hrtimer based clocks
 *
 * Returns TIMER_RETRY when the timer expiry function is concurrently
 * running, so the caller has to wait and retry. Otherwise marks the timer
 * disarmed and returns 0.
 */
int common_timer_del(struct k_itimer *timer)
{
	const struct k_clock *kc = timer->kclock;

	if (kc->timer_try_to_cancel(timer) < 0)
		return TIMER_RETRY;
	timer->it_status = POSIX_TIMER_DISARMED;
	return 0;
}
|---|
| 1008 |  | 
|---|
/*
 * If the deleted timer is on the ignored list, remove it and
 * drop the associated reference.
 */
static inline void posix_timer_cleanup_ignored(struct k_itimer *tmr)
{
	if (!hlist_unhashed(&tmr->ignored_list)) {
		hlist_del_init(&tmr->ignored_list);
		posixtimer_putref(tmr);
	}
}
|---|
| 1020 |  | 
|---|
/*
 * posix_timer_delete - Invalidate and disarm a timer
 * @timer:	The timer, locked by the caller (timer->it_lock held)
 *
 * The caller still has to remove the timer from the hash and drop the
 * final reference via posix_timer_unhash_and_free().
 */
static void posix_timer_delete(struct k_itimer *timer)
{
	/*
	 * Invalidate the timer, remove it from the linked list and remove
	 * it from the ignored list if pending.
	 *
	 * The invalidation must be written with siglock held so that the
	 * signal code observes the invalidated timer::it_signal in
	 * do_sigaction(), which prevents it from moving a pending signal
	 * of a deleted timer to the ignore list.
	 *
	 * The invalidation also prevents signal queueing, signal delivery
	 * and therefore rearming from the signal delivery path.
	 *
	 * A concurrent lookup can still find the timer in the hash, but it
	 * will check timer::it_signal with timer::it_lock held and observe
	 * bit 0 set, which invalidates it. That also prevents the timer ID
	 * from being handed out before this timer is completely gone.
	 */
	timer->it_signal_seq++;

	scoped_guard (spinlock, &current->sighand->siglock) {
		/* Set bit 0 of it_signal to mark the timer invalid */
		unsigned long sig = (unsigned long)timer->it_signal | 1UL;

		WRITE_ONCE(timer->it_signal, (struct signal_struct *)sig);
		hlist_del_rcu(&timer->list);
		posix_timer_cleanup_ignored(timer);
	}

	/* Retry until a concurrently running expiry callback has finished */
	while (timer->kclock->timer_del(timer) == TIMER_RETRY) {
		guard(rcu)();
		spin_unlock_irq(&timer->it_lock);
		timer_wait_running(timer);
		spin_lock_irq(&timer->it_lock);
	}
}
|---|
| 1057 |  | 
|---|
/* Delete a POSIX.1b interval timer. */
SYSCALL_DEFINE1(timer_delete, timer_t, timer_id)
{
	struct k_itimer *timer;

	scoped_timer_get_or_fail(timer_id) {
		timer = scoped_timer;
		posix_timer_delete(timer);
	}
	/* Remove it from the hash, which frees up the timer ID */
	posix_timer_unhash_and_free(timer);
	return 0;
}
|---|
| 1071 |  | 
|---|
/*
 * Invoked from do_exit() when the last thread of a thread group exits.
 * At that point no other task can access the timers of the dying
 * task anymore.
 */
void exit_itimers(struct task_struct *tsk)
{
	struct hlist_head timers;
	struct hlist_node *next;
	struct k_itimer *timer;

	/* Clear restore mode for exec() */
	tsk->signal->timer_create_restore_ids = 0;

	if (hlist_empty(&tsk->signal->posix_timers))
		return;

	/* Protect against concurrent read via /proc/$PID/timers */
	scoped_guard (spinlock_irq, &tsk->sighand->siglock)
		hlist_move_list(&tsk->signal->posix_timers, &timers);

	/* The timers are no longer accessible via tsk::signal */
	hlist_for_each_entry_safe(timer, next, &timers, list) {
		scoped_guard (spinlock_irq, &timer->it_lock)
			posix_timer_delete(timer);
		posix_timer_unhash_and_free(timer);
		cond_resched();
	}

	/*
	 * There should be no timers on the ignored list. itimer_delete() has
	 * mopped them up.
	 */
	if (!WARN_ON_ONCE(!hlist_empty(&tsk->signal->ignored_posix_timers)))
		return;

	/* Defensive mop-up in case the warning above fired */
	hlist_move_list(&tsk->signal->ignored_posix_timers, &timers);
	while (!hlist_empty(&timers)) {
		posix_timer_cleanup_ignored(hlist_entry(timers.first, struct k_itimer,
							ignored_list));
	}
}
|---|
| 1114 |  | 
|---|
/* Set the time of a clock. Only supported for settable clocks. */
SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
		const struct __kernel_timespec __user *, tp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 new_tp;

	if (!kc || !kc->clock_set)
		return -EINVAL;

	if (get_timespec64(&new_tp, tp))
		return -EFAULT;

	/*
	 * Permission checks have to be done inside the clock specific
	 * setter callback.
	 */
	return kc->clock_set(which_clock, &new_tp);
}
|---|
| 1133 |  | 
|---|
/* Read the time of a clock into a user space timespec */
SYSCALL_DEFINE2(clock_gettime, const clockid_t, which_clock,
		struct __kernel_timespec __user *, tp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 kernel_tp;
	int error;

	if (!kc)
		return -EINVAL;

	error = kc->clock_get_timespec(which_clock, &kernel_tp);

	if (!error && put_timespec64(&kernel_tp, tp))
		error = -EFAULT;

	return error;
}
|---|
| 1151 |  | 
|---|
/*
 * Common implementation of clock_adjtime() for native and 32bit syscalls.
 * Returns -EINVAL for unknown clocks and -EOPNOTSUPP for clocks which do
 * not support adjustment.
 */
int do_clock_adjtime(const clockid_t which_clock, struct __kernel_timex * ktx)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);

	if (!kc)
		return -EINVAL;
	if (!kc->clock_adj)
		return -EOPNOTSUPP;

	return kc->clock_adj(which_clock, ktx);
}
|---|
| 1163 |  | 
|---|
/* Adjust a clock. The modified timex struct is copied back to user space. */
SYSCALL_DEFINE2(clock_adjtime, const clockid_t, which_clock,
		struct __kernel_timex __user *, utx)
{
	struct __kernel_timex ktx;
	int err;

	if (copy_from_user(&ktx, utx, sizeof(ktx)))
		return -EFAULT;

	err = do_clock_adjtime(which_clock, &ktx);

	/* Non-negative return values carry the clock state, copy out */
	if (err >= 0 && copy_to_user(utx, &ktx, sizeof(ktx)))
		return -EFAULT;

	return err;
}
|---|
| 1180 |  | 
|---|
/**
 * sys_clock_getres - Get the resolution of a clock
 * @which_clock:	The clock to get the resolution for
 * @tp:			Pointer to a user space timespec64 for storage
 *
 * POSIX defines:
 *
 * "The clock_getres() function shall return the resolution of any
 * clock. Clock resolutions are implementation-defined and cannot be set by
 * a process. If the argument res is not NULL, the resolution of the
 * specified clock shall be stored in the location pointed to by res. If
 * res is NULL, the clock resolution is not returned. If the time argument
 * of clock_settime() is not a multiple of res, then the value is truncated
 * to a multiple of res."
 *
 * Due to the various hardware constraints the real resolution can vary
 * wildly and even change during runtime when the underlying devices are
 * replaced. The kernel also can use hardware devices with different
 * resolutions for reading the time and for arming timers.
 *
 * The kernel therefore deviates from the POSIX spec in various aspects:
 *
 * 1) The resolution returned to user space
 *
 *    For CLOCK_REALTIME, CLOCK_MONOTONIC, CLOCK_BOOTTIME, CLOCK_TAI,
 *    CLOCK_REALTIME_ALARM, CLOCK_BOOTTIME_ALARM and CLOCK_MONOTONIC_RAW
 *    the kernel differentiates only two cases:
 *
 *    I)  Low resolution mode:
 *
 *	  When high resolution timers are disabled at compile or runtime
 *	  the resolution returned is nanoseconds per tick, which represents
 *	  the precision at which timers expire.
 *
 *    II) High resolution mode:
 *
 *	  When high resolution timers are enabled the resolution returned
 *	  is always one nanosecond independent of the actual resolution of
 *	  the underlying hardware devices.
 *
 *	  For CLOCK_*_ALARM the actual resolution depends on system
 *	  state. When system is running the resolution is the same as the
 *	  resolution of the other clocks. During suspend the actual
 *	  resolution is the resolution of the underlying RTC device which
 *	  might be way less precise than the clockevent device used during
 *	  running state.
 *
 *   For CLOCK_REALTIME_COARSE and CLOCK_MONOTONIC_COARSE the resolution
 *   returned is always nanoseconds per tick.
 *
 *   For CLOCK_PROCESS_CPUTIME and CLOCK_THREAD_CPUTIME the resolution
 *   returned is always one nanosecond under the assumption that the
 *   underlying scheduler clock has a better resolution than nanoseconds
 *   per tick.
 *
 *   For dynamic POSIX clocks (PTP devices) the resolution returned is
 *   always one nanosecond.
 *
 * 2) Affect on sys_clock_settime()
 *
 *    The kernel does not truncate the time which is handed in to
 *    sys_clock_settime(). The kernel internal timekeeping is always using
 *    nanoseconds precision independent of the clocksource device which is
 *    used to read the time from. The resolution of that device only
 *    affects the precision of the time returned by sys_clock_gettime().
 *
 * Returns:
 *	0		Success. @tp contains the resolution
 *	-EINVAL		@which_clock is not a valid clock ID
 *	-EFAULT		Copying the resolution to @tp faulted
 *	-ENODEV		Dynamic POSIX clock is not backed by a device
 *	-EOPNOTSUPP	Dynamic POSIX clock does not support getres()
 */
SYSCALL_DEFINE2(clock_getres, const clockid_t, which_clock,
		struct __kernel_timespec __user *, tp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 rtn_tp;
	int error;

	if (!kc)
		return -EINVAL;

	error = kc->clock_getres(which_clock, &rtn_tp);

	/* A NULL @tp is valid per POSIX; only copy out when provided */
	if (!error && tp && put_timespec64(&rtn_tp, tp))
		error = -EFAULT;

	return error;
}
|---|
| 1271 |  | 
|---|
| 1272 | #ifdef CONFIG_COMPAT_32BIT_TIME | 
|---|
| 1273 |  | 
|---|
/* 32bit time_t variant of sys_clock_settime() */
SYSCALL_DEFINE2(clock_settime32, clockid_t, which_clock,
		struct old_timespec32 __user *, tp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 ts;

	if (!kc || !kc->clock_set)
		return -EINVAL;

	if (get_old_timespec32(&ts, tp))
		return -EFAULT;

	return kc->clock_set(which_clock, &ts);
}
|---|
| 1288 |  | 
|---|
/* 32bit time_t variant of sys_clock_gettime() */
SYSCALL_DEFINE2(clock_gettime32, clockid_t, which_clock,
		struct old_timespec32 __user *, tp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 ts;
	int err;

	if (!kc)
		return -EINVAL;

	err = kc->clock_get_timespec(which_clock, &ts);

	if (!err && put_old_timespec32(&ts, tp))
		err = -EFAULT;

	return err;
}
|---|
| 1306 |  | 
|---|
/* 32bit variant of sys_clock_adjtime() using the legacy timex layout */
SYSCALL_DEFINE2(clock_adjtime32, clockid_t, which_clock,
		struct old_timex32 __user *, utp)
{
	struct __kernel_timex ktx;
	int err;

	err = get_old_timex32(&ktx, utp);
	if (err)
		return err;

	err = do_clock_adjtime(which_clock, &ktx);

	/* Non-negative return values carry the clock state, copy out */
	if (err >= 0 && put_old_timex32(utp, &ktx))
		return -EFAULT;

	return err;
}
|---|
| 1324 |  | 
|---|
/* 32bit time_t variant of sys_clock_getres() */
SYSCALL_DEFINE2(clock_getres_time32, clockid_t, which_clock,
		struct old_timespec32 __user *, tp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 ts;
	int err;

	if (!kc)
		return -EINVAL;

	err = kc->clock_getres(which_clock, &ts);
	/* A NULL @tp is valid per POSIX; only copy out when provided */
	if (!err && tp && put_old_timespec32(&ts, tp))
		return -EFAULT;

	return err;
}
|---|
| 1341 |  | 
|---|
| 1342 | #endif | 
|---|
| 1343 |  | 
|---|
/*
 * sys_clock_nanosleep() for CLOCK_REALTIME and CLOCK_TAI
 */
static int common_nsleep(const clockid_t which_clock, int flags,
			 const struct timespec64 *rqtp)
{
	ktime_t texp = timespec64_to_ktime(*rqtp);

	return hrtimer_nanosleep(texp, flags & TIMER_ABSTIME ?
				 HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
				 which_clock);
}
|---|
| 1356 |  | 
|---|
/*
 * sys_clock_nanosleep() for CLOCK_MONOTONIC and CLOCK_BOOTTIME
 *
 * Absolute nanosleeps for these clocks are time-namespace adjusted.
 */
static int common_nsleep_timens(const clockid_t which_clock, int flags,
				const struct timespec64 *rqtp)
{
	ktime_t texp = timespec64_to_ktime(*rqtp);

	/* Translate the namespace-relative absolute time to host time */
	if (flags & TIMER_ABSTIME)
		texp = timens_ktime_to_host(which_clock, texp);

	return hrtimer_nanosleep(texp, flags & TIMER_ABSTIME ?
				 HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
				 which_clock);
}
|---|
| 1374 |  | 
|---|
/* Sleep on a clock with nanosecond resolution */
SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
		const struct __kernel_timespec __user *, rqtp,
		struct __kernel_timespec __user *, rmtp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 t;

	if (!kc)
		return -EINVAL;
	if (!kc->nsleep)
		return -EOPNOTSUPP;

	if (get_timespec64(&t, rqtp))
		return -EFAULT;

	if (!timespec64_valid(&t))
		return -EINVAL;
	/* Absolute sleeps never report remaining time */
	if (flags & TIMER_ABSTIME)
		rmtp = NULL;
	/* Prepare restart state for an interrupted relative sleep */
	current->restart_block.fn = do_no_restart_syscall;
	current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE;
	current->restart_block.nanosleep.rmtp = rmtp;

	return kc->nsleep(which_clock, flags, &t);
}
|---|
| 1400 |  | 
|---|
| 1401 | #ifdef CONFIG_COMPAT_32BIT_TIME | 
|---|
| 1402 |  | 
|---|
/* 32bit time_t variant of sys_clock_nanosleep() */
SYSCALL_DEFINE4(clock_nanosleep_time32, clockid_t, which_clock, int, flags,
		struct old_timespec32 __user *, rqtp,
		struct old_timespec32 __user *, rmtp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 t;

	if (!kc)
		return -EINVAL;
	if (!kc->nsleep)
		return -EOPNOTSUPP;

	if (get_old_timespec32(&t, rqtp))
		return -EFAULT;

	if (!timespec64_valid(&t))
		return -EINVAL;
	/* Absolute sleeps never report remaining time */
	if (flags & TIMER_ABSTIME)
		rmtp = NULL;
	/* Prepare restart state for an interrupted relative sleep */
	current->restart_block.fn = do_no_restart_syscall;
	current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE;
	current->restart_block.nanosleep.compat_rmtp = rmtp;

	return kc->nsleep(which_clock, flags, &t);
}
|---|
| 1428 |  | 
|---|
| 1429 | #endif | 
|---|
| 1430 |  | 
|---|
/*
 * CLOCK_REALTIME: hrtimer based, supports timers and nanosleep, and is
 * the only clock here that can be both set and adjusted.
 */
static const struct k_clock clock_realtime = {
	.clock_getres		= posix_get_hrtimer_res,
	.clock_get_timespec	= posix_get_realtime_timespec,
	.clock_get_ktime	= posix_get_realtime_ktime,
	.clock_set		= posix_clock_realtime_set,
	.clock_adj		= posix_clock_realtime_adj,
	.nsleep			= common_nsleep,
	.timer_create		= common_timer_create,
	.timer_set		= common_timer_set,
	.timer_get		= common_timer_get,
	.timer_del		= common_timer_del,
	.timer_rearm		= common_hrtimer_rearm,
	.timer_forward		= common_hrtimer_forward,
	.timer_remaining	= common_hrtimer_remaining,
	.timer_try_to_cancel	= common_hrtimer_try_to_cancel,
	.timer_wait_running	= common_timer_wait_running,
	.timer_arm		= common_hrtimer_arm,
};
|---|
| 1449 |  | 
|---|
/*
 * CLOCK_MONOTONIC: hrtimer based, supports timers and nanosleep.
 * Not settable/adjustable; nanosleep goes through the time namespace
 * aware variant.
 */
static const struct k_clock clock_monotonic = {
	.clock_getres		= posix_get_hrtimer_res,
	.clock_get_timespec	= posix_get_monotonic_timespec,
	.clock_get_ktime	= posix_get_monotonic_ktime,
	.nsleep			= common_nsleep_timens,
	.timer_create		= common_timer_create,
	.timer_set		= common_timer_set,
	.timer_get		= common_timer_get,
	.timer_del		= common_timer_del,
	.timer_rearm		= common_hrtimer_rearm,
	.timer_forward		= common_hrtimer_forward,
	.timer_remaining	= common_hrtimer_remaining,
	.timer_try_to_cancel	= common_hrtimer_try_to_cancel,
	.timer_wait_running	= common_timer_wait_running,
	.timer_arm		= common_hrtimer_arm,
};
|---|
| 1466 |  | 
|---|
/* CLOCK_MONOTONIC_RAW: read-only clock, no timer or nanosleep support */
static const struct k_clock clock_monotonic_raw = {
	.clock_getres		= posix_get_hrtimer_res,
	.clock_get_timespec	= posix_get_monotonic_raw,
};
|---|
| 1471 |  | 
|---|
/* CLOCK_REALTIME_COARSE: read-only, tick-granular resolution, no timers */
static const struct k_clock clock_realtime_coarse = {
	.clock_getres		= posix_get_coarse_res,
	.clock_get_timespec	= posix_get_realtime_coarse,
};
|---|
| 1476 |  | 
|---|
/* CLOCK_MONOTONIC_COARSE: read-only, tick-granular resolution, no timers */
static const struct k_clock clock_monotonic_coarse = {
	.clock_getres		= posix_get_coarse_res,
	.clock_get_timespec	= posix_get_monotonic_coarse,
};
|---|
| 1481 |  | 
|---|
/*
 * CLOCK_TAI: hrtimer based, supports timers and nanosleep.
 * Not settable/adjustable.
 */
static const struct k_clock clock_tai = {
	.clock_getres		= posix_get_hrtimer_res,
	.clock_get_ktime	= posix_get_tai_ktime,
	.clock_get_timespec	= posix_get_tai_timespec,
	.nsleep			= common_nsleep,
	.timer_create		= common_timer_create,
	.timer_set		= common_timer_set,
	.timer_get		= common_timer_get,
	.timer_del		= common_timer_del,
	.timer_rearm		= common_hrtimer_rearm,
	.timer_forward		= common_hrtimer_forward,
	.timer_remaining	= common_hrtimer_remaining,
	.timer_try_to_cancel	= common_hrtimer_try_to_cancel,
	.timer_wait_running	= common_timer_wait_running,
	.timer_arm		= common_hrtimer_arm,
};
|---|
| 1498 |  | 
|---|
/*
 * CLOCK_BOOTTIME: hrtimer based, supports timers and nanosleep.
 * Nanosleep goes through the time namespace aware variant.
 */
static const struct k_clock clock_boottime = {
	.clock_getres		= posix_get_hrtimer_res,
	.clock_get_ktime	= posix_get_boottime_ktime,
	.clock_get_timespec	= posix_get_boottime_timespec,
	.nsleep			= common_nsleep_timens,
	.timer_create		= common_timer_create,
	.timer_set		= common_timer_set,
	.timer_get		= common_timer_get,
	.timer_del		= common_timer_del,
	.timer_rearm		= common_hrtimer_rearm,
	.timer_forward		= common_hrtimer_forward,
	.timer_remaining	= common_hrtimer_remaining,
	.timer_try_to_cancel	= common_hrtimer_try_to_cancel,
	.timer_wait_running	= common_timer_wait_running,
	.timer_arm		= common_hrtimer_arm,
};
|---|
| 1515 |  | 
|---|
/*
 * Lookup table for the non-negative clock IDs, indexed by clockid.
 * Negative IDs (dynamic and CPU clocks) are handled separately in
 * clockid_to_kclock().
 */
static const struct k_clock * const posix_clocks[] = {
	[CLOCK_REALTIME]		= &clock_realtime,
	[CLOCK_MONOTONIC]		= &clock_monotonic,
	[CLOCK_PROCESS_CPUTIME_ID]	= &clock_process,
	[CLOCK_THREAD_CPUTIME_ID]	= &clock_thread,
	[CLOCK_MONOTONIC_RAW]		= &clock_monotonic_raw,
	[CLOCK_REALTIME_COARSE]		= &clock_realtime_coarse,
	[CLOCK_MONOTONIC_COARSE]	= &clock_monotonic_coarse,
	[CLOCK_BOOTTIME]		= &clock_boottime,
	[CLOCK_REALTIME_ALARM]		= &alarm_clock,
	[CLOCK_BOOTTIME_ALARM]		= &alarm_clock,
	[CLOCK_TAI]			= &clock_tai,
#ifdef CONFIG_POSIX_AUX_CLOCKS
	[CLOCK_AUX ... CLOCK_AUX_LAST]	= &clock_aux,
#endif
};
|---|
| 1532 |  | 
|---|
| 1533 | static const struct k_clock *clockid_to_kclock(const clockid_t id) | 
|---|
| 1534 | { | 
|---|
| 1535 | clockid_t idx = id; | 
|---|
| 1536 |  | 
|---|
| 1537 | if (id < 0) { | 
|---|
| 1538 | return (id & CLOCKFD_MASK) == CLOCKFD ? | 
|---|
| 1539 | &clock_posix_dynamic : &clock_posix_cpu; | 
|---|
| 1540 | } | 
|---|
| 1541 |  | 
|---|
| 1542 | if (id >= ARRAY_SIZE(posix_clocks)) | 
|---|
| 1543 | return NULL; | 
|---|
| 1544 |  | 
|---|
| 1545 | return posix_clocks[array_index_nospec(idx, ARRAY_SIZE(posix_clocks))]; | 
|---|
| 1546 | } | 
|---|
| 1547 |  | 
|---|
| 1548 | static int __init posixtimer_init(void) | 
|---|
| 1549 | { | 
|---|
| 1550 | unsigned long i, size; | 
|---|
| 1551 | unsigned int shift; | 
|---|
| 1552 |  | 
|---|
| 1553 | posix_timers_cache = kmem_cache_create( "posix_timers_cache", | 
|---|
| 1554 | sizeof(struct k_itimer), | 
|---|
| 1555 | __alignof__(struct k_itimer), | 
|---|
| 1556 | SLAB_ACCOUNT, NULL); | 
|---|
| 1557 |  | 
|---|
| 1558 | if (IS_ENABLED(CONFIG_BASE_SMALL)) | 
|---|
| 1559 | size = 512; | 
|---|
| 1560 | else | 
|---|
| 1561 | size = roundup_pow_of_two(512 * num_possible_cpus()); | 
|---|
| 1562 |  | 
|---|
| 1563 | timer_buckets = alloc_large_system_hash(tablename: "posixtimers", bucketsize: sizeof(*timer_buckets), | 
|---|
| 1564 | numentries: size, scale: 0, flags: 0, hash_shift: &shift, NULL, low_limit: size, high_limit: size); | 
|---|
| 1565 | size = 1UL << shift; | 
|---|
| 1566 | timer_hashmask = size - 1; | 
|---|
| 1567 |  | 
|---|
| 1568 | for (i = 0; i < size; i++) { | 
|---|
| 1569 | spin_lock_init(&timer_buckets[i].lock); | 
|---|
| 1570 | INIT_HLIST_HEAD(&timer_buckets[i].head); | 
|---|
| 1571 | } | 
|---|
| 1572 | return 0; | 
|---|
| 1573 | } | 
|---|
| 1574 | core_initcall(posixtimer_init); | 
|---|
| 1575 |  | 
|---|