1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * PTP 1588 clock support
4 *
5 * Copyright (C) 2010 OMICRON electronics GmbH
6 */
7#include <linux/device.h>
8#include <linux/err.h>
9#include <linux/init.h>
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/posix-clock.h>
13#include <linux/pps_kernel.h>
14#include <linux/property.h>
15#include <linux/slab.h>
16#include <linux/syscalls.h>
17#include <linux/uaccess.h>
18#include <linux/debugfs.h>
19#include <linux/xarray.h>
20#include <uapi/linux/sched/types.h>
21
22#include "ptp_private.h"
23
24#define PTP_MAX_ALARMS 4
25#define PTP_PPS_DEFAULTS (PPS_CAPTUREASSERT | PPS_OFFSETASSERT)
26#define PTP_PPS_EVENT PPS_CAPTUREASSERT
27#define PTP_PPS_MODE (PTP_PPS_DEFAULTS | PPS_CANWAIT | PPS_TSFMT_TSPEC)
28
/* Device class backing /sys/class/ptp; per-device attributes come from
 * ptp_groups (defined in ptp_sysfs.c, declared in ptp_private.h).
 */
const struct class ptp_class = {
	.name = "ptp",
	.dev_groups = ptp_groups
};
33
34/* private globals */
35
36static dev_t ptp_devt;
37
38static DEFINE_XARRAY_ALLOC(ptp_clocks_map);
39
40/* time stamp event queue operations */
41
42static inline int queue_free(struct timestamp_event_queue *q)
43{
44 return PTP_MAX_TIMESTAMPS - queue_cnt(q) - 1;
45}
46
47static void enqueue_external_timestamp(struct timestamp_event_queue *queue,
48 struct ptp_clock_event *src)
49{
50 struct ptp_extts_event *dst;
51 struct timespec64 offset_ts;
52 unsigned long flags;
53 s64 seconds;
54 u32 remainder;
55
56 if (src->type == PTP_CLOCK_EXTTS) {
57 seconds = div_u64_rem(dividend: src->timestamp, divisor: 1000000000, remainder: &remainder);
58 } else if (src->type == PTP_CLOCK_EXTOFF) {
59 offset_ts = ns_to_timespec64(nsec: src->offset);
60 seconds = offset_ts.tv_sec;
61 remainder = offset_ts.tv_nsec;
62 } else {
63 WARN(1, "%s: unknown type %d\n", __func__, src->type);
64 return;
65 }
66
67 spin_lock_irqsave(&queue->lock, flags);
68
69 dst = &queue->buf[queue->tail];
70 dst->index = src->index;
71 dst->flags = PTP_EXTTS_EVENT_VALID;
72 dst->t.sec = seconds;
73 dst->t.nsec = remainder;
74 if (src->type == PTP_CLOCK_EXTOFF)
75 dst->flags |= PTP_EXT_OFFSET;
76
77 /* Both WRITE_ONCE() are paired with READ_ONCE() in queue_cnt() */
78 if (!queue_free(q: queue))
79 WRITE_ONCE(queue->head, (queue->head + 1) % PTP_MAX_TIMESTAMPS);
80
81 WRITE_ONCE(queue->tail, (queue->tail + 1) % PTP_MAX_TIMESTAMPS);
82
83 spin_unlock_irqrestore(lock: &queue->lock, flags);
84}
85
86/* posix clock implementation */
87
88static int ptp_clock_getres(struct posix_clock *pc, struct timespec64 *tp)
89{
90 tp->tv_sec = 0;
91 tp->tv_nsec = 1;
92 return 0;
93}
94
95static int ptp_clock_settime(struct posix_clock *pc, const struct timespec64 *tp)
96{
97 struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
98
99 if (ptp_clock_freerun(ptp)) {
100 pr_err_ratelimited("ptp: physical clock is free running\n");
101 return -EBUSY;
102 }
103
104 if (!timespec64_valid_settod(ts: tp))
105 return -EINVAL;
106
107 return ptp->info->settime64(ptp->info, tp);
108}
109
110static int ptp_clock_gettime(struct posix_clock *pc, struct timespec64 *tp)
111{
112 struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
113 int err;
114
115 if (ptp->info->gettimex64)
116 err = ptp->info->gettimex64(ptp->info, tp, NULL);
117 else
118 err = ptp->info->gettime64(ptp->info, tp);
119 return err;
120}
121
122static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx)
123{
124 struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
125 struct ptp_clock_info *ops;
126 int err = -EOPNOTSUPP;
127
128 if (tx->modes & (ADJ_SETOFFSET | ADJ_FREQUENCY | ADJ_OFFSET) &&
129 ptp_clock_freerun(ptp)) {
130 pr_err("ptp: physical clock is free running\n");
131 return -EBUSY;
132 }
133
134 ops = ptp->info;
135
136 if (tx->modes & ADJ_SETOFFSET) {
137 struct timespec64 ts, ts2;
138 ktime_t kt;
139 s64 delta;
140
141 ts.tv_sec = tx->time.tv_sec;
142 ts.tv_nsec = tx->time.tv_usec;
143
144 if (!(tx->modes & ADJ_NANO))
145 ts.tv_nsec *= 1000;
146
147 if ((unsigned long) ts.tv_nsec >= NSEC_PER_SEC)
148 return -EINVAL;
149
150 /* Make sure the offset is valid */
151 err = ptp_clock_gettime(pc, tp: &ts2);
152 if (err)
153 return err;
154 ts2 = timespec64_add(lhs: ts2, rhs: ts);
155 if (!timespec64_valid_settod(ts: &ts2))
156 return -EINVAL;
157
158 kt = timespec64_to_ktime(ts);
159 delta = ktime_to_ns(kt);
160 err = ops->adjtime(ops, delta);
161 } else if (tx->modes & ADJ_FREQUENCY) {
162 long ppb = scaled_ppm_to_ppb(ppm: tx->freq);
163 if (ppb > ops->max_adj || ppb < -ops->max_adj)
164 return -ERANGE;
165 err = ops->adjfine(ops, tx->freq);
166 if (!err)
167 ptp->dialed_frequency = tx->freq;
168 } else if (tx->modes & ADJ_OFFSET) {
169 if (ops->adjphase) {
170 s32 max_phase_adj = ops->getmaxphase(ops);
171 s32 offset = tx->offset;
172
173 if (!(tx->modes & ADJ_NANO))
174 offset *= NSEC_PER_USEC;
175
176 if (offset > max_phase_adj || offset < -max_phase_adj)
177 return -ERANGE;
178
179 err = ops->adjphase(ops, offset);
180 }
181 } else if (tx->modes == 0) {
182 tx->freq = ptp->dialed_frequency;
183 err = 0;
184 }
185
186 return err;
187}
188
/* posix_clock operations template; copied into each ptp_clock at
 * registration time.  The ioctl/open/release/poll/read handlers are
 * the chardev interface implemented in ptp_chardev.c.
 */
static struct posix_clock_operations ptp_clock_ops = {
	.owner		= THIS_MODULE,
	.clock_adjtime	= ptp_clock_adjtime,
	.clock_gettime	= ptp_clock_gettime,
	.clock_getres	= ptp_clock_getres,
	.clock_settime	= ptp_clock_settime,
	.ioctl		= ptp_ioctl,
	.open		= ptp_open,
	.release	= ptp_release,
	.poll		= ptp_poll,
	.read		= ptp_read,
};
201
202static void ptp_clock_release(struct device *dev)
203{
204 struct ptp_clock *ptp = container_of(dev, struct ptp_clock, dev);
205 struct timestamp_event_queue *tsevq;
206 unsigned long flags;
207
208 ptp_cleanup_pin_groups(ptp);
209 kfree(objp: ptp->vclock_index);
210 mutex_destroy(lock: &ptp->pincfg_mux);
211 mutex_destroy(lock: &ptp->n_vclocks_mux);
212 /* Delete first entry */
213 spin_lock_irqsave(&ptp->tsevqs_lock, flags);
214 tsevq = list_first_entry(&ptp->tsevqs, struct timestamp_event_queue,
215 qlist);
216 list_del(entry: &tsevq->qlist);
217 spin_unlock_irqrestore(lock: &ptp->tsevqs_lock, flags);
218 bitmap_free(bitmap: tsevq->mask);
219 kfree(objp: tsevq);
220 debugfs_remove(dentry: ptp->debugfs_root);
221 xa_erase(&ptp_clocks_map, index: ptp->index);
222 kfree(objp: ptp);
223}
224
225static int ptp_getcycles64(struct ptp_clock_info *info, struct timespec64 *ts)
226{
227 if (info->getcyclesx64)
228 return info->getcyclesx64(info, ts, NULL);
229 else
230 return info->gettime64(info, ts);
231}
232
/* Stub installed when a driver provides no ->enable callback: all
 * event-generation requests are rejected as unsupported.
 */
static int ptp_enable(struct ptp_clock_info *ptp, struct ptp_clock_request *request, int on)
{
	return -EOPNOTSUPP;
}
237
238static void ptp_aux_kworker(struct kthread_work *work)
239{
240 struct ptp_clock *ptp = container_of(work, struct ptp_clock,
241 aux_work.work);
242 struct ptp_clock_info *info = ptp->info;
243 long delay;
244
245 delay = info->do_aux_work(info);
246
247 if (delay >= 0)
248 kthread_queue_delayed_work(worker: ptp->kworker, dwork: &ptp->aux_work, delay);
249}
250
251static ssize_t ptp_n_perout_loopback_read(struct file *filep,
252 char __user *buffer,
253 size_t count, loff_t *pos)
254{
255 struct ptp_clock *ptp = filep->private_data;
256 char buf[12] = {};
257
258 snprintf(buf, size: sizeof(buf), fmt: "%d\n", ptp->info->n_per_lp);
259
260 return simple_read_from_buffer(to: buffer, count, ppos: pos, from: buf, available: strlen(buf));
261}
262
/* Read-only debugfs file exposing the loopback channel count. */
static const struct file_operations ptp_n_perout_loopback_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = ptp_n_perout_loopback_read,
};
268
269static ssize_t ptp_perout_loopback_write(struct file *filep,
270 const char __user *buffer,
271 size_t count, loff_t *ppos)
272{
273 struct ptp_clock *ptp = filep->private_data;
274 struct ptp_clock_info *ops = ptp->info;
275 unsigned int index, enable;
276 int len, cnt, err;
277 char buf[32] = {};
278
279 if (*ppos || !count)
280 return -EINVAL;
281
282 if (count >= sizeof(buf))
283 return -ENOSPC;
284
285 len = simple_write_to_buffer(to: buf, available: sizeof(buf) - 1,
286 ppos, from: buffer, count);
287 if (len < 0)
288 return len;
289
290 buf[len] = '\0';
291 cnt = sscanf(buf, "%u %u", &index, &enable);
292 if (cnt != 2)
293 return -EINVAL;
294
295 if (index >= ops->n_per_lp)
296 return -EINVAL;
297
298 if (enable != 0 && enable != 1)
299 return -EINVAL;
300
301 err = ops->perout_loopback(ops, index, enable);
302 if (err)
303 return err;
304
305 return count;
306}
307
/* Write-only debugfs file for toggling periodic-output loopback. */
static const struct file_operations ptp_perout_loopback_ops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = ptp_perout_loopback_write,
};
313
314/* public interface */
315
316struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
317 struct device *parent)
318{
319 struct ptp_clock *ptp;
320 struct timestamp_event_queue *queue = NULL;
321 int err, index, major = MAJOR(ptp_devt);
322 char debugfsname[16];
323 size_t size;
324
325 if (info->n_alarm > PTP_MAX_ALARMS)
326 return ERR_PTR(error: -EINVAL);
327
328 /* Initialize a clock structure. */
329 ptp = kzalloc(sizeof(struct ptp_clock), GFP_KERNEL);
330 if (!ptp) {
331 err = -ENOMEM;
332 goto no_memory;
333 }
334
335 err = xa_alloc(xa: &ptp_clocks_map, id: &index, entry: ptp, xa_limit_31b,
336 GFP_KERNEL);
337 if (err)
338 goto no_slot;
339
340 ptp->clock.ops = ptp_clock_ops;
341 ptp->info = info;
342 ptp->devid = MKDEV(major, index);
343 ptp->index = index;
344 INIT_LIST_HEAD(list: &ptp->tsevqs);
345 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
346 if (!queue) {
347 err = -ENOMEM;
348 goto no_memory_queue;
349 }
350 list_add_tail(new: &queue->qlist, head: &ptp->tsevqs);
351 spin_lock_init(&ptp->tsevqs_lock);
352 queue->mask = bitmap_alloc(PTP_MAX_CHANNELS, GFP_KERNEL);
353 if (!queue->mask) {
354 err = -ENOMEM;
355 goto no_memory_bitmap;
356 }
357 bitmap_set(map: queue->mask, start: 0, PTP_MAX_CHANNELS);
358 spin_lock_init(&queue->lock);
359 mutex_init(&ptp->pincfg_mux);
360 mutex_init(&ptp->n_vclocks_mux);
361 init_waitqueue_head(&ptp->tsev_wq);
362
363 if (ptp->info->getcycles64 || ptp->info->getcyclesx64) {
364 ptp->has_cycles = true;
365 if (!ptp->info->getcycles64 && ptp->info->getcyclesx64)
366 ptp->info->getcycles64 = ptp_getcycles64;
367 } else {
368 /* Free running cycle counter not supported, use time. */
369 ptp->info->getcycles64 = ptp_getcycles64;
370
371 if (ptp->info->gettimex64)
372 ptp->info->getcyclesx64 = ptp->info->gettimex64;
373
374 if (ptp->info->getcrosststamp)
375 ptp->info->getcrosscycles = ptp->info->getcrosststamp;
376 }
377
378 if (!ptp->info->enable)
379 ptp->info->enable = ptp_enable;
380
381 if (ptp->info->do_aux_work) {
382 kthread_init_delayed_work(&ptp->aux_work, ptp_aux_kworker);
383 ptp->kworker = kthread_run_worker(0, "ptp%d", ptp->index);
384 if (IS_ERR(ptr: ptp->kworker)) {
385 err = PTR_ERR(ptr: ptp->kworker);
386 pr_err("failed to create ptp aux_worker %d\n", err);
387 goto kworker_err;
388 }
389 }
390
391 /* PTP virtual clock is being registered under physical clock */
392 if (parent && parent->class && parent->class->name &&
393 strcmp(parent->class->name, "ptp") == 0)
394 ptp->is_virtual_clock = true;
395
396 if (!ptp->is_virtual_clock) {
397 ptp->max_vclocks = PTP_DEFAULT_MAX_VCLOCKS;
398
399 size = sizeof(int) * ptp->max_vclocks;
400 ptp->vclock_index = kzalloc(size, GFP_KERNEL);
401 if (!ptp->vclock_index) {
402 err = -ENOMEM;
403 goto no_mem_for_vclocks;
404 }
405 }
406
407 err = ptp_populate_pin_groups(ptp);
408 if (err)
409 goto no_pin_groups;
410
411 /* Register a new PPS source. */
412 if (info->pps) {
413 struct pps_source_info pps;
414 memset(s: &pps, c: 0, n: sizeof(pps));
415 snprintf(buf: pps.name, PPS_MAX_NAME_LEN, fmt: "ptp%d", index);
416 pps.mode = PTP_PPS_MODE;
417 pps.owner = info->owner;
418 ptp->pps_source = pps_register_source(info: &pps, PTP_PPS_DEFAULTS);
419 if (IS_ERR(ptr: ptp->pps_source)) {
420 err = PTR_ERR(ptr: ptp->pps_source);
421 pr_err("failed to register pps source\n");
422 goto no_pps;
423 }
424 ptp->pps_source->lookup_cookie = ptp;
425 }
426
427 /* Initialize a new device of our class in our clock structure. */
428 device_initialize(dev: &ptp->dev);
429 ptp->dev.devt = ptp->devid;
430 ptp->dev.class = &ptp_class;
431 ptp->dev.parent = parent;
432 ptp->dev.groups = ptp->pin_attr_groups;
433 ptp->dev.release = ptp_clock_release;
434 dev_set_drvdata(dev: &ptp->dev, data: ptp);
435 dev_set_name(dev: &ptp->dev, name: "ptp%d", ptp->index);
436
437 /* Create a posix clock and link it to the device. */
438 err = posix_clock_register(clk: &ptp->clock, dev: &ptp->dev);
439 if (err) {
440 if (ptp->pps_source)
441 pps_unregister_source(pps: ptp->pps_source);
442
443 if (ptp->kworker)
444 kthread_destroy_worker(worker: ptp->kworker);
445
446 put_device(dev: &ptp->dev);
447
448 pr_err("failed to create posix clock\n");
449 return ERR_PTR(error: err);
450 }
451
452 /* Debugfs initialization */
453 snprintf(buf: debugfsname, size: sizeof(debugfsname), fmt: "ptp%d", ptp->index);
454 ptp->debugfs_root = debugfs_create_dir(name: debugfsname, NULL);
455 if (info->n_per_lp > 0 && info->perout_loopback) {
456 debugfs_create_file("n_perout_loopback", 0400, ptp->debugfs_root,
457 ptp, &ptp_n_perout_loopback_fops);
458 debugfs_create_file("perout_loopback", 0200, ptp->debugfs_root,
459 ptp, &ptp_perout_loopback_ops);
460 }
461
462 return ptp;
463
464no_pps:
465 ptp_cleanup_pin_groups(ptp);
466no_pin_groups:
467 kfree(objp: ptp->vclock_index);
468no_mem_for_vclocks:
469 if (ptp->kworker)
470 kthread_destroy_worker(worker: ptp->kworker);
471kworker_err:
472 mutex_destroy(lock: &ptp->pincfg_mux);
473 mutex_destroy(lock: &ptp->n_vclocks_mux);
474 bitmap_free(bitmap: queue->mask);
475no_memory_bitmap:
476 list_del(entry: &queue->qlist);
477 kfree(objp: queue);
478no_memory_queue:
479 xa_erase(&ptp_clocks_map, index);
480no_slot:
481 kfree(objp: ptp);
482no_memory:
483 return ERR_PTR(error: err);
484}
485EXPORT_SYMBOL(ptp_clock_register);
486
487static int unregister_vclock(struct device *dev, void *data)
488{
489 struct ptp_clock *ptp = dev_get_drvdata(dev);
490
491 ptp_vclock_unregister(info_to_vclock(ptp->info));
492 return 0;
493}
494
495int ptp_clock_unregister(struct ptp_clock *ptp)
496{
497 if (ptp_vclock_in_use(ptp)) {
498 device_for_each_child(parent: &ptp->dev, NULL, fn: unregister_vclock);
499 }
500
501 /* Get the device to stop posix_clock_unregister() doing the last put
502 * and freeing the structure(s)
503 */
504 get_device(dev: &ptp->dev);
505
506 /* Wake up any userspace waiting for an event. */
507 ptp->defunct = 1;
508 wake_up_interruptible(&ptp->tsev_wq);
509
510 /* Tear down the POSIX clock, which removes the user interface. */
511 posix_clock_unregister(clk: &ptp->clock);
512
513 /* Disable all sources of event generation. */
514 ptp_disable_all_events(ptp);
515
516 if (ptp->kworker) {
517 kthread_cancel_delayed_work_sync(work: &ptp->aux_work);
518 kthread_destroy_worker(worker: ptp->kworker);
519 }
520
521 /* Release the clock's resources. */
522 if (ptp->pps_source)
523 pps_unregister_source(pps: ptp->pps_source);
524
525 /* The final put, normally here, will invoke ptp_clock_release(). */
526 put_device(dev: &ptp->dev);
527
528 return 0;
529}
530EXPORT_SYMBOL(ptp_clock_unregister);
531
532void ptp_clock_event(struct ptp_clock *ptp, struct ptp_clock_event *event)
533{
534 struct timestamp_event_queue *tsevq;
535 struct pps_event_time evt;
536 unsigned long flags;
537
538 switch (event->type) {
539
540 case PTP_CLOCK_ALARM:
541 break;
542
543 case PTP_CLOCK_EXTTS:
544 case PTP_CLOCK_EXTOFF:
545 /* Enqueue timestamp on selected queues */
546 spin_lock_irqsave(&ptp->tsevqs_lock, flags);
547 list_for_each_entry(tsevq, &ptp->tsevqs, qlist) {
548 if (test_bit((unsigned int)event->index, tsevq->mask))
549 enqueue_external_timestamp(queue: tsevq, src: event);
550 }
551 spin_unlock_irqrestore(lock: &ptp->tsevqs_lock, flags);
552 wake_up_interruptible(&ptp->tsev_wq);
553 break;
554
555 case PTP_CLOCK_PPS:
556 pps_get_ts(ts: &evt);
557 pps_event(pps: ptp->pps_source, ts: &evt, PTP_PPS_EVENT, NULL);
558 break;
559
560 case PTP_CLOCK_PPSUSR:
561 pps_event(pps: ptp->pps_source, ts: &event->pps_times,
562 PTP_PPS_EVENT, NULL);
563 break;
564 }
565}
566EXPORT_SYMBOL(ptp_clock_event);
567
/* Return the clock's index (the N in /dev/ptpN). */
int ptp_clock_index(struct ptp_clock *ptp)
{
	return ptp->index;
}
572EXPORT_SYMBOL(ptp_clock_index);
573
574static int ptp_clock_of_node_match(struct device *dev, const void *data)
575{
576 const struct device_node *parent_np = data;
577
578 return (dev->parent && dev_of_node(dev: dev->parent) == parent_np);
579}
580
581int ptp_clock_index_by_of_node(struct device_node *np)
582{
583 struct ptp_clock *ptp;
584 struct device *dev;
585 int phc_index;
586
587 dev = class_find_device(class: &ptp_class, NULL, data: np,
588 match: ptp_clock_of_node_match);
589 if (!dev)
590 return -1;
591
592 ptp = dev_get_drvdata(dev);
593 phc_index = ptp_clock_index(ptp);
594 put_device(dev);
595
596 return phc_index;
597}
598EXPORT_SYMBOL_GPL(ptp_clock_index_by_of_node);
599
600static int ptp_clock_dev_match(struct device *dev, const void *data)
601{
602 const struct device *parent = data;
603
604 return dev->parent == parent;
605}
606
607int ptp_clock_index_by_dev(struct device *parent)
608{
609 struct ptp_clock *ptp;
610 struct device *dev;
611 int phc_index;
612
613 dev = class_find_device(class: &ptp_class, NULL, data: parent,
614 match: ptp_clock_dev_match);
615 if (!dev)
616 return -1;
617
618 ptp = dev_get_drvdata(dev);
619 phc_index = ptp_clock_index(ptp);
620 put_device(dev);
621
622 return phc_index;
623}
624EXPORT_SYMBOL_GPL(ptp_clock_index_by_dev);
625
626int ptp_find_pin(struct ptp_clock *ptp,
627 enum ptp_pin_function func, unsigned int chan)
628{
629 struct ptp_pin_desc *pin = NULL;
630 int i;
631
632 for (i = 0; i < ptp->info->n_pins; i++) {
633 if (ptp->info->pin_config[i].func == func &&
634 ptp->info->pin_config[i].chan == chan) {
635 pin = &ptp->info->pin_config[i];
636 break;
637 }
638 }
639
640 return pin ? i : -1;
641}
642EXPORT_SYMBOL(ptp_find_pin);
643
644int ptp_find_pin_unlocked(struct ptp_clock *ptp,
645 enum ptp_pin_function func, unsigned int chan)
646{
647 int result;
648
649 mutex_lock(lock: &ptp->pincfg_mux);
650
651 result = ptp_find_pin(ptp, func, chan);
652
653 mutex_unlock(lock: &ptp->pincfg_mux);
654
655 return result;
656}
657EXPORT_SYMBOL(ptp_find_pin_unlocked);
658
659int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay)
660{
661 return kthread_mod_delayed_work(worker: ptp->kworker, dwork: &ptp->aux_work, delay);
662}
663EXPORT_SYMBOL(ptp_schedule_worker);
664
665void ptp_cancel_worker_sync(struct ptp_clock *ptp)
666{
667 kthread_cancel_delayed_work_sync(work: &ptp->aux_work);
668}
669EXPORT_SYMBOL(ptp_cancel_worker_sync);
670
671/* module operations */
672
673static void __exit ptp_exit(void)
674{
675 class_unregister(class: &ptp_class);
676 unregister_chrdev_region(ptp_devt, MINORMASK + 1);
677 xa_destroy(&ptp_clocks_map);
678}
679
680static int __init ptp_init(void)
681{
682 int err;
683
684 err = class_register(class: &ptp_class);
685 if (err) {
686 pr_err("ptp: failed to allocate class\n");
687 return err;
688 }
689
690 err = alloc_chrdev_region(&ptp_devt, 0, MINORMASK + 1, "ptp");
691 if (err < 0) {
692 pr_err("ptp: failed to allocate device region\n");
693 goto no_region;
694 }
695
696 pr_info("PTP clock support registered\n");
697 return 0;
698
699no_region:
700 class_unregister(class: &ptp_class);
701 return err;
702}
703
704subsys_initcall(ptp_init);
705module_exit(ptp_exit);
706
707MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");
708MODULE_DESCRIPTION("PTP clocks support");
709MODULE_LICENSE("GPL");
710