/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Authors: Waiman Long <longman@redhat.com>
 */

#ifndef LOCK_EVENT
#define LOCK_EVENT(name)	LOCKEVENT_ ## name,
#endif
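
/*
 * Each consumer of this file defines LOCK_EVENT() for its own purpose and
 * then #includes this list (the classic X-macro pattern); the fallback
 * definition above just turns each event into an enum value.  A minimal
 * sketch of the typical expansion, loosely based on lock_events.h:
 *
 *	enum lock_events {
 *	#include "lock_events_list.h"
 *		lockevent_num,		// total number of lock events
 *	};
 *
 * Redefining LOCK_EVENT() before another inclusion can likewise generate
 * name strings, per-cpu counters, etc. from this same single list.
 */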

#ifdef CONFIG_QUEUED_SPINLOCKS
#ifdef CONFIG_PARAVIRT_SPINLOCKS
/*
 * Locking events for PV qspinlock.
 */
LOCK_EVENT(pv_hash_hops)	/* Average # of hops per hashing operation */
LOCK_EVENT(pv_kick_unlock)	/* # of vCPU kicks issued at unlock time */
LOCK_EVENT(pv_kick_wake)	/* # of vCPU kicks for pv_latency_wake */
LOCK_EVENT(pv_latency_kick)	/* Average latency (ns) of vCPU kick */
LOCK_EVENT(pv_latency_wake)	/* Average latency (ns) of kick-to-wakeup */
LOCK_EVENT(pv_lock_stealing)	/* # of lock stealing operations */
LOCK_EVENT(pv_spurious_wakeup)	/* # of spurious wakeups in non-head vCPUs */
LOCK_EVENT(pv_wait_again)	/* # of waits after a queue head vCPU kick */
LOCK_EVENT(pv_wait_early)	/* # of early vCPU waits */
LOCK_EVENT(pv_wait_head)	/* # of vCPU waits at the queue head */
LOCK_EVENT(pv_wait_node)	/* # of vCPU waits at a non-head queue node */
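
/*
 * The "average" events above accumulate raw totals; the averages are
 * computed at report time by dividing by the matching kick count.  A
 * sketch of the arithmetic (assuming the reporting scheme used by the
 * qspinlock stat code):
 *
 *	avg hops/op	 = pv_hash_hops    / pv_kick_unlock
 *	avg kick latency = pv_latency_kick / pv_kick_unlock
 *	avg wake latency = pv_latency_wake / pv_kick_wake
 */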
#endif /* CONFIG_PARAVIRT_SPINLOCKS */

/*
 * Locking events for qspinlock
 *
 * Subtracting lock_use_node[234] from lock_slowpath will give you
 * lock_use_node1.
 */
LOCK_EVENT(lock_pending)	/* # of locking ops via pending code */
LOCK_EVENT(lock_slowpath)	/* # of locking ops via MCS lock queue */
LOCK_EVENT(lock_use_node2)	/* # of locking ops that use 2nd percpu node */
LOCK_EVENT(lock_use_node3)	/* # of locking ops that use 3rd percpu node */
LOCK_EVENT(lock_use_node4)	/* # of locking ops that use 4th percpu node */
LOCK_EVENT(lock_no_node)	/* # of locking ops w/o using percpu node */
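
/*
 * Worked example of the subtraction noted above (illustrative numbers
 * only): with lock_slowpath = 1000, lock_use_node2 = 40,
 * lock_use_node3 = 3 and lock_use_node4 = 0,
 *
 *	lock_use_node1 = 1000 - (40 + 3 + 0) = 957
 *
 * No lock_use_node1 event is emitted directly; it is always derived from
 * the other four counters.
 */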
#endif /* CONFIG_QUEUED_SPINLOCKS */

/*
 * Locking events for Resilient Queued Spin Lock
 */
LOCK_EVENT(rqspinlock_lock_timeout)	/* # of locking ops that time out */

/*
 * Locking events for rwsem
 */
LOCK_EVENT(rwsem_sleep_reader)	/* # of reader sleeps */
LOCK_EVENT(rwsem_sleep_writer)	/* # of writer sleeps */
LOCK_EVENT(rwsem_wake_reader)	/* # of reader wakeups */
LOCK_EVENT(rwsem_wake_writer)	/* # of writer wakeups */
LOCK_EVENT(rwsem_opt_lock)	/* # of write locks acquired via optimistic spinning */
LOCK_EVENT(rwsem_opt_fail)	/* # of failed optimistic spins */
LOCK_EVENT(rwsem_opt_nospin)	/* # of disabled optimistic spins */
LOCK_EVENT(rwsem_rlock)		/* # of read locks acquired */
LOCK_EVENT(rwsem_rlock_steal)	/* # of read locks acquired by lock stealing */
LOCK_EVENT(rwsem_rlock_fast)	/* # of fast read locks acquired */
LOCK_EVENT(rwsem_rlock_fail)	/* # of failed read lock acquisitions */
LOCK_EVENT(rwsem_rlock_handoff)	/* # of read lock handoffs */
LOCK_EVENT(rwsem_wlock)		/* # of write locks acquired */
LOCK_EVENT(rwsem_wlock_fail)	/* # of failed write lock acquisitions */
LOCK_EVENT(rwsem_wlock_handoff)	/* # of write lock handoffs */
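
/*
 * Call sites bump these counters with the lockevent_inc() macro from
 * lock_events.h, e.g. the rwsem slowpath does
 *
 *	lockevent_inc(rwsem_sleep_reader);
 *
 * just before a reader blocks (a sketch; the macro compiles away when
 * CONFIG_LOCK_EVENT_COUNTS is off).
 */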

/*
 * Locking events for rtlock_slowlock()
 */
LOCK_EVENT(rtlock_slowlock)	/* # of rtlock_slowlock() calls */
LOCK_EVENT(rtlock_slow_acq1)	/* # of locks acquired after wait_lock */
LOCK_EVENT(rtlock_slow_acq2)	/* # of locks acquired in the for loop */
LOCK_EVENT(rtlock_slow_sleep)	/* # of sleeps */
LOCK_EVENT(rtlock_slow_wake)	/* # of wakeups */

/*
 * Locking events for rt_mutex_slowlock()
 */
LOCK_EVENT(rtmutex_slowlock)	/* # of rt_mutex_slowlock() calls */
LOCK_EVENT(rtmutex_slow_block)	/* # of rt_mutex_slowlock_block() calls */
LOCK_EVENT(rtmutex_slow_acq1)	/* # of locks acquired after wait_lock */
LOCK_EVENT(rtmutex_slow_acq2)	/* # of locks acquired at the end */
LOCK_EVENT(rtmutex_slow_acq3)	/* # of locks acquired in *block() */
LOCK_EVENT(rtmutex_slow_sleep)	/* # of sleeps */
LOCK_EVENT(rtmutex_slow_wake)	/* # of wakeups */
LOCK_EVENT(rtmutex_deadlock)	/* # of rt_mutex_handle_deadlock() calls */

/*
 * Locking events for lockdep
 */
LOCK_EVENT(lockdep_acquire)	/* # of lockdep lock acquisition events */
LOCK_EVENT(lockdep_lock)	/* # of internal lockdep lock acquisitions */
LOCK_EVENT(lockdep_nocheck)	/* # of lock acquisitions with checking disabled */