/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Authors: Waiman Long <longman@redhat.com>
 */

#ifndef LOCK_EVENT
#define LOCK_EVENT(name)	LOCKEVENT_ ## name,
#endif
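/*
 * How this list is consumed (an illustrative sketch, not part of this
 * file): the header is meant to be #included multiple times with
 * different LOCK_EVENT() definitions.  With the default definition
 * above, each entry expands to a LOCKEVENT_<name> enumerator, so a
 * consumer can build an enum of event IDs, e.g.:
 *
 *	enum lock_events {
 *	#include "lock_events_list.h"
 *		lockevent_num,
 *	};
 *
 * Redefining LOCK_EVENT(name) to #name before a second inclusion
 * likewise yields a matching table of event-name strings.  When lock
 * event counting is enabled, the per-event counts are typically
 * exposed via debugfs (e.g. <debugfs>/lock_event_counts).
 */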

#ifdef CONFIG_QUEUED_SPINLOCKS
#ifdef CONFIG_PARAVIRT_SPINLOCKS
/*
 * Locking events for PV qspinlock.
 */
LOCK_EVENT(pv_hash_hops)	/* Average # of hops per hashing operation */
LOCK_EVENT(pv_kick_unlock)	/* # of vCPU kicks issued at unlock time   */
LOCK_EVENT(pv_kick_wake)	/* # of vCPU kicks for pv_latency_wake	   */
LOCK_EVENT(pv_latency_kick)	/* Average latency (ns) of vCPU kick	   */
LOCK_EVENT(pv_latency_wake)	/* Average latency (ns) of kick-to-wakeup  */
LOCK_EVENT(pv_lock_stealing)	/* # of lock stealing operations	   */
LOCK_EVENT(pv_spurious_wakeup)	/* # of spurious wakeups in non-head vCPUs */
LOCK_EVENT(pv_wait_again)	/* # of waits after a queue head vCPU kick */
LOCK_EVENT(pv_wait_early)	/* # of early vCPU waits		   */
LOCK_EVENT(pv_wait_head)	/* # of vCPU waits at the queue head	   */
LOCK_EVENT(pv_wait_node)	/* # of vCPU waits at non-head queue nodes */
#endif /* CONFIG_PARAVIRT_SPINLOCKS */

/*
 * Locking events for qspinlock
 *
 * Subtracting lock_use_node[234] from lock_slowpath will give you
 * lock_use_node1.
 */
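/*
 * Spelled out, the note above amounts to:
 *
 *	lock_use_node1 = lock_slowpath
 *		       - (lock_use_node2 + lock_use_node3 + lock_use_node4)
 *
 * which is why no separate lock_use_node1 counter appears in this list.
 */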
LOCK_EVENT(lock_pending)	/* # of locking ops via pending code	     */
LOCK_EVENT(lock_slowpath)	/* # of locking ops via MCS lock queue	     */
LOCK_EVENT(lock_use_node2)	/* # of locking ops that use 2nd percpu node */
LOCK_EVENT(lock_use_node3)	/* # of locking ops that use 3rd percpu node */
LOCK_EVENT(lock_use_node4)	/* # of locking ops that use 4th percpu node */
LOCK_EVENT(lock_no_node)	/* # of locking ops w/o using percpu node    */
#endif /* CONFIG_QUEUED_SPINLOCKS */

/*
 * Locking events for Resilient Queued Spin Lock
 */
LOCK_EVENT(rqspinlock_lock_timeout)	/* # of locking ops that time out	*/

/*
 * Locking events for rwsem
 */
LOCK_EVENT(rwsem_sleep_reader)	/* # of reader sleeps			*/
LOCK_EVENT(rwsem_sleep_writer)	/* # of writer sleeps			*/
LOCK_EVENT(rwsem_wake_reader)	/* # of reader wakeups			*/
LOCK_EVENT(rwsem_wake_writer)	/* # of writer wakeups			*/
LOCK_EVENT(rwsem_opt_lock)	/* # of opt-acquired write locks	*/
LOCK_EVENT(rwsem_opt_fail)	/* # of failed optspins			*/
LOCK_EVENT(rwsem_opt_nospin)	/* # of disabled optspins		*/
LOCK_EVENT(rwsem_rlock)		/* # of read locks acquired		*/
LOCK_EVENT(rwsem_rlock_steal)	/* # of read locks by lock stealing	*/
LOCK_EVENT(rwsem_rlock_fast)	/* # of fast read locks acquired	*/
LOCK_EVENT(rwsem_rlock_fail)	/* # of failed read lock acquisitions	*/
LOCK_EVENT(rwsem_rlock_handoff)	/* # of read lock handoffs		*/
LOCK_EVENT(rwsem_wlock)		/* # of write locks acquired		*/
LOCK_EVENT(rwsem_wlock_fail)	/* # of failed write lock acquisitions	*/
LOCK_EVENT(rwsem_wlock_handoff)	/* # of write lock handoffs		*/

/*
 * Locking events for rtlock_slowlock()
 */
LOCK_EVENT(rtlock_slowlock)	/* # of rtlock_slowlock() calls		*/
LOCK_EVENT(rtlock_slow_acq1)	/* # of locks acquired after wait_lock	*/
LOCK_EVENT(rtlock_slow_acq2)	/* # of locks acquired in the for loop	*/
LOCK_EVENT(rtlock_slow_sleep)	/* # of sleeps				*/
LOCK_EVENT(rtlock_slow_wake)	/* # of wakeups				*/

/*
 * Locking events for rt_mutex_slowlock()
 */
LOCK_EVENT(rtmutex_slowlock)	/* # of rt_mutex_slowlock() calls	*/
LOCK_EVENT(rtmutex_slow_block)	/* # of rt_mutex_slowlock_block() calls	*/
LOCK_EVENT(rtmutex_slow_acq1)	/* # of locks acquired after wait_lock	*/
LOCK_EVENT(rtmutex_slow_acq2)	/* # of locks acquired at the end	*/
LOCK_EVENT(rtmutex_slow_acq3)	/* # of locks acquired in *block()	*/
LOCK_EVENT(rtmutex_slow_sleep)	/* # of sleeps				*/
LOCK_EVENT(rtmutex_slow_wake)	/* # of wakeups				*/
LOCK_EVENT(rtmutex_deadlock)	/* # of rt_mutex_handle_deadlock() calls */

/*
 * Locking events for lockdep
 */
LOCK_EVENT(lockdep_acquire)	/* # of lock acquisitions seen by lockdep */
LOCK_EVENT(lockdep_lock)	/* # of lockdep-internal lock acquisitions */
LOCK_EVENT(lockdep_nocheck)	/* # of acquisitions w/o full validation */