/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KERNEL_STAT_H
#define _LINUX_KERNEL_STAT_H

#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/vtime.h>
#include <asm/irq.h>

/*
 * 'kernel_stat.h' contains the definitions needed for doing
 * some kernel statistics (CPU usage, context switches ...),
 * used by rstatd/perfmeter
 */

enum cpu_usage_stat {
	CPUTIME_USER,
	CPUTIME_NICE,
	CPUTIME_SYSTEM,
	CPUTIME_SOFTIRQ,
	CPUTIME_IRQ,
	CPUTIME_IDLE,
	CPUTIME_IOWAIT,
	CPUTIME_STEAL,
	CPUTIME_GUEST,
	CPUTIME_GUEST_NICE,
#ifdef CONFIG_SCHED_CORE
	CPUTIME_FORCEIDLE,
#endif
	NR_STATS,
};

struct kernel_cpustat {
	u64 cpustat[NR_STATS];
};

struct kernel_stat {
	unsigned long irqs_sum;
	unsigned int softirqs[NR_SOFTIRQS];
};

DECLARE_PER_CPU(struct kernel_stat, kstat);
DECLARE_PER_CPU(struct kernel_cpustat, kernel_cpustat);

/* Must have preemption disabled for this to be meaningful. */
#define kstat_this_cpu this_cpu_ptr(&kstat)
#define kcpustat_this_cpu this_cpu_ptr(&kernel_cpustat)
#define kstat_cpu(cpu) per_cpu(kstat, cpu)
#define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu)
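
/*
 * Illustrative only, not part of the API above: a minimal sketch of how the
 * per-CPU accessors tend to be used by /proc/stat style readers. The helper
 * name total_user_cputime() is hypothetical.
 *
 *	static u64 total_user_cputime(void)
 *	{
 *		u64 sum = 0;
 *		int cpu;
 *
 *		for_each_possible_cpu(cpu)
 *			sum += kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
 *		return sum;
 *	}
 */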

extern unsigned long long nr_context_switches_cpu(int cpu);
extern unsigned long long nr_context_switches(void);

extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
extern void kstat_incr_irq_this_cpu(unsigned int irq);

static inline void kstat_incr_softirqs_this_cpu(unsigned int irq)
{
	__this_cpu_inc(kstat.softirqs[irq]);
}

static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
{
	return kstat_cpu(cpu).softirqs[irq];
}

static inline unsigned int kstat_cpu_softirqs_sum(int cpu)
{
	int i;
	unsigned int sum = 0;

	for (i = 0; i < NR_SOFTIRQS; i++)
		sum += kstat_softirqs_cpu(i, cpu);

	return sum;
}
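
/*
 * Illustrative only: a hypothetical per-CPU dump of one softirq counter,
 * in the spirit of /proc/softirqs, built on the helpers above.
 *
 *	static void dump_timer_softirqs(void)
 *	{
 *		int cpu;
 *
 *		for_each_possible_cpu(cpu)
 *			pr_info("cpu%d: %u\n", cpu,
 *				kstat_softirqs_cpu(TIMER_SOFTIRQ, cpu));
 *	}
 */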

#ifdef CONFIG_GENERIC_IRQ_STAT_SNAPSHOT
extern void kstat_snapshot_irqs(void);
extern unsigned int kstat_get_irq_since_snapshot(unsigned int irq);
#else
static inline void kstat_snapshot_irqs(void) { }
static inline unsigned int kstat_get_irq_since_snapshot(unsigned int irq) { return 0; }
#endif
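
/*
 * Illustrative only: the intended snapshot pattern for callers that want
 * "interrupts since the last look" rather than the raw since-boot counts
 * (a sketch, assuming CONFIG_GENERIC_IRQ_STAT_SNAPSHOT is enabled):
 *
 *	kstat_snapshot_irqs();
 *	... some time later ...
 *	delta = kstat_get_irq_since_snapshot(irq);
 */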

/*
 * Number of interrupts per specific IRQ source, since bootup
 */
extern unsigned int kstat_irqs_usr(unsigned int irq);

/*
 * Number of interrupts per cpu, since bootup
 */
static inline unsigned long kstat_cpu_irqs_sum(unsigned int cpu)
{
	return kstat_cpu(cpu).irqs_sum;
}
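
/*
 * Illustrative only: a hypothetical system-wide interrupt total, in the
 * spirit of the "intr" line of /proc/stat; the helper name is made up.
 *
 *	static u64 total_irqs(void)
 *	{
 *		u64 sum = 0;
 *		int cpu;
 *
 *		for_each_possible_cpu(cpu)
 *			sum += kstat_cpu_irqs_sum(cpu);
 *		return sum;
 *	}
 */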

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
extern u64 kcpustat_field(struct kernel_cpustat *kcpustat,
			  enum cpu_usage_stat usage, int cpu);
extern void kcpustat_cpu_fetch(struct kernel_cpustat *dst, int cpu);
#else
static inline u64 kcpustat_field(struct kernel_cpustat *kcpustat,
				 enum cpu_usage_stat usage, int cpu)
{
	return kcpustat->cpustat[usage];
}

static inline void kcpustat_cpu_fetch(struct kernel_cpustat *dst, int cpu)
{
	*dst = kcpustat_cpu(cpu);
}

#endif
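
/*
 * Illustrative only: readers that must also be correct with
 * CONFIG_VIRT_CPU_ACCOUNTING_GEN (where a nohz_full CPU's counters may lag)
 * go through the fetch helpers rather than reading kcpustat_cpu() directly.
 * A sketch:
 *
 *	struct kernel_cpustat snap;
 *	u64 idle, user;
 *
 *	kcpustat_cpu_fetch(&snap, cpu);
 *	idle = snap.cpustat[CPUTIME_IDLE];
 *	user = kcpustat_field(&kcpustat_cpu(cpu), CPUTIME_USER, cpu);
 */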

extern void account_user_time(struct task_struct *, u64);
extern void account_guest_time(struct task_struct *, u64);
extern void account_system_time(struct task_struct *, int, u64);
extern void account_system_index_time(struct task_struct *, u64,
				      enum cpu_usage_stat);
extern void account_steal_time(u64);
extern void account_idle_time(u64);
extern u64 get_idle_time(struct kernel_cpustat *kcs, int cpu);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
static inline void account_process_tick(struct task_struct *tsk, int user)
{
	vtime_flush(tsk);
}
#else
extern void account_process_tick(struct task_struct *, int user);
#endif
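
/*
 * The account_*() hooks above form the write side of these statistics: they
 * are driven by the timer tick and the vtime code (update_process_times()
 * ends up in account_process_tick(), for example), not by ordinary drivers.
 */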

extern void account_idle_ticks(unsigned long ticks);

#ifdef CONFIG_SCHED_CORE
extern void __account_forceidle_time(struct task_struct *tsk, u64 delta);
#endif

#endif /* _LINUX_KERNEL_STAT_H */