Line data Source code
1 : /* SPDX-License-Identifier: GPL-2.0 */
2 : #ifndef _LINUX_PERCPU_COUNTER_H
3 : #define _LINUX_PERCPU_COUNTER_H
4 : /*
5 : * A simple "approximate counter" for use in ext2 and ext3 superblocks.
6 : *
7 : * WARNING: these things are HUGE. 4 kbytes per counter on 32-way P4.
8 : */
9 :
10 : #include <linux/spinlock.h>
11 : #include <linux/smp.h>
12 : #include <linux/list.h>
13 : #include <linux/threads.h>
14 : #include <linux/percpu.h>
15 : #include <linux/types.h>
16 : #include <linux/gfp.h>
17 :
18 : #ifdef CONFIG_SMP
19 :
/*
 * SMP form: a central approximate count plus an array of per-CPU deltas.
 * NOTE(review): @lock presumably serialises folding of per-CPU deltas
 * into @count — confirm against lib/percpu_counter.c.
 */
struct percpu_counter {
	raw_spinlock_t lock;
	s64 count;			/* central, approximate value */
#ifdef CONFIG_HOTPLUG_CPU
	struct list_head list;	/* All percpu_counters are on a list */
#endif
	s32 __percpu *counters;	/* per-CPU deltas; NULL until initialised */
};
28 :
29 : extern int percpu_counter_batch;
30 :
31 : int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
32 : struct lock_class_key *key);
33 :
/*
 * Initialise @fbc to @value, allocating the per-CPU array with @gfp.
 * A static lock_class_key per call site gives each counter user its own
 * lockdep class.  Returns 0 on success (see __percpu_counter_init()).
 */
#define percpu_counter_init(fbc, value, gfp)				\
	({								\
		static struct lock_class_key __key;			\
									\
		__percpu_counter_init(fbc, value, gfp, &__key);		\
	})
40 :
41 : void percpu_counter_destroy(struct percpu_counter *fbc);
42 : void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
43 : void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
44 : s32 batch);
45 : s64 __percpu_counter_sum(struct percpu_counter *fbc);
46 : int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
47 : void percpu_counter_sync(struct percpu_counter *fbc);
48 :
/*
 * Three-way compare of the counter against @rhs (-1, 0 or 1) using the
 * default global batch size.  See __percpu_counter_compare().
 */
static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	return __percpu_counter_compare(fbc, rhs, percpu_counter_batch);
}
53 :
/*
 * Add @amount to the counter using the default batch size
 * (percpu_counter_batch).
 */
static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add_batch(fbc, amount, percpu_counter_batch);
}
58 :
59 216 : static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
60 : {
61 433 : s64 ret = __percpu_counter_sum(fbc);
62 218 : return ret < 0 ? 0 : ret;
63 : }
64 :
/*
 * Accurate (and more expensive) read of the counter via
 * __percpu_counter_sum().
 */
static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return __percpu_counter_sum(fbc);
}
69 :
/*
 * Cheap, approximate read: returns only the central count, without
 * touching the per-CPU deltas.  Use percpu_counter_sum() when accuracy
 * matters.
 */
static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}
74 :
/*
 * percpu_counter_read() may return a small negative number even for a
 * counter that should logically never be negative; the helper below
 * clamps such reads to zero.
 */
80 66205 : static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
81 : {
82 : /* Prevent reloads of fbc->count */
83 66205 : s64 ret = READ_ONCE(fbc->count);
84 :
85 66205 : if (ret >= 0)
86 : return ret;
87 : return 0;
88 : }
89 :
90 2 : static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
91 : {
92 2 : return (fbc->counters != NULL);
93 : }
94 :
95 : #else /* !CONFIG_SMP */
96 :
/* UP form: a single exact count; no per-CPU state is required. */
struct percpu_counter {
	s64 count;
};
100 :
/*
 * Initialise the counter to @amount.  @gfp is unused on UP (nothing to
 * allocate); always succeeds and returns 0.
 */
static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount,
				      gfp_t gfp)
{
	fbc->count = amount;
	return 0;
}
107 :
/* Nothing to tear down in the UP case. */
static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}
111 :
/* Set the counter to @amount. */
static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
}
116 :
117 : static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
118 : {
119 : if (fbc->count > rhs)
120 : return 1;
121 : else if (fbc->count < rhs)
122 : return -1;
123 : else
124 : return 0;
125 : }
126 :
/* @batch is irrelevant on UP: the count is always exact. */
static inline int
__percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
{
	return percpu_counter_compare(fbc, rhs);
}
132 :
/*
 * Add @amount to the counter.  The preempt_disable()/preempt_enable()
 * pair keeps the read-modify-write of fbc->count from being preempted
 * mid-update, which is sufficient on a uniprocessor.
 */
static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	preempt_disable();
	fbc->count += amount;
	preempt_enable();
}
140 :
/* No batching on UP; @batch is ignored. */
static inline void
percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	percpu_counter_add(fbc, amount);
}
146 :
/* Read the (exact, on UP) counter value. */
static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}
151 :
152 : /*
153 : * percpu_counter is intended to track positive numbers. In the UP case the
154 : * number should never be negative.
155 : */
/*
 * On UP the count is exact, so no clamping is performed — the caller is
 * expected to keep the counter non-negative (see comment above).
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	return fbc->count;
}
160 :
/* On UP a plain read is already exact; no summation needed. */
static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	return percpu_counter_read_positive(fbc);
}
165 :
/* On UP a plain read is already exact; no summation needed. */
static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return percpu_counter_read(fbc);
}
170 :
/* UP counters need no allocation, so they are always "initialised". */
static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
{
	return true;
}
175 :
/* No per-CPU deltas to flush on UP. */
static inline void percpu_counter_sync(struct percpu_counter *fbc)
{
}
179 : #endif /* CONFIG_SMP */
180 :
/* Increment the counter by one. */
static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, 1);
}
185 :
/* Decrement the counter by one. */
static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, -1);
}
190 :
/*
 * Subtract @amount from the counter.
 * NOTE(review): negating S64_MIN would overflow — presumably callers
 * never pass it; confirm if that ever becomes possible.
 */
static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add(fbc, -amount);
}
195 :
196 : #endif /* _LINUX_PERCPU_COUNTER_H */
|