/*
 * INETPEER - A storage for permanent information about peers
 *
 * This source is covered by the GNU GPL, the same as all kernel sources.
 *
 * Authors: Andrey V. Savochkin <saw@msu.ru>
 */

#include <linux/cache.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <net/ip.h>
#include <net/inetpeer.h>
#include <net/secure_seq.h>

/*
 * Theory of operations.
 * We keep one entry for each peer IP address.  Each node contains
 * long-lived information about the peer which does not depend on routes.
 *
 * Nodes are removed only when their reference count drops to zero.
 * Once that happens, a node may be removed after a sufficient amount of
 * time has passed since its last use.  A less-recently-used entry may also
 * be removed if the pool is overloaded, i.e. if the total number of
 * entries is greater than or equal to the threshold.
 *
 * The node pool is organised as an RB tree.
 * This implementation was not chosen just for fun: it prevents easy and
 * efficient DoS attacks based on creating hash collisions.  A huge number
 * of long-lived nodes in a single hash slot would significantly delay
 * lookups performed with BHs disabled.
 *
 * Serialisation issues.
 * 1.  Nodes may appear in the tree only with the pool lock held.
 * 2.  Nodes may disappear from the tree only with the pool lock held
 *     AND their reference count being 0.
 * 3.  base->total is modified under the pool lock.
 * 4.  struct inet_peer fields modification:
 *	rb_node: pool lock
 *	refcnt:	 atomic against modifications on other CPUs;
 *		 usually under some other lock to prevent the node from
 *		 disappearing
 *	daddr:	 unchangeable
 */
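
/* Illustrative sketch, not part of the original file: the serialisation
 * rules above translate into the usual seqlock reader pattern.  A lockless
 * lookup samples the sequence counter, walks the tree under RCU, and falls
 * back to the write-locked path if a writer interfered, e.g.:
 *
 *	rcu_read_lock();
 *	seq = read_seqbegin(&base->lock);
 *	... walk base->rb_root without taking the lock ...
 *	invalidated = read_seqretry(&base->lock, seq);
 *	rcu_read_unlock();
 *	if (!found && invalidated)
 *		... retry under write_seqlock_bh(&base->lock) ...
 *
 * inet_getpeer() below implements this two-step scheme.
 */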

static struct kmem_cache *peer_cachep __ro_after_init;

void inet_peer_base_init(struct inet_peer_base *bp)
{
	bp->rb_root = RB_ROOT;
	seqlock_init(&bp->lock);
	bp->total = 0;
}
EXPORT_SYMBOL_GPL(inet_peer_base_init);

#define PEER_MAX_GC 32

/* Exported for sysctl_net_ipv4. */
int inet_peer_threshold __read_mostly;		/* start to throw entries more
						 * aggressively at this stage */
int inet_peer_minttl __read_mostly = 120 * HZ;	/* TTL under high load: 120 sec */
int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;	/* usual time to live: 10 min */

/* Called from ip_output.c:ip_init */
void __init inet_initpeers(void)
{
	u64 nr_entries;

	/* 1% of physical memory */
	nr_entries = div64_ul((u64)totalram_pages() << PAGE_SHIFT,
			      100 * L1_CACHE_ALIGN(sizeof(struct inet_peer)));

	inet_peer_threshold = clamp_val(nr_entries, 4096, 65536 + 128);

	peer_cachep = kmem_cache_create("inet_peer_cache",
					sizeof(struct inet_peer),
					0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
					NULL);
}
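
/* Worked example, for illustration only (the 4 GiB of RAM and the 192-byte
 * cache-aligned struct inet_peer size are assumptions, not taken from this
 * file): 1% of 4 GiB is roughly 42.9 MB; divided by 192 bytes that gives
 * about 223,000 entries, which clamp_val() caps at 65536 + 128 = 65664.
 * Very small machines are clamped the other way, to at least 4096 entries.
 */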

/* Called with rcu_read_lock() or base->lock held */
static struct inet_peer *lookup(const struct inetpeer_addr *daddr,
				struct inet_peer_base *base,
				unsigned int seq,
				struct inet_peer *gc_stack[],
				unsigned int *gc_cnt,
				struct rb_node **parent_p,
				struct rb_node ***pp_p)
{
	struct rb_node **pp, *parent, *next;
	struct inet_peer *p;

	pp = &base->rb_root.rb_node;
	parent = NULL;
	while (1) {
		int cmp;

		next = rcu_dereference_raw(*pp);
		if (!next)
			break;
		parent = next;
		p = rb_entry(parent, struct inet_peer, rb_node);
		cmp = inetpeer_addr_cmp(daddr, &p->daddr);
		if (cmp == 0) {
			if (!refcount_inc_not_zero(&p->refcnt))
				break;
			return p;
		}
		if (gc_stack) {
			if (*gc_cnt < PEER_MAX_GC)
				gc_stack[(*gc_cnt)++] = p;
		} else if (unlikely(read_seqretry(&base->lock, seq))) {
			break;
		}
		if (cmp == -1)
			pp = &next->rb_left;
		else
			pp = &next->rb_right;
	}
	*parent_p = parent;
	*pp_p = pp;
	return NULL;
}

static void inetpeer_free_rcu(struct rcu_head *head)
{
	kmem_cache_free(peer_cachep, container_of(head, struct inet_peer, rcu));
}

/* Perform garbage collection on all items stacked during a lookup */
static void inet_peer_gc(struct inet_peer_base *base,
			 struct inet_peer *gc_stack[],
			 unsigned int gc_cnt)
{
	struct inet_peer *p;
	__u32 delta, ttl;
	int i;

	if (base->total >= inet_peer_threshold)
		ttl = 0; /* be aggressive */
	else
		ttl = inet_peer_maxttl
				- (inet_peer_maxttl - inet_peer_minttl) / HZ *
					base->total / inet_peer_threshold * HZ;
	for (i = 0; i < gc_cnt; i++) {
		p = gc_stack[i];

		/* The READ_ONCE() pairs with the WRITE_ONCE()
		 * in inet_putpeer()
		 */
		delta = (__u32)jiffies - READ_ONCE(p->dtime);

		if (delta < ttl || !refcount_dec_if_one(&p->refcnt))
			gc_stack[i] = NULL;
	}
	for (i = 0; i < gc_cnt; i++) {
		p = gc_stack[i];
		if (p) {
			rb_erase(&p->rb_node, &base->rb_root);
			base->total--;
			call_rcu(&p->rcu, inetpeer_free_rcu);
		}
	}
}
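
/* Worked example, for illustration only (the pool-occupancy figure is an
 * assumption): with inet_peer_maxttl = 10 min, inet_peer_minttl = 120 sec
 * and base->total at half of inet_peer_threshold, the formula above gives
 * roughly ttl = 600s - (600s - 120s) / 2 = 360s, so entries idle for less
 * than six minutes survive this GC pass.  Dividing by HZ before multiplying
 * by base->total presumably keeps the intermediate product within 32 bits.
 */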

struct inet_peer *inet_getpeer(struct inet_peer_base *base,
			       const struct inetpeer_addr *daddr,
			       int create)
{
	struct inet_peer *p, *gc_stack[PEER_MAX_GC];
	struct rb_node **pp, *parent;
	unsigned int gc_cnt, seq;
	int invalidated;

	/* Attempt a lockless lookup first.
	 * Because of a concurrent writer, we might not find an existing entry.
	 */
	rcu_read_lock();
	seq = read_seqbegin(&base->lock);
	p = lookup(daddr, base, seq, NULL, &gc_cnt, &parent, &pp);
	invalidated = read_seqretry(&base->lock, seq);
	rcu_read_unlock();

	if (p)
		return p;

	/* If no writer did a change during our lookup, we can return early. */
	if (!create && !invalidated)
		return NULL;

	/* Retry an exact lookup, this time taking the lock first.
	 * At least the nodes should be hot in our cache.
	 */
	parent = NULL;
	write_seqlock_bh(&base->lock);

	gc_cnt = 0;
	p = lookup(daddr, base, seq, gc_stack, &gc_cnt, &parent, &pp);
	if (!p && create) {
		p = kmem_cache_alloc(peer_cachep, GFP_ATOMIC);
		if (p) {
			p->daddr = *daddr;
			p->dtime = (__u32)jiffies;
			refcount_set(&p->refcnt, 2);
			atomic_set(&p->rid, 0);
			p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
			p->rate_tokens = 0;
			p->n_redirects = 0;
			/* 60*HZ is arbitrary, but chosen high enough so that
			 * the first calculation of tokens is at its maximum.
			 */
			p->rate_last = jiffies - 60*HZ;

			rb_link_node(&p->rb_node, parent, pp);
			rb_insert_color(&p->rb_node, &base->rb_root);
			base->total++;
		}
	}
	if (gc_cnt)
		inet_peer_gc(base, gc_stack, gc_cnt);
	write_sequnlock_bh(&base->lock);

	return p;
}
EXPORT_SYMBOL_GPL(inet_getpeer);
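
/* Illustrative caller sketch, not part of the original file.  The helper
 * names used here (inetpeer_set_addr_v4() and the "net" pointer) are
 * assumptions about the surrounding API, shown only to make the reference
 * counting explicit: inet_getpeer() returns a referenced entry (or NULL on
 * allocation failure), and every successful lookup must be balanced by a
 * call to inet_putpeer():
 *
 *	struct inetpeer_addr a;
 *	struct inet_peer *peer;
 *
 *	inetpeer_set_addr_v4(&a, ip_hdr(skb)->saddr);
 *	peer = inet_getpeer(net->ipv4.peers, &a, 1);
 *	if (peer) {
 *		if (inet_peer_xrlim_allow(peer, timeout))
 *			... send the rate-limited message ...
 *		inet_putpeer(peer);
 *	}
 */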

void inet_putpeer(struct inet_peer *p)
{
	/* The WRITE_ONCE() pairs with itself (we run lockless)
	 * and the READ_ONCE() in inet_peer_gc()
	 */
	WRITE_ONCE(p->dtime, (__u32)jiffies);

	if (refcount_dec_and_test(&p->refcnt))
		call_rcu(&p->rcu, inetpeer_free_rcu);
}
EXPORT_SYMBOL_GPL(inet_putpeer);

/*
 * Check transmit rate limitation for a given message.
 * The rate information is held in the inet_peer entries now.
 * This function is generic and could be used for other purposes
 * too. It uses a token bucket filter as suggested by Alexey Kuznetsov.
 *
 * Note that the same inet_peer fields are modified by functions in
 * route.c too, but these work for packet destinations while xrlim_allow
 * works for icmp destinations. This means the rate limiting information
 * for one "ip object" is shared - and these ICMPs are twice limited:
 * by source and by destination.
 *
 * RFC 1812: 4.3.2.8 SHOULD be able to limit error message rate
 *                   SHOULD allow setting of rate limits
 *
 * Shared between ICMPv4 and ICMPv6.
 */
#define XRLIM_BURST_FACTOR 6
bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
{
	unsigned long now, token;
	bool rc = false;

	if (!peer)
		return true;

	token = peer->rate_tokens;
	now = jiffies;
	token += now - peer->rate_last;
	peer->rate_last = now;
	if (token > XRLIM_BURST_FACTOR * timeout)
		token = XRLIM_BURST_FACTOR * timeout;
	if (token >= timeout) {
		token -= timeout;
		rc = true;
	}
	peer->rate_tokens = token;
	return rc;
}
EXPORT_SYMBOL(inet_peer_xrlim_allow);
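
/* Worked example, for illustration only (the one-second timeout is an
 * assumption made by a hypothetical caller, not by this file): with
 * timeout = 1 * HZ, the bucket is capped at XRLIM_BURST_FACTOR * timeout
 * = 6 * HZ tokens, so after an idle period up to six messages may be sent
 * back to back.  Once the burst is spent, tokens accrue at one per jiffy,
 * i.e. one further message is allowed roughly every "timeout" jiffies.
 */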

void inetpeer_invalidate_tree(struct inet_peer_base *base)
{
	struct rb_node *p = rb_first(&base->rb_root);

	while (p) {
		struct inet_peer *peer = rb_entry(p, struct inet_peer, rb_node);

		p = rb_next(p);
		rb_erase(&peer->rb_node, &base->rb_root);
		inet_putpeer(peer);
		cond_resched();
	}

	base->total = 0;
}
EXPORT_SYMBOL(inetpeer_invalidate_tree);