/*
 * Rusty Russell (C)2000 -- This code is GPL.
 * Patrick McHardy (c) 2006-2012
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_bridge.h>
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
#include <net/protocol.h>
#include <net/netfilter/nf_queue.h>
#include <net/dst.h>

#include "nf_internals.h"

/*
 * Hook for nfnetlink_queue to register its queue handler.
 * We do this so that most of the NFQUEUE code can be modular.
 *
 * Once the queue is registered it must reinject all packets it
 * receives, no matter what.
 */

/* Register the queue handler for this netns. Only one queueing
 * backend exists in the kernel, so a duplicate registration should
 * never happen; it would only trip the WARN_ON() below. */
void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh)
{
	/* should never happen, we only have one queueing backend in kernel */
	WARN_ON(rcu_access_pointer(net->nf.queue_handler));
	rcu_assign_pointer(net->nf.queue_handler, qh);
}
EXPORT_SYMBOL(nf_register_queue_handler);

/* The caller must flush their queue before this */
void nf_unregister_queue_handler(struct net *net)
{
	RCU_INIT_POINTER(net->nf.queue_handler, NULL);
}
EXPORT_SYMBOL(nf_unregister_queue_handler);
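
/*
 * Usage sketch (not verbatim kernel code): nfnetlink_queue is the only
 * in-tree backend. It fills in a struct nf_queue_handler with its
 * enqueue and flush callbacks and registers it per network namespace,
 * roughly like:
 *
 *	static const struct nf_queue_handler nfqh = {
 *		.outfn		= nfqnl_enqueue_packet,
 *		.nf_hook_drop	= nfqnl_nf_hook_drop,
 *	};
 *	nf_register_queue_handler(net, &nfqh);
 *
 * and it calls nf_unregister_queue_handler() from its pernet exit path
 * after flushing all pending entries.
 */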

static void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
{
	struct nf_hook_state *state = &entry->state;

	/* Release those devices we held, or Alexey will kill me. */
	if (state->in)
		dev_put(state->in);
	if (state->out)
		dev_put(state->out);
	if (state->sk)
		sock_put(state->sk);

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	if (entry->physin)
		dev_put(entry->physin);
	if (entry->physout)
		dev_put(entry->physout);
#endif
}

void nf_queue_entry_free(struct nf_queue_entry *entry)
{
	nf_queue_entry_release_refs(entry);
	kfree(entry);
}
EXPORT_SYMBOL_GPL(nf_queue_entry_free);

static void __nf_queue_entry_init_physdevs(struct nf_queue_entry *entry)
{
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	const struct sk_buff *skb = entry->skb;
	struct nf_bridge_info *nf_bridge;

	nf_bridge = nf_bridge_info_get(skb);
	if (nf_bridge) {
		entry->physin = nf_bridge_get_physindev(skb);
		entry->physout = nf_bridge_get_physoutdev(skb);
	} else {
		entry->physin = NULL;
		entry->physout = NULL;
	}
#endif
}

/* Bump dev refs so they don't vanish while packet is out */
void nf_queue_entry_get_refs(struct nf_queue_entry *entry)
{
	struct nf_hook_state *state = &entry->state;

	if (state->in)
		dev_hold(state->in);
	if (state->out)
		dev_hold(state->out);
	if (state->sk)
		sock_hold(state->sk);

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	if (entry->physin)
		dev_hold(entry->physin);
	if (entry->physout)
		dev_hold(entry->physout);
#endif
}
EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);
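
/*
 * Lifecycle note: nf_queue_entry_get_refs() is paired with
 * nf_queue_entry_release_refs() via nf_queue_entry_free(). Every entry
 * handed to the queue handler holds references on the in/out devices,
 * the socket and (with bridge netfilter) the physical ports, so none
 * of them can go away while the packet waits for a verdict.
 */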

void nf_queue_nf_hook_drop(struct net *net)
{
	const struct nf_queue_handler *qh;

	rcu_read_lock();
	qh = rcu_dereference(net->nf.queue_handler);
	if (qh)
		qh->nf_hook_drop(net);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_queue_nf_hook_drop);
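
/*
 * nf_queue_nf_hook_drop() is invoked by the core when hooks are being
 * unregistered: queued entries may still reference a hook that is
 * about to disappear, so the backend is asked to drop them first.
 */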

static void nf_ip_saveroute(const struct sk_buff *skb,
			    struct nf_queue_entry *entry)
{
	struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);

	if (entry->state.hook == NF_INET_LOCAL_OUT) {
		const struct iphdr *iph = ip_hdr(skb);

		rt_info->tos = iph->tos;
		rt_info->daddr = iph->daddr;
		rt_info->saddr = iph->saddr;
		rt_info->mark = skb->mark;
	}
}

static void nf_ip6_saveroute(const struct sk_buff *skb,
			     struct nf_queue_entry *entry)
{
	struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);

	if (entry->state.hook == NF_INET_LOCAL_OUT) {
		const struct ipv6hdr *iph = ipv6_hdr(skb);

		rt_info->daddr = iph->daddr;
		rt_info->saddr = iph->saddr;
		rt_info->mark = skb->mark;
	}
}
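
/*
 * The saved addresses/tos/mark form the "route key" appended to each
 * entry (the route_key_size bytes allocated in __nf_queue() below).
 * When the verdict comes back, nf_reroute() (called from nf_reinject())
 * compares them against the reinjected skb and recomputes the route
 * for locally generated packets that were mangled while queued.
 */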

static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
		      unsigned int index, unsigned int queuenum)
{
	struct nf_queue_entry *entry = NULL;
	const struct nf_queue_handler *qh;
	struct net *net = state->net;
	unsigned int route_key_size;
	int status;

	/* QUEUE == DROP if no one is waiting, to be safe. */
	qh = rcu_dereference(net->nf.queue_handler);
	if (!qh)
		return -ESRCH;

	switch (state->pf) {
	case AF_INET:
		route_key_size = sizeof(struct ip_rt_info);
		break;
	case AF_INET6:
		route_key_size = sizeof(struct ip6_rt_info);
		break;
	default:
		route_key_size = 0;
		break;
	}

	entry = kmalloc(sizeof(*entry) + route_key_size, GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	if (skb_dst(skb) && !skb_dst_force(skb)) {
		kfree(entry);
		return -ENETDOWN;
	}

	*entry = (struct nf_queue_entry) {
		.skb = skb,
		.state = *state,
		.hook_index = index,
		.size = sizeof(*entry) + route_key_size,
	};

	__nf_queue_entry_init_physdevs(entry);

	nf_queue_entry_get_refs(entry);

	switch (entry->state.pf) {
	case AF_INET:
		nf_ip_saveroute(skb, entry);
		break;
	case AF_INET6:
		nf_ip6_saveroute(skb, entry);
		break;
	}

	status = qh->outfn(entry, queuenum);
	if (status < 0) {
		nf_queue_entry_free(entry);
		return status;
	}

	return 0;
}

/* Packets leaving via this function must come back through nf_reinject(). */
int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
	     unsigned int index, unsigned int verdict)
{
	int ret;

	ret = __nf_queue(skb, state, index, verdict >> NF_VERDICT_QBITS);
	if (ret < 0) {
		if (ret == -ESRCH &&
		    (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
			return 1;
		kfree_skb(skb);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nf_queue);
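
/*
 * Verdict layout reminder: the target queue number travels in the
 * upper bits of the verdict word. NF_QUEUE_NR() from
 * <linux/netfilter.h> builds such a verdict, so a hook returning
 * NF_QUEUE_NR(3) arrives here with (verdict >> NF_VERDICT_QBITS) == 3,
 * while NF_VERDICT_FLAG_QUEUE_BYPASS makes a missing handler behave
 * like NF_ACCEPT (the "return 1" above) instead of a drop.
 */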

static unsigned int nf_iterate(struct sk_buff *skb,
			       struct nf_hook_state *state,
			       const struct nf_hook_entries *hooks,
			       unsigned int *index)
{
	const struct nf_hook_entry *hook;
	unsigned int verdict, i = *index;

	while (i < hooks->num_hook_entries) {
		hook = &hooks->hooks[i];
repeat:
		verdict = nf_hook_entry_hookfn(hook, skb, state);
		if (verdict != NF_ACCEPT) {
			*index = i;
			if (verdict != NF_REPEAT)
				return verdict;
			goto repeat;
		}
		i++;
	}

	*index = i;
	return NF_ACCEPT;
}
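
/*
 * nf_iterate() resumes hook traversal at *index and walks the
 * remaining entries until one returns something other than NF_ACCEPT;
 * NF_REPEAT re-runs the same hook. On a non-accept verdict, *index is
 * left pointing at the hook that issued it, which is how nf_reinject()
 * below knows where to continue after the next trip to userspace.
 */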

static struct nf_hook_entries *nf_hook_entries_head(const struct net *net, u8 pf, u8 hooknum)
{
	switch (pf) {
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
	case NFPROTO_BRIDGE:
		return rcu_dereference(net->nf.hooks_bridge[hooknum]);
#endif
	case NFPROTO_IPV4:
		return rcu_dereference(net->nf.hooks_ipv4[hooknum]);
	case NFPROTO_IPV6:
		return rcu_dereference(net->nf.hooks_ipv6[hooknum]);
	default:
		WARN_ON_ONCE(1);
		return NULL;
	}
}

/* Caller must hold rcu read-side lock */
void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{
	const struct nf_hook_entry *hook_entry;
	const struct nf_hook_entries *hooks;
	struct sk_buff *skb = entry->skb;
	const struct net *net;
	unsigned int i;
	int err;
	u8 pf;

	net = entry->state.net;
	pf = entry->state.pf;

	hooks = nf_hook_entries_head(net, pf, entry->state.hook);

	i = entry->hook_index;
	if (WARN_ON_ONCE(!hooks || i >= hooks->num_hook_entries)) {
		kfree_skb(skb);
		nf_queue_entry_free(entry);
		return;
	}

	hook_entry = &hooks->hooks[i];

	/* Continue traversal iff userspace said ok... */
	if (verdict == NF_REPEAT)
		verdict = nf_hook_entry_hookfn(hook_entry, skb, &entry->state);

	if (verdict == NF_ACCEPT) {
		if (nf_reroute(skb, entry) < 0)
			verdict = NF_DROP;
	}

	if (verdict == NF_ACCEPT) {
next_hook:
		++i;
		verdict = nf_iterate(skb, &entry->state, hooks, &i);
	}

	switch (verdict & NF_VERDICT_MASK) {
	case NF_ACCEPT:
	case NF_STOP:
		local_bh_disable();
		entry->state.okfn(entry->state.net, entry->state.sk, skb);
		local_bh_enable();
		break;
	case NF_QUEUE:
		err = nf_queue(skb, &entry->state, i, verdict);
		if (err == 1)
			goto next_hook;
		break;
	case NF_STOLEN:
		break;
	default:
		kfree_skb(skb);
	}

	nf_queue_entry_free(entry);
}
EXPORT_SYMBOL(nf_reinject);