net/sched/cls_bpf.c
/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

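/*
 * Illustrative usage (not part of this file; exact syntax depends on the
 * iproute2 version, and "prog.o"/"classifier" are placeholder names):
 *
 *   # eBPF object in direct-action mode (sets TCA_BPF_FLAG_ACT_DIRECT):
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress bpf da obj prog.o sec classifier
 *
 *   # classic BPF bytecode with a default classid (TCA_BPF_CLASSID):
 *   tc filter add dev eth0 parent 1: bpf bytecode '...' flowid 1:1
 */
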
#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>

#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

#define CLS_BPF_NAME_LEN        256

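/* One cls_bpf_head hangs off each tcf_proto and anchors the RCU-protected
 * list of cls_bpf_prog instances; each instance wraps a single attached
 * classic or extended BPF program together with its handle, default
 * classid and actions.
 */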
struct cls_bpf_head {
        struct list_head plist;
        u32 hgen;
        struct rcu_head rcu;
};

struct cls_bpf_prog {
        struct bpf_prog *filter;
        struct list_head link;
        struct tcf_result res;
        bool exts_integrated;
        struct tcf_exts exts;
        u32 handle;
        union {
                u32 bpf_fd;
                u16 bpf_num_ops;
        };
        struct sock_filter *bpf_ops;
        const char *bpf_name;
        struct tcf_proto *tp;
        struct rcu_head rcu;
};

static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
        [TCA_BPF_CLASSID]       = { .type = NLA_U32 },
        [TCA_BPF_FLAGS]         = { .type = NLA_U32 },
        [TCA_BPF_FD]            = { .type = NLA_U32 },
        [TCA_BPF_NAME]          = { .type = NLA_NUL_STRING, .len = CLS_BPF_NAME_LEN },
        [TCA_BPF_OPS_LEN]       = { .type = NLA_U16 },
        [TCA_BPF_OPS]           = { .type = NLA_BINARY,
                                    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

static int cls_bpf_exec_opcode(int code)
{
        switch (code) {
        case TC_ACT_OK:
        case TC_ACT_SHOT:
        case TC_ACT_STOLEN:
        case TC_ACT_REDIRECT:
        case TC_ACT_UNSPEC:
                return code;
        default:
                return TC_ACT_UNSPEC;
        }
}

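/* Main classification path: walk the RCU-protected list of attached
 * programs and run each one on the skb.  At ingress the mac header is
 * pushed back first so the program sees the packet as it would at egress.
 * With integrated actions (direct-action mode) the program's return value
 * is mapped to a TC verdict by cls_bpf_exec_opcode(); otherwise 0 means
 * no match, -1 selects the classid configured via TCA_BPF_CLASSID, any
 * other value is used as the classid directly, and the filter's actions
 * are then executed through tcf_exts_exec().
 */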
static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
                            struct tcf_result *res)
{
        struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
        bool at_ingress = skb_at_tc_ingress(skb);
        struct cls_bpf_prog *prog;
        int ret = -1;

        /* Needed here for accessing maps. */
        rcu_read_lock();
        list_for_each_entry_rcu(prog, &head->plist, link) {
                int filter_res;

                qdisc_skb_cb(skb)->tc_classid = prog->res.classid;

                if (at_ingress) {
                        /* It is safe to push/pull even if skb_shared() */
                        __skb_push(skb, skb->mac_len);
                        bpf_compute_data_end(skb);
                        filter_res = BPF_PROG_RUN(prog->filter, skb);
                        __skb_pull(skb, skb->mac_len);
                } else {
                        bpf_compute_data_end(skb);
                        filter_res = BPF_PROG_RUN(prog->filter, skb);
                }

                if (prog->exts_integrated) {
                        res->class   = 0;
                        res->classid = TC_H_MAJ(prog->res.classid) |
                                       qdisc_skb_cb(skb)->tc_classid;

                        ret = cls_bpf_exec_opcode(filter_res);
                        if (ret == TC_ACT_UNSPEC)
                                continue;
                        break;
                }

                if (filter_res == 0)
                        continue;
                if (filter_res != -1) {
                        res->class   = 0;
                        res->classid = filter_res;
                } else {
                        *res = prog->res;
                }

                ret = tcf_exts_exec(skb, &prog->exts, res);
                if (ret < 0)
                        continue;

                break;
        }
        rcu_read_unlock();

        return ret;
}

static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
{
        return !prog->bpf_ops;
}

static int cls_bpf_init(struct tcf_proto *tp)
{
        struct cls_bpf_head *head;

        head = kzalloc(sizeof(*head), GFP_KERNEL);
        if (head == NULL)
                return -ENOBUFS;

        INIT_LIST_HEAD_RCU(&head->plist);
        rcu_assign_pointer(tp->root, head);

        return 0;
}

static void cls_bpf_delete_prog(struct tcf_proto *tp, struct cls_bpf_prog *prog)
{
        tcf_exts_destroy(&prog->exts);

        if (cls_bpf_is_ebpf(prog))
                bpf_prog_put(prog->filter);
        else
                bpf_prog_destroy(prog->filter);

        kfree(prog->bpf_name);
        kfree(prog->bpf_ops);
        kfree(prog);
}

static void __cls_bpf_delete_prog(struct rcu_head *rcu)
{
        struct cls_bpf_prog *prog = container_of(rcu, struct cls_bpf_prog, rcu);

        cls_bpf_delete_prog(prog->tp, prog);
}

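/* Filter removal runs under RTNL: entries are unlinked with list_del_rcu()
 * and freed via call_rcu(), so that readers still walking the list in
 * cls_bpf_classify() never see freed memory.
 */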
static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg)
{
        struct cls_bpf_prog *prog = (struct cls_bpf_prog *) arg;

        list_del_rcu(&prog->link);
        tcf_unbind_filter(tp, &prog->res);
        call_rcu(&prog->rcu, __cls_bpf_delete_prog);

        return 0;
}

static bool cls_bpf_destroy(struct tcf_proto *tp, bool force)
{
        struct cls_bpf_head *head = rtnl_dereference(tp->root);
        struct cls_bpf_prog *prog, *tmp;

        if (!force && !list_empty(&head->plist))
                return false;

        list_for_each_entry_safe(prog, tmp, &head->plist, link) {
                list_del_rcu(&prog->link);
                tcf_unbind_filter(tp, &prog->res);
                call_rcu(&prog->rcu, __cls_bpf_delete_prog);
        }

        RCU_INIT_POINTER(tp->root, NULL);
        kfree_rcu(head, rcu);
        return true;
}

static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
        struct cls_bpf_head *head = rtnl_dereference(tp->root);
        struct cls_bpf_prog *prog;
        unsigned long ret = 0UL;

        if (head == NULL)
                return 0UL;

        list_for_each_entry(prog, &head->plist, link) {
                if (prog->handle == handle) {
                        ret = (unsigned long) prog;
                        break;
                }
        }

        return ret;
}

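/* Two program load paths: cls_bpf_prog_from_ops() builds a classic BPF
 * program from the opcode array in TCA_BPF_OPS/TCA_BPF_OPS_LEN, while
 * cls_bpf_prog_from_efd() takes an already loaded eBPF program by file
 * descriptor (TCA_BPF_FD) and optionally records its name.
 */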
static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
{
        struct sock_filter *bpf_ops;
        struct sock_fprog_kern fprog_tmp;
        struct bpf_prog *fp;
        u16 bpf_size, bpf_num_ops;
        int ret;

        bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
        if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
                return -EINVAL;

        bpf_size = bpf_num_ops * sizeof(*bpf_ops);
        if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
                return -EINVAL;

        bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
        if (bpf_ops == NULL)
                return -ENOMEM;

        memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);

        fprog_tmp.len = bpf_num_ops;
        fprog_tmp.filter = bpf_ops;

        ret = bpf_prog_create(&fp, &fprog_tmp);
        if (ret < 0) {
                kfree(bpf_ops);
                return ret;
        }

        prog->bpf_ops = bpf_ops;
        prog->bpf_num_ops = bpf_num_ops;
        prog->bpf_name = NULL;
        prog->filter = fp;

        return 0;
}

static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
                                 const struct tcf_proto *tp)
{
        struct bpf_prog *fp;
        char *name = NULL;
        u32 bpf_fd;

        bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);

        fp = bpf_prog_get_type(bpf_fd, BPF_PROG_TYPE_SCHED_CLS);
        if (IS_ERR(fp))
                return PTR_ERR(fp);

        if (tb[TCA_BPF_NAME]) {
                name = kmemdup(nla_data(tb[TCA_BPF_NAME]),
                               nla_len(tb[TCA_BPF_NAME]),
                               GFP_KERNEL);
                if (!name) {
                        bpf_prog_put(fp);
                        return -ENOMEM;
                }
        }

        prog->bpf_ops = NULL;
        prog->bpf_fd = bpf_fd;
        prog->bpf_name = name;
        prog->filter = fp;

        if (fp->dst_needed && !(tp->q->flags & TCQ_F_INGRESS))
                netif_keep_dst(qdisc_dev(tp->q));

        return 0;
}

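/* Validate the netlink attributes for a new or replaced filter: exactly one
 * of the classic BPF and eBPF load paths must be supplied, and the only
 * accepted flag is TCA_BPF_FLAG_ACT_DIRECT, which enables the integrated
 * action handling in the classify path above.
 */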
static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
                                   struct cls_bpf_prog *prog,
                                   unsigned long base, struct nlattr **tb,
                                   struct nlattr *est, bool ovr)
{
        bool is_bpf, is_ebpf, have_exts = false;
        struct tcf_exts exts;
        int ret;

        is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
        is_ebpf = tb[TCA_BPF_FD];
        if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
                return -EINVAL;

        ret = tcf_exts_init(&exts, TCA_BPF_ACT, TCA_BPF_POLICE);
        if (ret < 0)
                return ret;
        ret = tcf_exts_validate(net, tp, tb, est, &exts, ovr);
        if (ret < 0)
                goto errout;

        if (tb[TCA_BPF_FLAGS]) {
                u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);

                if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT) {
                        ret = -EINVAL;
                        goto errout;
                }

                have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
        }

        prog->exts_integrated = have_exts;

        ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
                       cls_bpf_prog_from_efd(tb, prog, tp);
        if (ret < 0)
                goto errout;

        if (tb[TCA_BPF_CLASSID]) {
                prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
                tcf_bind_filter(tp, &prog->res, base);
        }

        tcf_exts_change(tp, &prog->exts, &exts);
        return 0;

errout:
        tcf_exts_destroy(&exts);
        return ret;
}

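/* Pick an unused handle when the user did not supply one: hgen cycles
 * through the range 1..0x7FFFFFFE and up to 2^31 candidates are probed
 * before giving up with handle 0, which the caller treats as an error.
 */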
static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
                                   struct cls_bpf_head *head)
{
        unsigned int i = 0x80000000;
        u32 handle;

        do {
                if (++head->hgen == 0x7FFFFFFF)
                        head->hgen = 1;
        } while (--i > 0 && cls_bpf_get(tp, head->hgen));

        if (unlikely(i == 0)) {
                pr_err("Insufficient number of handles\n");
                handle = 0;
        } else {
                handle = head->hgen;
        }

        return handle;
}

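/* Entry point for adding or replacing a filter.  A new cls_bpf_prog is
 * always allocated; on replacement the old one is swapped out with
 * list_replace_rcu() and freed after a grace period.
 */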
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
                          struct tcf_proto *tp, unsigned long base,
                          u32 handle, struct nlattr **tca,
                          unsigned long *arg, bool ovr)
{
        struct cls_bpf_head *head = rtnl_dereference(tp->root);
        struct cls_bpf_prog *oldprog = (struct cls_bpf_prog *) *arg;
        struct nlattr *tb[TCA_BPF_MAX + 1];
        struct cls_bpf_prog *prog;
        int ret;

        if (tca[TCA_OPTIONS] == NULL)
                return -EINVAL;

        ret = nla_parse_nested(tb, TCA_BPF_MAX, tca[TCA_OPTIONS], bpf_policy);
        if (ret < 0)
                return ret;

        prog = kzalloc(sizeof(*prog), GFP_KERNEL);
        if (!prog)
                return -ENOBUFS;

        ret = tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE);
        if (ret < 0)
                goto errout;

        if (oldprog) {
                if (handle && oldprog->handle != handle) {
                        ret = -EINVAL;
                        goto errout;
                }
        }

        if (handle == 0)
                prog->handle = cls_bpf_grab_new_handle(tp, head);
        else
                prog->handle = handle;
        if (prog->handle == 0) {
                ret = -EINVAL;
                goto errout;
        }

        ret = cls_bpf_modify_existing(net, tp, prog, base, tb, tca[TCA_RATE], ovr);
        if (ret < 0)
                goto errout;

        if (oldprog) {
                list_replace_rcu(&oldprog->link, &prog->link);
                tcf_unbind_filter(tp, &oldprog->res);
                call_rcu(&oldprog->rcu, __cls_bpf_delete_prog);
        } else {
                list_add_rcu(&prog->link, &head->plist);
        }

        *arg = (unsigned long) prog;
        return 0;

errout:
        tcf_exts_destroy(&prog->exts);
        kfree(prog);
        return ret;
}

static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
                                 struct sk_buff *skb)
{
        struct nlattr *nla;

        if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
                return -EMSGSIZE;

        nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
                          sizeof(struct sock_filter));
        if (nla == NULL)
                return -EMSGSIZE;

        memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

        return 0;
}

static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
                                  struct sk_buff *skb)
{
        if (nla_put_u32(skb, TCA_BPF_FD, prog->bpf_fd))
                return -EMSGSIZE;

        if (prog->bpf_name &&
            nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
                return -EMSGSIZE;

        return 0;
}

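/* Dump a filter back to user space, mirroring the two load paths above:
 * opcode array and length for classic BPF, program fd and optional name
 * for eBPF.
 */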
static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
                        struct sk_buff *skb, struct tcmsg *tm)
{
        struct cls_bpf_prog *prog = (struct cls_bpf_prog *) fh;
        struct nlattr *nest;
        u32 bpf_flags = 0;
        int ret;

        if (prog == NULL)
                return skb->len;

        tm->tcm_handle = prog->handle;

        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;

        if (prog->res.classid &&
            nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
                goto nla_put_failure;

        if (cls_bpf_is_ebpf(prog))
                ret = cls_bpf_dump_ebpf_info(prog, skb);
        else
                ret = cls_bpf_dump_bpf_info(prog, skb);
        if (ret)
                goto nla_put_failure;

        if (tcf_exts_dump(skb, &prog->exts) < 0)
                goto nla_put_failure;

        if (prog->exts_integrated)
                bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT;
        if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags))
                goto nla_put_failure;

        nla_nest_end(skb, nest);

        if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
                goto nla_put_failure;

        return skb->len;

nla_put_failure:
        nla_nest_cancel(skb, nest);
        return -1;
}

static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
        struct cls_bpf_head *head = rtnl_dereference(tp->root);
        struct cls_bpf_prog *prog;

        list_for_each_entry(prog, &head->plist, link) {
                if (arg->count < arg->skip)
                        goto skip;
                if (arg->fn(tp, (unsigned long) prog, arg) < 0) {
                        arg->stop = 1;
                        break;
                }
skip:
                arg->count++;
        }
}

static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
        .kind           =       "bpf",
        .owner          =       THIS_MODULE,
        .classify       =       cls_bpf_classify,
        .init           =       cls_bpf_init,
        .destroy        =       cls_bpf_destroy,
        .get            =       cls_bpf_get,
        .change         =       cls_bpf_change,
        .delete         =       cls_bpf_delete,
        .walk           =       cls_bpf_walk,
        .dump           =       cls_bpf_dump,
};

static int __init cls_bpf_init_mod(void)
{
        return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
        unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);