Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 82f165f..d5f10a4 100644
@@ -9,6 +9,8 @@
 #include <linux/netdevice.h>
 #include <linux/if_vlan.h>
 #include <linux/filter.h>
+#include <linux/random.h>
+#include <linux/init.h>
 #include <asm/cacheflush.h>
 #include <asm/processor.h>
 #include <asm/facility.h>
@@ -221,6 +223,37 @@ static void bpf_jit_epilogue(struct bpf_jit *jit)
        EMIT2(0x07fe);
 }
 
+/* Helper to find the offset of pkt_type in sk_buff
+ * Make sure it's still a 3-bit field starting at the MSBs within a byte.
+ */
+#define PKT_TYPE_MAX 0xe0
+static int pkt_type_offset;
+
+static int __init bpf_pkt_type_offset_init(void)
+{
+       struct sk_buff skb_probe = {
+               .pkt_type = ~0,
+       };
+       char *ct = (char *)&skb_probe;
+       int off;
+
+       pkt_type_offset = -1;
+       for (off = 0; off < sizeof(struct sk_buff); off++) {
+               if (!ct[off])
+                       continue;
+               if (ct[off] == PKT_TYPE_MAX)
+                       pkt_type_offset = off;
+               else {
+                       /* Found non-matching bit pattern, fix needed. */
+                       WARN_ON_ONCE(1);
+                       pkt_type_offset = -1;
+                       return -1;
+               }
+       }
+       return 0;
+}
+device_initcall(bpf_pkt_type_offset_init);
+
 /*
  * make sure we don't leak kernel information to user
  */
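
The probe added above works because a 3-bit field set to ~0 leaves exactly one nonzero byte in an otherwise zeroed struct: scanning for that byte yields its offset, and requiring the value 0xe0 confirms the field still occupies the three most significant bits of its byte (s390 is big-endian, so bitfields are allocated MSB-first). A stand-alone sketch of the same scan, using a hypothetical stand-in struct rather than the real sk_buff:

	#include <stdio.h>
	#include <string.h>

	/* Hypothetical stand-in for sk_buff; layout for illustration only. */
	struct probe {
		unsigned long	cb[4];
		unsigned char	pkt_type:3,	/* first bitfield in its byte */
				fclone:2,
				peeked:1;
		unsigned int	len;
	};

	int main(void)
	{
		struct probe p;
		unsigned char *ct = (unsigned char *)&p;
		size_t off;

		memset(&p, 0, sizeof(p));
		p.pkt_type = ~0;	/* light up all three bits */
		for (off = 0; off < sizeof(p); off++)
			if (ct[off])
				printf("offset %zu, byte 0x%02x\n", off, ct[off]);
		return 0;
	}

On a big-endian machine the printed byte is 0xe0 (bits 7-5); on a little-endian one it is 0x07 (bits 2-0), which is exactly why the kernel probe insists on PKT_TYPE_MAX and warns if the layout ever changes.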
@@ -720,6 +753,16 @@ call_fn:   /* lg %r1,<d(function)>(%r13) */
                        EMIT4_DISP(0x88500000, 12);
                }
                break;
+       case BPF_S_ANC_PKTTYPE:
+               if (pkt_type_offset < 0)
+                       goto out;
+               /* lhi %r5,0 */
+               EMIT4(0xa7580000);
+               /* ic %r5,<d(pkt_type_offset)>(%r2) */
+               EMIT4_DISP(0x43502000, pkt_type_offset);
+               /* srl %r5,5 */
+               EMIT4_DISP(0x88500000, 5);
+               break;
        case BPF_S_ANC_CPU: /* A = smp_processor_id() */
 #ifdef CONFIG_SMP
                /* l %r5,<d(cpu_nr)> */
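
The BPF_S_ANC_PKTTYPE case added above emits three instructions that zero the accumulator, load the byte holding the bitfield, and shift the 3-bit value down into place. A sketch of the equivalent C, assuming this JIT's convention that %r2 carries the skb pointer and %r5 the BPF accumulator A:

	/* Equivalent of: lhi %r5,0; ic %r5,<off>(%r2); srl %r5,5 */
	static unsigned int load_pkt_type(const void *skb, int pkt_type_offset)
	{
		return *((const unsigned char *)skb + pkt_type_offset) >> 5;
	}

The shift by 5 matches PKT_TYPE_MAX 0xe0: the field sits in bits 7-5 of the probed byte, so a logical right shift by 5 leaves the plain 3-bit packet type value in A.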
@@ -738,8 +781,41 @@ out:
        return -1;
 }
 
+/*
+ * Note: for security reasons, the BPF code follows a randomly
+ *      sized block of illegal instructions.
+ */
+struct bpf_binary_header {
+       unsigned int pages;
+       u8 image[];
+};
+
+static struct bpf_binary_header *bpf_alloc_binary(unsigned int bpfsize,
+                                                 u8 **image_ptr)
+{
+       struct bpf_binary_header *header;
+       unsigned int sz, hole;
+
+       /* Most BPF filters are really small, but if some of them fill a page,
+        * allow at least 128 extra bytes for illegal instructions.
+        */
+       sz = round_up(bpfsize + sizeof(*header) + 128, PAGE_SIZE);
+       header = module_alloc(sz);
+       if (!header)
+               return NULL;
+       memset(header, 0, sz);
+       header->pages = sz / PAGE_SIZE;
+       hole = sz - (bpfsize + sizeof(*header));
+       /* Insert random number of illegal instructions before BPF code
+        * and make sure the first instruction starts at an even address.
+        */
+       *image_ptr = &header->image[(prandom_u32() % hole) & -2];
+       return header;
+}
+
 void bpf_jit_compile(struct sk_filter *fp)
 {
+       struct bpf_binary_header *header = NULL;
        unsigned long size, prg_len, lit_len;
        struct bpf_jit jit, cjit;
        unsigned int *addrs;
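
The hunk above adds bpf_alloc_binary(), which sizes the allocation with at least 128 bytes of slack and then picks a random, even start offset inside that slack; the even alignment (the "& -2") matters because s390 instructions must begin on halfword boundaries. A stand-alone sketch of the arithmetic, with rand() merely standing in for prandom_u32() and an example filter size:

	#include <stdio.h>
	#include <stdlib.h>

	#define PAGE_SIZE	4096u
	#define round_up(x, y)	((((x) + (y) - 1) / (y)) * (y))

	int main(void)
	{
		unsigned int bpfsize = 300;		/* example program size */
		unsigned int hdr = sizeof(unsigned int);/* header->pages */
		unsigned int sz, hole, start;

		sz = round_up(bpfsize + hdr + 128, PAGE_SIZE);
		hole = sz - (bpfsize + hdr);
		start = (rand() % hole) & -2;	/* even offset into the hole */

		/* start <= hole - 1, so hdr + start + bpfsize <= sz - 1:
		 * the image always fits inside the allocation.
		 */
		printf("sz=%u hole=%u start=%u image end=%u\n",
		       sz, hole, start, hdr + start + bpfsize);
		return 0;
	}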
@@ -772,12 +848,11 @@ void bpf_jit_compile(struct sk_filter *fp)
                } else if (jit.prg == cjit.prg && jit.lit == cjit.lit) {
                        prg_len = jit.prg - jit.start;
                        lit_len = jit.lit - jit.mid;
-                       size = max_t(unsigned long, prg_len + lit_len,
-                                    sizeof(struct work_struct));
+                       size = prg_len + lit_len;
                        if (size >= BPF_SIZE_MAX)
                                goto out;
-                       jit.start = module_alloc(size);
-                       if (!jit.start)
+                       header = bpf_alloc_binary(size, &jit.start);
+                       if (!header)
                                goto out;
                        jit.prg = jit.mid = jit.start + prg_len;
                        jit.lit = jit.end = jit.start + prg_len + lit_len;
@@ -788,37 +863,25 @@ void bpf_jit_compile(struct sk_filter *fp)
                cjit = jit;
        }
        if (bpf_jit_enable > 1) {
-               pr_err("flen=%d proglen=%lu pass=%d image=%p\n",
-                      fp->len, jit.end - jit.start, pass, jit.start);
-               if (jit.start) {
-                       printk(KERN_ERR "JIT code:\n");
+               bpf_jit_dump(fp->len, jit.end - jit.start, pass, jit.start);
+               if (jit.start)
                        print_fn_code(jit.start, jit.mid - jit.start);
-                       print_hex_dump(KERN_ERR, "JIT literals:\n",
-                                      DUMP_PREFIX_ADDRESS, 16, 1,
-                                      jit.mid, jit.end - jit.mid, false);
-               }
        }
-       if (jit.start)
+       if (jit.start) {
+               set_memory_ro((unsigned long)header, header->pages);
                fp->bpf_func = (void *) jit.start;
+       }
 out:
        kfree(addrs);
 }
 
-static void jit_free_defer(struct work_struct *arg)
-{
-       module_free(NULL, arg);
-}
-
-/* run from softirq, we must use a work_struct to call
- * module_free() from process context
- */
 void bpf_jit_free(struct sk_filter *fp)
 {
-       struct work_struct *work;
+       unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
+       struct bpf_binary_header *header = (void *)addr;
 
        if (fp->bpf_func == sk_run_filter)
                return;
-       work = (struct work_struct *)fp->bpf_func;
-       INIT_WORK(work, jit_free_defer);
-       schedule_work(work);
+       set_memory_rw(addr, header->pages);
+       module_free(NULL, header);
 }