[cascardo/linux.git] mm/vmalloc.c
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 99d045a..1074543 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -388,12 +388,12 @@ nocache:
                addr = ALIGN(first->va_end, align);
                if (addr < vstart)
                        goto nocache;
-               if (addr + size - 1 < addr)
+               if (addr + size < addr)
                        goto overflow;
 
        } else {
                addr = ALIGN(vstart, align);
-               if (addr + size - 1 < addr)
+               if (addr + size < addr)
                        goto overflow;
 
                n = vmap_area_root.rb_node;
@@ -420,7 +420,7 @@ nocache:
                if (addr + cached_hole_size < first->va_start)
                        cached_hole_size = first->va_start - addr;
                addr = ALIGN(first->va_end, align);
-               if (addr + size - 1 < addr)
+               if (addr + size < addr)
                        goto overflow;
 
                if (list_is_last(&first->list, &vmap_area_list))
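Both hunks above apply the same overflow fix. The old guard, addr + size - 1 < addr, misses the boundary case where addr + size wraps to exactly 0, yet addr + size is the value that ends up in va->va_end, so a wrapped end address could slip past the check and later trip the sanity checks in __insert_vmap_area(). A minimal standalone sketch of that boundary case (values are illustrative, not from the original):

	#include <stdio.h>
	#include <limits.h>

	int main(void)
	{
		unsigned long size = 4096;
		unsigned long addr = ULONG_MAX - size + 1;	/* addr + size wraps to 0 */

		/* old check stays silent: addr + size - 1 == ULONG_MAX >= addr */
		printf("old check fires: %d\n", addr + size - 1 < addr);
		/* new check fires: addr + size == 0 < addr */
		printf("new check fires: %d\n", addr + size < addr);
		return 0;
	}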
@@ -752,9 +752,7 @@ struct vmap_block_queue {
 struct vmap_block {
        spinlock_t lock;
        struct vmap_area *va;
-       struct vmap_block_queue *vbq;
        unsigned long free, dirty;
-       DECLARE_BITMAP(alloc_map, VMAP_BBMAP_BITS);
        DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
        struct list_head free_list;
        struct rcu_head rcu_head;
@@ -820,7 +818,6 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
        vb->va = va;
        vb->free = VMAP_BBMAP_BITS;
        vb->dirty = 0;
-       bitmap_zero(vb->alloc_map, VMAP_BBMAP_BITS);
        bitmap_zero(vb->dirty_map, VMAP_BBMAP_BITS);
        INIT_LIST_HEAD(&vb->free_list);
 
@@ -832,7 +829,6 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
        radix_tree_preload_end();
 
        vbq = &get_cpu_var(vmap_block_queue);
-       vb->vbq = vbq;
        spin_lock(&vbq->lock);
        list_add_rcu(&vb->free_list, &vbq->free);
        spin_unlock(&vbq->lock);
@@ -873,7 +869,6 @@ static void purge_fragmented_blocks(int cpu)
                if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
                        vb->free = 0; /* prevent further allocs after releasing lock */
                        vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
-                       bitmap_fill(vb->alloc_map, VMAP_BBMAP_BITS);
                        bitmap_fill(vb->dirty_map, VMAP_BBMAP_BITS);
                        spin_lock(&vbq->lock);
                        list_del_rcu(&vb->free_list);
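The hunks above drop two vmap_block members that carry no information. vb->vbq was a back-pointer to the owning per-CPU queue that no reader needed any more, and alloc_map tracked allocated bits even though allocation inside a block is a simple bump from the block start, so the free counter alone determines the next offset. A rough sketch of that bump-style allocation, condensed from vb_alloc() of this era (abbreviated, not a verbatim copy):

	/*
	 * Pages [0, i) of the block are allocated and pages [i, end) are
	 * free, so a per-bit alloc_map is redundant by construction.
	 */
	i = VMAP_BBMAP_BITS - vb->free;
	addr = vb->va->va_start + (i << PAGE_SHIFT);
	vb->free -= 1UL << order;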
@@ -1021,15 +1016,16 @@ void vm_unmap_aliases(void)
 
                rcu_read_lock();
                list_for_each_entry_rcu(vb, &vbq->free, free_list) {
-                       int i;
+                       int i, j;
 
                        spin_lock(&vb->lock);
                        i = find_first_bit(vb->dirty_map, VMAP_BBMAP_BITS);
-                       while (i < VMAP_BBMAP_BITS) {
+                       if (i < VMAP_BBMAP_BITS) {
                                unsigned long s, e;
-                               int j;
-                               j = find_next_zero_bit(vb->dirty_map,
-                                       VMAP_BBMAP_BITS, i);
+
+                               j = find_last_bit(vb->dirty_map,
+                                                       VMAP_BBMAP_BITS);
+                               j = j + 1; /* need exclusive index */
 
                                s = vb->va->va_start + (i << PAGE_SHIFT);
                                e = vb->va->va_start + (j << PAGE_SHIFT);
@@ -1039,10 +1035,6 @@ void vm_unmap_aliases(void)
                                        start = s;
                                if (e > end)
                                        end = e;
-
-                               i = j;
-                               i = find_next_bit(vb->dirty_map,
-                                                       VMAP_BBMAP_BITS, i);
                        }
                        spin_unlock(&vb->lock);
                }
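Rather than walking every dirty run with find_next_zero_bit()/find_next_bit() and recording each one, the rewritten body derives a single span from the first to the last dirty bit; the j + 1 converts find_last_bit()'s inclusive index into the exclusive bound the address arithmetic below it expects. Clean pages between dirty runs now get flushed too, trading a possibly wider flush for fewer iterations. The semantics on a hypothetical 8-bit map (illustration only, not kernel code):

	/* dirty_map = 0b00110100, i.e. bits 2, 4 and 5 set */
	i = find_first_bit(dirty_map, 8);	/* 2 */
	j = find_last_bit(dirty_map, 8) + 1;	/* 5 + 1 = 6, exclusive */
	/* one flush range covers pages [2, 6) instead of [2, 3) and [4, 6) */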
@@ -1266,7 +1258,7 @@ void unmap_kernel_range(unsigned long addr, unsigned long size)
 int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
 {
        unsigned long addr = (unsigned long)area->addr;
-       unsigned long end = addr + area->size - PAGE_SIZE;
+       unsigned long end = addr + get_vm_area_size(area);
        int err;
 
        err = vmap_page_range(addr, end, prot, *pages);
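area->size includes the one-page guard area appended to each allocation, and open-coding size - PAGE_SIZE at every call site is error prone; the same conversion repeats in __vmalloc_area_node(), vread() and vwrite() below. At this point the helper in include/linux/vmalloc.h was essentially:

	static inline size_t get_vm_area_size(const struct vm_struct *area)
	{
		/* usable size, excluding the trailing guard page */
		return area->size - PAGE_SIZE;
	}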
@@ -1292,15 +1284,15 @@ static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
        spin_unlock(&vmap_area_lock);
 }
 
-static void clear_vm_unlist(struct vm_struct *vm)
+static void clear_vm_uninitialized_flag(struct vm_struct *vm)
 {
        /*
-        * Before removing VM_UNLIST,
+        * Before removing VM_UNINITIALIZED,
         * we should make sure that vm has proper values.
         * Pair with smp_rmb() in show_numa_info().
         */
        smp_wmb();
-       vm->flags &= ~VM_UNLIST;
+       vm->flags &= ~VM_UNINITIALIZED;
 }
 
 static struct vm_struct *__get_vm_area_node(unsigned long size,
@@ -1480,7 +1472,6 @@ static void __vunmap(const void *addr, int deallocate_pages)
  *     conventions for vfree() arch-dependent would be a really bad idea)
  *
  *     NOTE: assumes that the object at *addr has a size >= sizeof(llist_node)
- *     
  */
 void vfree(const void *addr)
 {
@@ -1492,8 +1483,8 @@ void vfree(const void *addr)
                return;
        if (unlikely(in_interrupt())) {
                struct vfree_deferred *p = &__get_cpu_var(vfree_deferred);
-               llist_add((struct llist_node *)addr, &p->list);
-               schedule_work(&p->wq);
+               if (llist_add((struct llist_node *)addr, &p->list))
+                       schedule_work(&p->wq);
        } else
                __vunmap(addr, 1);
 }
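llist_add() returns true only when the list was empty before the new node went on, so the deferred-free worker needs a kick only for the first pointer of a batch; once scheduled, it drains everything queued since. A sketch of the drain side, following the free_work()/vfree_deferred pattern already in this file (condensed, not a verbatim copy):

	static void free_work(struct work_struct *w)
	{
		struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
		struct llist_node *llnode = llist_del_all(&p->list);

		while (llnode) {
			void *addr = llnode;	/* the llist_node lives inside the freed object */
			llnode = llist_next(llnode);
			__vunmap(addr, 1);
		}
	}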
@@ -1562,7 +1553,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
        unsigned int nr_pages, array_size, i;
        gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
 
-       nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
+       nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
        array_size = (nr_pages * sizeof(struct page *));
 
        area->nr_pages = nr_pages;
@@ -1638,21 +1629,21 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
        if (!size || (size >> PAGE_SHIFT) > totalram_pages)
                goto fail;
 
-       area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
+       area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED,
                                  start, end, node, gfp_mask, caller);
        if (!area)
                goto fail;
 
        addr = __vmalloc_area_node(area, gfp_mask, prot, node, caller);
        if (!addr)
-               return NULL;
+               goto fail;
 
        /*
-        * In this function, newly allocated vm_struct has VM_UNLIST flag.
-        * It means that vm_struct is not fully initialized.
+        * In this function, newly allocated vm_struct has VM_UNINITIALIZED
+        * flag. It means that vm_struct is not fully initialized.
         * Now, it is fully initialized, so remove this flag here.
         */
-       clear_vm_unlist(area);
+       clear_vm_uninitialized_flag(area);
 
        /*
         * A ref_count = 3 is needed because the vm_struct and vmap_area
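Turning the bare return NULL into goto fail routes this failure through the function's common exit path, so an allocation failure in __vmalloc_area_node() is reported like the others instead of failing silently. The label at the bottom of the function looked roughly like this (a reconstruction, with names as in that era's source):

	fail:
		warn_alloc_failed(gfp_mask, 0,
				  "vmalloc: allocation failure: %lu bytes\n",
				  real_size);
		return NULL;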
@@ -1994,7 +1985,7 @@ long vread(char *buf, char *addr, unsigned long count)
 
                vm = va->vm;
                vaddr = (char *) vm->addr;
-               if (addr >= vaddr + vm->size - PAGE_SIZE)
+               if (addr >= vaddr + get_vm_area_size(vm))
                        continue;
                while (addr < vaddr) {
                        if (count == 0)
@@ -2004,7 +1995,7 @@ long vread(char *buf, char *addr, unsigned long count)
                        addr++;
                        count--;
                }
-               n = vaddr + vm->size - PAGE_SIZE - addr;
+               n = vaddr + get_vm_area_size(vm) - addr;
                if (n > count)
                        n = count;
                if (!(vm->flags & VM_IOREMAP))
@@ -2076,7 +2067,7 @@ long vwrite(char *buf, char *addr, unsigned long count)
 
                vm = va->vm;
                vaddr = (char *) vm->addr;
-               if (addr >= vaddr + vm->size - PAGE_SIZE)
+               if (addr >= vaddr + get_vm_area_size(vm))
                        continue;
                while (addr < vaddr) {
                        if (count == 0)
@@ -2085,7 +2076,7 @@ long vwrite(char *buf, char *addr, unsigned long count)
                        addr++;
                        count--;
                }
-               n = vaddr + vm->size - PAGE_SIZE - addr;
+               n = vaddr + get_vm_area_size(vm) - addr;
                if (n > count)
                        n = count;
                if (!(vm->flags & VM_IOREMAP)) {
@@ -2572,11 +2563,6 @@ static void show_numa_info(struct seq_file *m, struct vm_struct *v)
                if (!counters)
                        return;
 
-               /* Pair with smp_wmb() in clear_vm_unlist() */
-               smp_rmb();
-               if (v->flags & VM_UNLIST)
-                       return;
-
                memset(counters, 0, nr_node_ids * sizeof(unsigned int));
 
                for (nr = 0; nr < v->nr_pages; nr++)
@@ -2605,6 +2591,11 @@ static int s_show(struct seq_file *m, void *p)
 
        v = va->vm;
 
+       /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
+       smp_rmb();
+       if (v->flags & VM_UNINITIALIZED)
+               return 0;
+
        seq_printf(m, "0x%pK-0x%pK %7ld",
                v->addr, v->addr + v->size, v->size);
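Moving the VM_UNINITIALIZED check out of show_numa_info() and up into s_show() means the whole /proc/vmallocinfo line is skipped for a half-constructed area, not just its NUMA counters. The barrier pairing with clear_vm_uninitialized_flag() is the usual publish/observe pattern; schematically, assuming only the fields shown here (a sketch, not compilable against the full tree as-is):

	static void publish(struct vm_struct *vm)
	{
		/* every vm field is fully set up before this point */
		smp_wmb();			/* order the setup before the flag clear */
		vm->flags &= ~VM_UNINITIALIZED;
	}

	static bool initialized(const struct vm_struct *v)
	{
		smp_rmb();			/* pairs with the smp_wmb() above */
		return !(v->flags & VM_UNINITIALIZED);
	}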