N: Martin Kepplinger
E: martink@posteo.de
-E: martin.kepplinger@theobroma-systems.com
+E: martin.kepplinger@ginzinger.com
W: http://www.martinkepplinger.com
D: mma8452 accelerometers iio driver
-D: Kernel cleanups
+D: pegasus_notetaker input driver
+D: Kernel fixes and cleanups
S: Garnisonstraße 26
S: 4020 Linz
S: Austria
printk("print_dma_descriptors start\n");
printk("iop:\n");
- printk("\tsid: 0x%lld\n", iop->sid);
+ printk("\tsid: 0x%llx\n", iop->sid);
printk("\tcdesc_out: 0x%p\n", iop->cdesc_out);
printk("\tcdesc_in: 0x%p\n", iop->cdesc_in);
int cpu; /* cpu we're on */
int preempt_count; /* 0 => preemptable, <0 => BUG */
mm_segment_t addr_limit;
- struct restart_block restart_block;
};
/*
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \
- .restart_block = { \
- .fn = do_no_restart_syscall, \
- }, \
}
#define init_thread_info (init_thread_union.thread_info)
unsigned int er0;
/* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
/* restore passed registers */
#define COPY(r) do { err |= get_user(regs->r, &usc->sc_##r); } while (0)
* consistent with the vaddr_start/vaddr_end variables.
*/
BUILD_BUG_ON(vaddr_start >= vaddr_end);
- BUILD_BUG_ON(config_enabled(CONFIG_X86_ESPFIX64) &&
+ BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_ESPFIX64) &&
vaddr_end >= EFI_VA_START);
- BUILD_BUG_ON((config_enabled(CONFIG_X86_ESPFIX64) ||
- config_enabled(CONFIG_EFI)) &&
+ BUILD_BUG_ON((IS_ENABLED(CONFIG_X86_ESPFIX64) ||
+ IS_ENABLED(CONFIG_EFI)) &&
vaddr_end >= __START_KERNEL_map);
BUILD_BUG_ON(vaddr_end > __START_KERNEL_map);
case DAC960_PD_Controller:
if (!request_region(Controller->IO_Address, 0x80,
Controller->FullModelName)) {
- DAC960_Error("IO port 0x%d busy for Controller at\n",
+ DAC960_Error("IO port 0x%lx busy for Controller at\n",
Controller, Controller->IO_Address);
goto Failure;
}
case DAC960_P_Controller:
if (!request_region(Controller->IO_Address, 0x80,
Controller->FullModelName)){
- DAC960_Error("IO port 0x%d busy for Controller at\n",
+ DAC960_Error("IO port 0x%lx busy for Controller at\n",
Controller, Controller->IO_Address);
goto Failure;
}
idev->id_vendor, idev->id_device);
}
-ipack_device_attr(id_format, "0x%hhu\n");
+ipack_device_attr(id_format, "0x%hhx\n");
static DEVICE_ATTR_RO(id);
static DEVICE_ATTR_RO(id_device);
spin_lock(&gru->gs_asid_lock);
BUG_ON((asids->mt_ctxbitmap & ctxbitmap) != ctxbitmap);
asids->mt_ctxbitmap ^= ctxbitmap;
- gru_dbg(grudev, "gid %d, gts %p, gms %p, ctxnum 0x%d, asidmap 0x%lx\n",
+ gru_dbg(grudev, "gid %d, gts %p, gms %p, ctxnum %d, asidmap 0x%lx\n",
gru->gs_gid, gts, gms, gts->ts_ctxnum, gms->ms_asidmap[0]);
spin_unlock(&gru->gs_asid_lock);
spin_unlock(&gms->ms_asid_lock);
bad_entry:
EXOFS_ERR(
"ERROR [exofs_check_page]: bad entry in directory(0x%lx): %s - "
- "offset=%lu, inode=0x%llu, rec_len=%d, name_len=%d\n",
+ "offset=%lu, inode=0x%llx, rec_len=%d, name_len=%d\n",
dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs,
_LLU(le64_to_cpu(p->inode_no)),
rec_len, p->name_len);
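/*
 * Illustration only, not part of any patch above: the printk() format
 * fixes in this series all follow the same rule -- the conversion must
 * match both the size and the base of the argument.  Function and
 * variable names are made up; u64/u8 come from <linux/types.h>.
 */
static void printk_format_example(void)
{
	u64 sid = 0xdeadbeef;		/* 64-bit value, hex  -> "0x%llx" */
	unsigned long io_port = 0x1f0;	/* unsigned long, hex -> "0x%lx"  */
	u8 id_fmt = 0x0a;		/* u8, hex            -> "0x%hhx" */
	int ctxnum = 3;			/* plain int, decimal -> "%d"     */

	printk("sid 0x%llx port 0x%lx format 0x%hhx ctxnum %d\n",
	       sid, io_port, id_fmt, ctxnum);
}
/*
 * The broken forms ("0x%lld", "0x%d", "0x%hhu") paired a hex prefix with a
 * decimal conversion, and in the DAC960 case "%d" was also the wrong size
 * for the unsigned long argument.
 */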
{
struct mm_struct *mm = file->private_data;
unsigned int nwords = 0;
+
+ if (!mm)
+ return 0;
do {
nwords += 2;
} while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
#include <generated/autoksyms.h>
#define __EXPORT_SYMBOL(sym, val, sec) \
- __cond_export_sym(sym, val, sec, config_enabled(__KSYM_##sym))
+ __cond_export_sym(sym, val, sec, __is_defined(__KSYM_##sym))
#define __cond_export_sym(sym, val, sec, conf) \
___cond_export_sym(sym, val, sec, conf)
#define ___cond_export_sym(sym, val, sec, enabled) \
* When CONFIG_BOOGER is not defined, we generate a (... 1, 0) pair, and when
* the last step cherry picks the 2nd arg, we get a zero.
*/
-#define config_enabled(cfg) ___is_defined(cfg)
#define __is_defined(x) ___is_defined(x)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
* otherwise. For boolean options, this is equivalent to
* IS_ENABLED(CONFIG_FOO).
*/
-#define IS_BUILTIN(option) config_enabled(option)
+#define IS_BUILTIN(option) __is_defined(option)
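/*
 * Illustration only, not part of the kconfig.h patch: one IS_BUILTIN()
 * invocation expanded by hand, assuming the usual autoconf.h output
 * ("#define CONFIG_FOO 1" for a built-in option) and the
 * __ARG_PLACEHOLDER_1 / __take_second_arg() helpers defined earlier in
 * this header.
 *
 * CONFIG_FOO defined to 1:
 *
 *   IS_BUILTIN(CONFIG_FOO)
 *     -> __is_defined(CONFIG_FOO)
 *     -> ___is_defined(1)                        (the argument expands first)
 *     -> ____is_defined(__ARG_PLACEHOLDER_1)
 *     -> __take_second_arg(0, 1, 0)              (placeholder expands to "0,")
 *     -> 1
 *
 * CONFIG_FOO not defined (the placeholder token is never replaced):
 *
 *   IS_BUILTIN(CONFIG_FOO)
 *     -> __is_defined(CONFIG_FOO)
 *     -> ___is_defined(CONFIG_FOO)
 *     -> ____is_defined(__ARG_PLACEHOLDER_CONFIG_FOO)
 *     -> __take_second_arg(__ARG_PLACEHOLDER_CONFIG_FOO 1, 0)
 *     -> 0
 *
 * config_enabled() expanded the same way; the hunks above (and the x86
 * BUILD_BUG_ON and export.h changes earlier in this series) only move its
 * users over to __is_defined() / IS_ENABLED().
 */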
/*
* IS_MODULE(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'm', 0
* otherwise.
*/
-#define IS_MODULE(option) config_enabled(option##_MODULE)
+#define IS_MODULE(option) __is_defined(option##_MODULE)
/*
* IS_REACHABLE(CONFIG_FOO) evaluates to 1 if the currently compiled
size_t alen;
alen = min(len, DATALEN_MSG);
- msg = kmalloc(sizeof(*msg) + alen, GFP_KERNEL);
+ msg = kmalloc(sizeof(*msg) + alen, GFP_KERNEL_ACCOUNT);
if (msg == NULL)
return NULL;
while (len > 0) {
struct msg_msgseg *seg;
alen = min(len, DATALEN_SEG);
- seg = kmalloc(sizeof(*seg) + alen, GFP_KERNEL);
+ seg = kmalloc(sizeof(*seg) + alen, GFP_KERNEL_ACCOUNT);
if (seg == NULL)
goto out_err;
*pseg = seg;
/*
 * We are interested in code coverage as a function of syscall inputs,
* so we ignore code executed in interrupts.
+ * The checks for whether we are in an interrupt are open-coded, because
+ * 1. We can't use in_interrupt() here, since it also returns true
+ * when we are inside local_bh_disable() section.
+ * 2. We don't want to use (in_irq() | in_serving_softirq() | in_nmi()),
+ * since that leads to slower generated code (three separate tests,
+ * one for each of the flags).
*/
- if (!t || in_interrupt())
+ if (!t || (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET
+ | NMI_MASK)))
return;
mode = READ_ONCE(t->kcov_mode);
if (mode == KCOV_MODE_TRACE) {
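/*
 * Illustration only, not part of the kcov patch, assuming the usual
 * <linux/preempt.h> layout: the three tests the comment mentions are
 *
 *   in_irq()             -> preempt_count() & HARDIRQ_MASK
 *   in_serving_softirq() -> preempt_count() & SOFTIRQ_OFFSET
 *   in_nmi()             -> preempt_count() & NMI_MASK
 *
 * so one test against (HARDIRQ_MASK | SOFTIRQ_OFFSET | NMI_MASK) covers all
 * three.  in_interrupt() would test the full SOFTIRQ_MASK instead, which is
 * also raised by local_bh_disable(), and would wrongly skip coverage for
 * process context running inside a local_bh_disable() section.
 */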
int "Warn for stack frames larger than (needs gcc 4.4)"
range 0 8192
default 0 if KASAN
+ default 2048 if GCC_PLUGIN_LATENT_ENTROPY
default 1024 if !64BIT
default 2048 if 64BIT
help
struct gen_pool_chunk *chunk;
unsigned long addr = 0;
int order = pool->min_alloc_order;
- int nbits, start_bit = 0, end_bit, remain;
+ int nbits, start_bit, end_bit, remain;
#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
BUG_ON(in_nmi());
if (size > atomic_read(&chunk->avail))
continue;
+ start_bit = 0;
end_bit = chunk_size(chunk) >> order;
retry:
start_bit = algo(chunk->bits, end_bit, start_bit,
STACK_ALLOC_ALIGN)
#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \
STACK_ALLOC_NULL_PROTECTION_BITS - STACK_ALLOC_OFFSET_BITS)
-#define STACK_ALLOC_SLABS_CAP 1024
+#define STACK_ALLOC_SLABS_CAP 8192
#define STACK_ALLOC_MAX_SLABS \
(((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? \
(1LL << (STACK_ALLOC_INDEX_BITS)) : STACK_ALLOC_SLABS_CAP)
read_lock(&tasklist_lock);
do_each_thread(g, p) {
- scan_block(task_stack_page(p), task_stack_page(p) +
- THREAD_SIZE, NULL);
+ void *stack = try_get_task_stack(p);
+ if (stack) {
+ scan_block(stack, stack + THREAD_SIZE, NULL);
+ put_task_stack(p);
+ }
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
}
err = memcg_init_list_lru(lru, memcg_aware);
if (err) {
kfree(lru->node);
+ /* Do this so a list_lru_destroy() doesn't crash: */
+ lru->node = NULL;
goto out;
}
current->flags & PF_EXITING))
goto force;
+ /*
+ * Prevent unbounded recursion when reclaim operations need to
+ * allocate memory. This might exceed the limits temporarily,
+ * but we prefer facilitating memory reclaim and getting back
+ * under the limit over triggering OOM kills in these cases.
+ */
+ if (unlikely(current->flags & PF_MEMALLOC))
+ goto force;
+
if (unlikely(task_in_memcg_oom(current)))
goto nomem;
}
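/*
 * Illustration only, not part of the memcontrol patch: the recursion the
 * PF_MEMALLOC check breaks (call chain sketched from the reasoning above,
 * the exact intermediate callers vary):
 *
 *   try_charge()                          charge hits the memcg limit
 *     -> try_to_free_mem_cgroup_pages()   enter direct reclaim
 *       -> shrinker / writeback path      needs to allocate memory itself
 *         -> __GFP_ACCOUNT allocation
 *           -> try_charge()               same limit is still exceeded ...
 *
 * The outer reclaim marks itself with PF_MEMALLOC (see the vmscan hunk at
 * the end of this series), so the inner try_charge() now takes the "force"
 * exit and the chain terminates.
 */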
*p = '\0';
- printk("(%s) ", tmp);
+ printk(KERN_CONT "(%s) ", tmp);
}
/*
free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
show_node(zone);
- printk("%s"
+ printk(KERN_CONT
+ "%s"
" free:%lukB"
" min:%lukB"
" low:%lukB"
K(zone_page_state(zone, NR_FREE_CMA_PAGES)));
printk("lowmem_reserve[]:");
for (i = 0; i < MAX_NR_ZONES; i++)
- printk(" %ld", zone->lowmem_reserve[i]);
- printk("\n");
+ printk(KERN_CONT " %ld", zone->lowmem_reserve[i]);
+ printk(KERN_CONT "\n");
}
for_each_populated_zone(zone) {
if (skip_free_areas_node(filter, zone_to_nid(zone)))
continue;
show_node(zone);
- printk("%s: ", zone->name);
+ printk(KERN_CONT "%s: ", zone->name);
spin_lock_irqsave(&zone->lock, flags);
for (order = 0; order < MAX_ORDER; order++) {
}
spin_unlock_irqrestore(&zone->lock, flags);
for (order = 0; order < MAX_ORDER; order++) {
- printk("%lu*%lukB ", nr[order], K(1UL) << order);
+ printk(KERN_CONT "%lu*%lukB ",
+ nr[order], K(1UL) << order);
if (nr[order])
show_migration_types(types[order]);
}
- printk("= %lukB\n", K(total));
+ printk(KERN_CONT "= %lukB\n", K(total));
}
hugetlb_show_meminfo();
spin_lock_init(&parent->list_lock);
parent->free_objects = 0;
parent->free_touched = 0;
+ parent->num_slabs = 0;
}
#define MAKE_LIST(cachep, listp, slab, nodeid) \
* guaranteed to be valid until irq is re-enabled, because it will be
* freed after synchronize_sched().
*/
- if (force_change)
+ if (old_shared && force_change)
synchronize_sched();
fail:
for_each_kmem_cache_node(cachep, node, n) {
unsigned long active_objs = 0, num_objs = 0, free_objects = 0;
unsigned long active_slabs = 0, num_slabs = 0;
+ unsigned long num_slabs_partial = 0, num_slabs_free = 0;
+ unsigned long num_slabs_full;
spin_lock_irqsave(&n->list_lock, flags);
- list_for_each_entry(page, &n->slabs_full, lru) {
- active_objs += cachep->num;
- active_slabs++;
- }
+ num_slabs = n->num_slabs;
list_for_each_entry(page, &n->slabs_partial, lru) {
active_objs += page->active;
- active_slabs++;
+ num_slabs_partial++;
}
list_for_each_entry(page, &n->slabs_free, lru)
- num_slabs++;
+ num_slabs_free++;
free_objects += n->free_objects;
spin_unlock_irqrestore(&n->list_lock, flags);
- num_slabs += active_slabs;
num_objs = num_slabs * cachep->num;
+ active_slabs = num_slabs - num_slabs_free;
+ num_slabs_full = num_slabs -
+ (num_slabs_partial + num_slabs_free);
+ active_objs += (num_slabs_full * cachep->num);
+
pr_warn(" node %d: slabs: %ld/%ld, objs: %ld/%ld, free: %ld\n",
node, active_slabs, num_slabs, active_objs, num_objs,
free_objects);
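/*
 * Worked example, invented numbers (not part of the slab patch): a node
 * with n->num_slabs == 10, 3 slabs on slabs_free, 2 partial slabs holding
 * 5 active objects in total, and cachep->num == 8 objects per slab:
 *
 *   num_slabs_partial = 2,  num_slabs_free = 3
 *   num_objs       = 10 * 8       = 80
 *   active_slabs   = 10 - 3       = 7
 *   num_slabs_full = 10 - (2 + 3) = 5
 *   active_objs    = 5 + 5 * 8    = 45
 *
 * The full slabs no longer need to be walked just to count their objects;
 * the slabinfo path further down repeats the same arithmetic.
 */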
page = list_entry(p, struct page, lru);
list_del(&page->lru);
+ n->num_slabs--;
/*
* Safe to drop the lock. The slab is no longer linked
* to the cache.
list_add_tail(&page->lru, &(n->slabs_free));
else
fixup_slab_list(cachep, n, page, &list);
+
+ n->num_slabs++;
STATS_INC_GROWN(cachep);
n->free_objects += cachep->num - page->active;
spin_unlock(&n->list_lock);
page = list_last_entry(&n->slabs_free, struct page, lru);
list_move(&page->lru, list);
+ n->num_slabs--;
}
}
unsigned long num_objs;
unsigned long active_slabs = 0;
unsigned long num_slabs, free_objects = 0, shared_avail = 0;
+ unsigned long num_slabs_partial = 0, num_slabs_free = 0;
+ unsigned long num_slabs_full = 0;
const char *name;
char *error = NULL;
int node;
check_irq_on();
spin_lock_irq(&n->list_lock);
- list_for_each_entry(page, &n->slabs_full, lru) {
- if (page->active != cachep->num && !error)
- error = "slabs_full accounting error";
- active_objs += cachep->num;
- active_slabs++;
- }
+ num_slabs += n->num_slabs;
+
list_for_each_entry(page, &n->slabs_partial, lru) {
if (page->active == cachep->num && !error)
error = "slabs_partial accounting error";
if (!page->active && !error)
error = "slabs_partial accounting error";
active_objs += page->active;
- active_slabs++;
+ num_slabs_partial++;
}
+
list_for_each_entry(page, &n->slabs_free, lru) {
if (page->active && !error)
error = "slabs_free accounting error";
- num_slabs++;
+ num_slabs_free++;
}
+
free_objects += n->free_objects;
if (n->shared)
shared_avail += n->shared->avail;
spin_unlock_irq(&n->list_lock);
}
- num_slabs += active_slabs;
num_objs = num_slabs * cachep->num;
+ active_slabs = num_slabs - num_slabs_free;
+ num_slabs_full = num_slabs - (num_slabs_partial + num_slabs_free);
+ active_objs += (num_slabs_full * cachep->num);
+
if (num_objs - active_objs != free_objects && !error)
error = "free_objects accounting error";
struct list_head slabs_partial; /* partial list first, better asm code */
struct list_head slabs_full;
struct list_head slabs_free;
+ unsigned long num_slabs;
unsigned long free_objects;
unsigned int free_limit;
unsigned int colour_next; /* Per-node cache coloring */
sc.gfp_mask,
sc.reclaim_idx);
+ current->flags |= PF_MEMALLOC;
nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
+ current->flags &= ~PF_MEMALLOC;
trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);