#define __KMALLOC_CACHE 0x20000000 /* objects freed using kfree */
#define __PAGE_ALLOC_FALLBACK 0x10000000 /* Allow fallback to page alloc */
-/* Not all arches define cache_line_size */
-#ifndef cache_line_size
-#define cache_line_size() L1_CACHE_BYTES
-#endif
-
static int kmem_size = sizeof(struct kmem_cache);
#ifdef CONFIG_SMP
static struct notifier_block slab_notifier;
#endif

static void object_err(struct kmem_cache *s, struct page *page,
			u8 *object, char *reason)
{
- slab_bug(s, reason);
+ slab_bug(s, "%s", reason);
print_trailer(s, page, object);
}
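
The one-line change above closes a classic format-string hole: `reason` flows into a printf-style sink, so any `%` conversion it happens to contain would be interpreted against nonexistent variadic arguments. A minimal userspace sketch of the same pattern (plain C, not kernel code; `report()` and its strings are hypothetical):

#include <stdio.h>

/* Hypothetical reporter mirroring the object_err() fix above. */
static void report(const char *reason)
{
	/*
	 * Unsafe: printf(reason) would treat reason as a format string,
	 * so a stray "%s" or "%n" in it reads (or writes) through
	 * nonexistent variadic arguments.  Passing it as the argument
	 * to "%s" makes it plain data.
	 */
	printf("%s\n", reason);
}

int main(void)
{
	report("freed object still referenced: %p?"); /* "%p" prints literally */
	return 0;
}
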
static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...)
{
	va_list args;
	char buf[100];

	va_start(args, fmt);
vsnprintf(buf, sizeof(buf), fmt, args);
va_end(args);
- slab_bug(s, fmt);
+ slab_bug(s, "%s", buf);
print_page_info(page);
dump_stack();
}
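
The second hunk fixes the same class of bug one level up: `fmt`'s variadic arguments are consumed by vsnprintf(), so forwarding `fmt` itself to another printf-style function re-interprets its conversions with no arguments behind them. The formatted `buf` has to be passed on as data. A self-contained sketch of that pattern (the `bug()`/`err()` names are illustrative stand-ins, not the kernel's):

#include <stdarg.h>
#include <stdio.h>

/* Stand-in for slab_bug(): a printf-style sink. */
static void bug(const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	fputs("BUG: ", stderr);
	vfprintf(stderr, fmt, args);
	va_end(args);
	fputc('\n', stderr);
}

/* Stand-in for slab_err(): format once, then forward the result. */
static void err(const char *fmt, ...)
{
	char buf[100];
	va_list args;

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	bug("%s", buf);		/* right: buf is plain data   */
	/* bug(fmt); */		/* wrong: args already consumed */
}

int main(void)
{
	err("objects=%d, expected=%d", 12, 16);
	return 0;
}
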
/*
 * Get a page from somewhere. Search in increasing NUMA distances.
 */
static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
{
#ifdef CONFIG_NUMA
struct zonelist *zonelist;
- struct zone **z;
+ struct zoneref *z;
+ struct zone *zone;
+ enum zone_type high_zoneidx = gfp_zone(flags);
struct page *page;
	/*
	 * The defrag ratio allows a configuration of the tradeoffs between
	 * inter node defragmentation and node local allocations.
	 */
	if (!s->remote_node_defrag_ratio ||
			get_cycles() % 1024 > s->remote_node_defrag_ratio)
return NULL;
- zonelist = &NODE_DATA(
- slab_node(current->mempolicy))->node_zonelists[gfp_zone(flags)];
- for (z = zonelist->zones; *z; z++) {
+ zonelist = node_zonelist(slab_node(current->mempolicy), flags);
+ for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
struct kmem_cache_node *n;
- n = get_node(s, zone_to_nid(*z));
+ n = get_node(s, zone_to_nid(zone));
- if (n && cpuset_zone_allowed_hardwall(*z, flags) &&
+ if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
n->nr_partial > MIN_PARTIAL) {
page = get_partial_node(n);
			if (page)
				return page;
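
The zonelist hunk tracks the rework that replaced the old NULL-terminated array of `struct zone *` with an array of `struct zoneref`, which caches each zone's index next to the pointer so the iterator can filter on `high_zoneidx` without dereferencing every zone. A simplified, userspace-compilable sketch of the idea (the types and the `first_zone_at_or_below()` helper are illustrative stand-ins, not the kernel's):

#include <stddef.h>

/* Illustrative stand-ins for the kernel structures. */
struct zone {
	int node;		/* owning NUMA node */
};

struct zoneref {
	struct zone *zone;	/* pointer to the zone */
	int zone_idx;		/* cached zone index (e.g. ZONE_NORMAL) */
};

/*
 * Walk a zoneref array terminated by a NULL zone pointer and return
 * the first zone at or below the requested index -- the filtering
 * for_each_zone_zonelist() performs while iterating.
 */
static struct zone *first_zone_at_or_below(struct zoneref *refs,
					   int high_zoneidx)
{
	struct zoneref *z;

	for (z = refs; z->zone; z++)
		if (z->zone_idx <= high_zoneidx)
			return z->zone;
	return NULL;
}

With the index stored in the zoneref, skipping disallowed zones costs one integer compare per entry instead of a pointer dereference per zone, which is why the loop above no longer needs `gfp_zone(flags)` recomputed inside it.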