X-Git-Url: https://nv-tegra.nvidia.com/r/gitweb?p=linux-2.6.git;a=blobdiff_plain;f=mm%2Fslub.c;fp=mm%2Fslub.c;h=992ecd4f0d393c748dceec95e3ddd714b16557b4;hp=d821ce6fff39b1872c2f8c8b42ed44f656c7fbc0;hb=e97e386b126c2d60b8da61ce1e4964b41b3d1514;hpb=c124f5b54f879e5870befcc076addbd5d614663f

diff --git a/mm/slub.c b/mm/slub.c
index d821ce6fff3..992ecd4f0d3 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -186,11 +186,6 @@ static inline void ClearSlabDebug(struct page *page)
 #define __OBJECT_POISON		0x80000000 /* Poison object */
 #define __SYSFS_ADD_DEFERRED	0x40000000 /* Not yet visible via sysfs */
 
-/* Not all arches define cache_line_size */
-#ifndef cache_line_size
-#define cache_line_size() L1_CACHE_BYTES
-#endif
-
 static int kmem_size = sizeof(struct kmem_cache);
 
 #ifdef CONFIG_SMP
@@ -1330,7 +1325,9 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
 {
 #ifdef CONFIG_NUMA
 	struct zonelist *zonelist;
-	struct zone **z;
+	struct zoneref *z;
+	struct zone *zone;
+	enum zone_type high_zoneidx = gfp_zone(flags);
 	struct page *page;
 
 	/*
@@ -1355,14 +1352,13 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
 			get_cycles() % 1024 > s->remote_node_defrag_ratio)
 		return NULL;
 
-	zonelist = &NODE_DATA(
-		slab_node(current->mempolicy))->node_zonelists[gfp_zone(flags)];
-	for (z = zonelist->zones; *z; z++) {
+	zonelist = node_zonelist(slab_node(current->mempolicy), flags);
+	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
 		struct kmem_cache_node *n;
 
-		n = get_node(s, zone_to_nid(*z));
+		n = get_node(s, zone_to_nid(zone));
 
-		if (n && cpuset_zone_allowed_hardwall(*z, flags) &&
+		if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
 				n->nr_partial > MIN_PARTIAL) {
 			page = get_partial_node(n);
 			if (page)
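
Note (not part of the patch): the second and third hunks convert get_any_partial() from the old open-coded walk over zonelist->zones[] to the zoneref-based for_each_zone_zonelist() iterator, selecting the zonelist with node_zonelist() and bounding it with gfp_zone(). The following is a minimal sketch of that iteration pattern, assuming the node_zonelist()/for_each_zone_zonelist() API the patch targets; the helper name walk_partial_zones() is illustrative only.

#include <linux/gfp.h>
#include <linux/mmzone.h>

/* Illustrative sketch only -- shows the iteration style the hunks
 * above switch to, not code from the patch itself. */
static void walk_partial_zones(int node, gfp_t flags)
{
	struct zonelist *zonelist = node_zonelist(node, flags);
	enum zone_type high_zoneidx = gfp_zone(flags);	/* highest zone usable for these flags */
	struct zoneref *z;
	struct zone *zone;

	/* Visits each zone in the node's zonelist at or below high_zoneidx,
	 * replacing the old "for (z = zonelist->zones; *z; z++)" loop. */
	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
		/* per-zone work, e.g. get_node(s, zone_to_nid(zone)) */
	}
}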