mm: failslab: Add support to force slab alloc failures based on size.
Krishna Reddy [Wed, 18 Jul 2012 21:06:50 +0000 (14:06 -0700)]
Any alloc request, with size greater than PAGE_SIZE, to the
slab allocator is not guaranteed to succeed, even though
enough memory is available, as memory can become fully fragmented
over time.
This allows finding slab allocator requests with size
greater than PAGE_SIZE early and avoids discovering such issues
much later in the product life cycle.

Change-Id: Ibf13e626a671d41569415a56e775ac5e96b90ba3
Signed-off-by: Krishna Reddy <vdumpa@nvidia.com>
Reviewed-on: http://git-master/r/116855
GVS: Gerrit_Virtual_Submit
Reviewed-by: Yu-Huan Hsu <yhsu@nvidia.com>
(cherry picked from commit 604a65f8e3c9472886b48b1a287f78f11235d1ce)
Reviewed-on: http://git-master/r/118193
Reviewed-by: Alex Waterman <alexw@nvidia.com>

mm/failslab.c

index 0dd7b8f..4959868 100644 (file)
@@ -5,14 +5,29 @@ static struct {
        struct fault_attr attr;
        u32 ignore_gfp_wait;
        int cache_filter;
+       u32 size;
 } failslab = {
        .attr = FAULT_ATTR_INITIALIZER,
        .ignore_gfp_wait = 1,
        .cache_filter = 0,
+       .size = 0,
 };
 
+static void fail_dump(struct fault_attr *attr)
+{
+       if (attr->verbose > 0)
+               printk(KERN_NOTICE "FAULT_INJECTION: forcing a failure\n");
+       if (attr->verbose > 1)
+               dump_stack();
+}
+
 bool should_failslab(size_t size, gfp_t gfpflags, unsigned long cache_flags)
 {
+       if (failslab.size && size > failslab.size) {
+               fail_dump(&failslab.attr);
+               return true;
+       }
+
        if (gfpflags & __GFP_NOFAIL)
                return false;
 
@@ -48,6 +63,10 @@ static int __init failslab_debugfs_init(void)
                                &failslab.cache_filter))
                goto fail;
 
+       if (!debugfs_create_u32("size", mode, dir,
+                               &failslab.size))
+               goto fail;
+
        return 0;
 fail:
        debugfs_remove_recursive(dir);