From fea1d7aa02b10e147f9f5f8037eaa606a59d1610 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 14 Jul 2015 14:26:34 +0200
Subject: [PATCH 174/351] idr: Use local lock instead of preempt enable/disable
X-NVConfidentiality: public

idr_preload() disables preemption to protect the per-CPU preload buffers
and to prevent the task from migrating to another CPU. On PREEMPT_RT_FULL
the section between idr_preload() and idr_preload_end() must stay
preemptible, because callers typically take sleeping spinlocks inside it.
Use a local lock instead: it still protects the per-CPU variable and
prevents migration, without disabling preemption.
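
For reference, the caller pattern that has to keep working is the one
documented for idr_preload(); a minimal sketch, with hypothetical names
my_idr, my_lock and my_ptr:

	idr_preload(GFP_KERNEL);	/* takes the local lock on RT */
	spin_lock(&my_lock);		/* a sleeping lock on RT */
	id = idr_alloc(&my_idr, my_ptr, 0, 0, GFP_NOWAIT);
	spin_unlock(&my_lock);
	idr_preload_end();		/* drops the local lock on RT */

With bare preempt_disable() the spin_lock() above would be acquired in a
non-preemptible region, which is not allowed on RT.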
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/idr.h |  4 ++++
 lib/idr.c           | 43 +++++++++++++++++++++++++++++++++++++------
 2 files changed, 41 insertions(+), 6 deletions(-)

diff --git a/include/linux/idr.h b/include/linux/idr.h
index 013fd9bc4cb6..f62be0aec911 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -95,10 +95,14 @@ bool idr_is_empty(struct idr *idp);
  * Each idr_preload() should be matched with an invocation of this
  * function. See idr_preload() for details.
  */
+#ifdef CONFIG_PREEMPT_RT_FULL
+void idr_preload_end(void);
+#else
 static inline void idr_preload_end(void)
 {
 	preempt_enable();
 }
+#endif
 
 /**
  * idr_find - return pointer for given id
diff --git a/lib/idr.c b/lib/idr.c
index 6098336df267..9decbe914595 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -30,6 +30,7 @@
 #include <linux/idr.h>
 #include <linux/spinlock.h>
 #include <linux/percpu.h>
+#include <linux/locallock.h>
 
 #define MAX_IDR_SHIFT		(sizeof(int) * 8 - 1)
 #define MAX_IDR_BIT		(1U << MAX_IDR_SHIFT)
@@ -45,6 +46,37 @@ static DEFINE_PER_CPU(struct idr_layer *, idr_preload_head);
 static DEFINE_PER_CPU(int, idr_preload_cnt);
 static DEFINE_SPINLOCK(simple_ida_lock);
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+static DEFINE_LOCAL_IRQ_LOCK(idr_lock);
+
+static inline void idr_preload_lock(void)
+{
+	local_lock(idr_lock);
+}
+
+static inline void idr_preload_unlock(void)
+{
+	local_unlock(idr_lock);
+}
+
+void idr_preload_end(void)
+{
+	idr_preload_unlock();
+}
+EXPORT_SYMBOL(idr_preload_end);
+#else
+static inline void idr_preload_lock(void)
+{
+	preempt_disable();
+}
+
+static inline void idr_preload_unlock(void)
+{
+	preempt_enable();
+}
+#endif
+
+
 /* the maximum ID which can be allocated given idr->layers */
 static int idr_max(int layers)
 {
@@ -115,14 +147,14 @@ static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
 	 * context. See idr_preload() for details.
 	 */
 	if (!in_interrupt()) {
-		preempt_disable();
+		idr_preload_lock();
 		new = __this_cpu_read(idr_preload_head);
 		if (new) {
 			__this_cpu_write(idr_preload_head, new->ary[0]);
 			__this_cpu_dec(idr_preload_cnt);
 			new->ary[0] = NULL;
 		}
-		preempt_enable();
+		idr_preload_unlock();
 		if (new)
 			return new;
 	}
@@ -366,7 +398,6 @@ static void idr_fill_slot(struct idr *idr, void *ptr, int id,
 	idr_mark_full(pa, id);
 }
 
-
 /**
  * idr_preload - preload for idr_alloc()
  * @gfp_mask: allocation mask to use for preloading
@@ -401,7 +432,7 @@ void idr_preload(gfp_t gfp_mask)
 	WARN_ON_ONCE(in_interrupt());
 	might_sleep_if(gfpflags_allow_blocking(gfp_mask));
 
-	preempt_disable();
+	idr_preload_lock();
 
 	/*
 	 * idr_alloc() is likely to succeed w/o full idr_layer buffer and
@@ -413,9 +444,9 @@ void idr_preload(gfp_t gfp_mask)
 	while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) {
 		struct idr_layer *new;
 
-		preempt_enable();
+		idr_preload_unlock();
 		new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
-		preempt_disable();
+		idr_preload_lock();
 		if (!new)
 			break;
 
-- 
2.10.1