blkcg: add blkg_policy_data->plid
diff --git a/block/blk-core.c b/block/blk-core.c
index a182b58..c973249 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -40,6 +40,7 @@
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
+EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
 
 DEFINE_IDA(blk_queue_ida);
 
@@ -219,12 +220,13 @@ static void blk_delay_work(struct work_struct *work)
  * Description:
  *   Sometimes queueing needs to be postponed for a little while, to allow
  *   resources to come back. This function will make sure that queueing is
- *   restarted around the specified time.
+ *   restarted around the specified time. Queue lock must be held.
  */
 void blk_delay_queue(struct request_queue *q, unsigned long msecs)
 {
-       queue_delayed_work(kblockd_workqueue, &q->delay_work,
-                               msecs_to_jiffies(msecs));
+       if (likely(!blk_queue_dead(q)))
+               queue_delayed_work(kblockd_workqueue, &q->delay_work,
+                                  msecs_to_jiffies(msecs));
 }
 EXPORT_SYMBOL(blk_delay_queue);
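
For context, here is a hedged sketch of the kind of caller blk_delay_queue() is written for: a driver ->request_fn() that runs with the queue lock held and postpones dispatch while its hardware is out of resources. The example_hw_*() helpers and the 3 ms delay are illustrative assumptions, not part of the patch.

/* Sketch of a hypothetical driver ->request_fn(); queue lock held on entry. */
static void example_request_fn(struct request_queue *q)
{
        struct request *rq;

        while ((rq = blk_fetch_request(q)) != NULL) {
                if (!example_hw_can_accept(q->queuedata)) {
                        /* Put the request back and retry in ~3 ms. */
                        blk_requeue_request(q, rq);
                        blk_delay_queue(q, 3);
                        return;
                }
                example_hw_submit(q->queuedata, rq);
        }
}

With the hunk above, a delay requested on a queue that has already been marked dead is silently dropped instead of scheduling delayed work that could call into a torn-down driver.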
 
@@ -293,6 +295,34 @@ void blk_sync_queue(struct request_queue *q)
 EXPORT_SYMBOL(blk_sync_queue);
 
 /**
+ * __blk_run_queue_uncond - run a queue whether or not it has been stopped
+ * @q: The queue to run
+ *
+ * Description:
+ *    Invoke request handling on a queue if there are any pending requests.
+ *    May be used to restart request handling after a request has completed.
+ *    This variant runs the queue whether or not the queue has been
+ *    stopped. Must be called with the queue lock held and interrupts
+ *    disabled. See also @blk_run_queue.
+ */
+inline void __blk_run_queue_uncond(struct request_queue *q)
+{
+       if (unlikely(blk_queue_dead(q)))
+               return;
+
+       /*
+        * Some request_fn implementations, e.g. scsi_request_fn(), unlock
+        * the queue lock internally. As a result multiple threads may be
+        * running such a request function concurrently. Keep track of the
+        * number of active request_fn invocations such that blk_drain_queue()
+        * can wait until all these request_fn calls have finished.
+        */
+       q->request_fn_active++;
+       q->request_fn(q);
+       q->request_fn_active--;
+}
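
The counter exists because of the pattern sketched below, modeled on what scsi_request_fn() does; the example_* names are illustrative only. While the queue lock is dropped, another thread may enter ->request_fn() for the same queue, so q->request_fn_active can exceed one and the drain code must wait for it to reach zero.

static void example_lock_dropping_request_fn(struct request_queue *q)
{
        struct request *rq;

        /* Called with the queue lock held and interrupts disabled. */
        while ((rq = blk_fetch_request(q)) != NULL) {
                spin_unlock_irq(q->queue_lock);
                example_hw_submit(rq);          /* hypothetical helper */
                spin_lock_irq(q->queue_lock);
        }
}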
+
+/**
  * __blk_run_queue - run a single device queue
  * @q: The queue to run
  *
@@ -305,7 +335,7 @@ void __blk_run_queue(struct request_queue *q)
        if (unlikely(blk_queue_stopped(q)))
                return;
 
-       q->request_fn(q);
+       __blk_run_queue_uncond(q);
 }
 EXPORT_SYMBOL(__blk_run_queue);
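
For reference, the locked wrapper is unchanged by this patch and, essentially as it appears elsewhere in blk-core.c, only takes the queue lock around the call, so the full chain is blk_run_queue() -> __blk_run_queue() (skipped when stopped) -> __blk_run_queue_uncond() (skipped when dead).

void blk_run_queue(struct request_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        __blk_run_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
}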
 
@@ -315,11 +345,11 @@ EXPORT_SYMBOL(__blk_run_queue);
  *
  * Description:
  *    Tells kblockd to perform the equivalent of @blk_run_queue on behalf
- *    of us.
+ *    of us. The caller must hold the queue lock.
  */
 void blk_run_queue_async(struct request_queue *q)
 {
-       if (likely(!blk_queue_stopped(q)))
+       if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
                mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
 }
 EXPORT_SYMBOL(blk_run_queue_async);
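
An illustrative caller (hypothetical code, not from the patch) is a completion path that already holds the queue lock and wants to kick the queue without recursing into ->request_fn(); with the extra check above, the kick degrades to a no-op once the queue is dead.

static void example_complete_request(struct request_queue *q,
                                     struct request *rq, int error)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        __blk_end_request_all(rq, error);
        blk_run_queue_async(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
}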
@@ -388,6 +418,7 @@ static void __blk_drain_queue(struct request_queue *q, bool drain_all)
                        __blk_run_queue(q);
 
                drain |= q->nr_rqs_elvpriv;
+               drain |= q->request_fn_active;
 
                /*
                 * Unfortunately, requests are queued at and tracked from
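
The new term feeds into the drain condition as sketched below, a condensed paraphrase of the surrounding loop in __blk_drain_queue() (most of which lies outside this hunk; the function itself is authoritative). The loop now also keeps waiting until every in-flight ->request_fn() call has returned.

        while (true) {
                bool drain = false;

                if (!list_empty(&q->queue_head) && q->request_fn)
                        __blk_run_queue(q);

                drain |= q->nr_rqs_elvpriv;
                drain |= q->request_fn_active;  /* added by this patch */
                /* ... more counters are checked when drain_all is set ... */

                if (!drain)
                        break;

                spin_unlock_irq(q->queue_lock);
                msleep(10);
                spin_lock_irq(q->queue_lock);
        }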
@@ -477,8 +508,8 @@ EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
  * blk_cleanup_queue - shutdown a request queue
  * @q: request queue to shutdown
  *
- * Mark @q DYING, drain all pending requests, destroy and put it.  All
- * future requests will be failed immediately with -ENODEV.
+ * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
+ * put it.  All future requests will be failed immediately with -ENODEV.
  */
 void blk_cleanup_queue(struct request_queue *q)
 {
@@ -507,9 +538,13 @@ void blk_cleanup_queue(struct request_queue *q)
        spin_unlock_irq(lock);
        mutex_unlock(&q->sysfs_lock);
 
-       /* drain all requests queued before DYING marking */
+       /*
+        * Drain all requests queued before the DYING marking. Set the DEAD flag
+        * to prevent q->request_fn() from being invoked once draining has finished.
+        */
        spin_lock_irq(lock);
        __blk_drain_queue(q, true);
+       queue_flag_set(QUEUE_FLAG_DEAD, q);
        spin_unlock_irq(lock);
 
        /* @q won't process any more request, flush async actions */
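
Condensed sketch of the resulting teardown order (several steps, including the sysfs_lock and bypass handling, are omitted; blk_cleanup_queue() itself is the authoritative sequence):

static void example_teardown_order(struct request_queue *q)
{
        spinlock_t *lock = q->queue_lock;

        /* 1. DYING: new requests now fail with -ENODEV. */
        spin_lock_irq(lock);
        queue_flag_set(QUEUE_FLAG_DYING, q);
        spin_unlock_irq(lock);

        /* 2. Wait for queued requests and any in-progress ->request_fn()
         *    calls, then 3. set DEAD so ->request_fn() is never invoked again. */
        spin_lock_irq(lock);
        __blk_drain_queue(q, true);
        queue_flag_set(QUEUE_FLAG_DEAD, q);
        spin_unlock_irq(lock);

        /* 4. Flush async actions and drop the caller's reference. */
        blk_sync_queue(q);
        blk_put_queue(q);
}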
@@ -2890,27 +2925,11 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
 {
        trace_block_unplug(q, depth, !from_schedule);
 
-       /*
-        * Don't mess with a dying queue.
-        */
-       if (unlikely(blk_queue_dying(q))) {
-               spin_unlock(q->queue_lock);
-               return;
-       }
-
-       /*
-        * If we are punting this to kblockd, then we can safely drop
-        * the queue_lock before waking kblockd (which needs to take
-        * this lock).
-        */
-       if (from_schedule) {
-               spin_unlock(q->queue_lock);
+       if (from_schedule)
                blk_run_queue_async(q);
-       } else {
+       else
                __blk_run_queue(q);
-               spin_unlock(q->queue_lock);
-       }
-
+       spin_unlock(q->queue_lock);
 }
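
Finally, an illustrative caller-side view of plugging (hypothetical code, not from the patch): an explicit blk_finish_plug() reaches queue_unplugged() with from_schedule == false and runs the queue synchronously, while a flush triggered from the scheduler passes from_schedule == true and punts the work to kblockd. With the simplification above, both paths now drop the queue lock in one place, after the run has been kicked off.

static void example_submit_batch(struct bio **bios, int nr)
{
        struct blk_plug plug;
        int i;

        blk_start_plug(&plug);
        for (i = 0; i < nr; i++)
                submit_bio(READ, bios[i]);
        blk_finish_plug(&plug);         /* ends up in queue_unplugged(..., false) */
}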
 
 static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)