EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
+DEFINE_IDA(blk_queue_ida);
+
/*
* For the allocated request tables
*/
/**
* blk_drain_queue - drain requests from request_queue
* @q: queue to drain
+ * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
*
- * Drain ELV_PRIV requests from @q. The caller is responsible for ensuring
- * that no new requests which need to be drained are queued.
+ * Drain requests from @q. If @drain_all is set, all requests are drained.
+ * If not, only ELVPRIV requests are drained. The caller is responsible
+ * for ensuring that no new requests which need to be drained are queued.
*/
-void blk_drain_queue(struct request_queue *q)
+void blk_drain_queue(struct request_queue *q, bool drain_all)
{
while (true) {
- int nr_rqs;
+ bool drain = false;
+ int i;
spin_lock_irq(q->queue_lock);
elv_drain_elevator(q);
+ if (drain_all)
+ blk_throtl_drain(q);
__blk_run_queue(q);
- nr_rqs = q->rq.elvpriv;
+
+ drain |= q->rq.elvpriv;
+
+ /*
+ * Unfortunately, requests are queued at and tracked from
+ * multiple places and there's no single counter which can
+ * be drained. Check all the queues and counters.
+ */
+ if (drain_all) {
+ drain |= !list_empty(&q->queue_head);
+ for (i = 0; i < 2; i++) {
+ drain |= q->rq.count[i];
+ drain |= q->in_flight[i];
+ drain |= !list_empty(&q->flush_queue[i]);
+ }
+ }
spin_unlock_irq(q->queue_lock);
- if (!nr_rqs)
+ if (!drain)
break;
msleep(10);
}
}
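
The contract documented above is that the caller fences the queue off first and only then drains; blk_cleanup_queue() below is the in-tree user of the drain_all case. A minimal sketch of that ordering, with a hypothetical helper name that is not part of this patch:

	/* hypothetical caller; names are illustrative only */
	static void example_quiesce_and_drain(struct request_queue *q)
	{
		spin_lock_irq(q->queue_lock);
		queue_flag_set(QUEUE_FLAG_DEAD, q);	/* no new requests after this */
		spin_unlock_irq(q->queue_lock);

		blk_drain_queue(q, true);		/* poll until @q is empty */
	}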
-/*
- * Note: If a driver supplied the queue lock, it is disconnected
- * by this function. The actual state of the lock doesn't matter
- * here as the request_queue isn't accessible after this point
- * (QUEUE_FLAG_DEAD is set) and no other requests will be queued.
+/**
+ * blk_cleanup_queue - shutdown a request queue
+ * @q: request queue to shutdown
+ *
+ * Mark @q DEAD, drain all pending requests, destroy and put it. All
+ * future requests will be failed immediately with -ENODEV.
*/
void blk_cleanup_queue(struct request_queue *q)
{
- /*
- * We know we have process context here, so we can be a little
- * cautious and ensure that pending block actions on this device
- * are done before moving on. Going into this function, we should
- * not have processes doing IO to this device.
- */
- blk_sync_queue(q);
+ spinlock_t *lock = q->queue_lock;
- del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
+ /* mark @q DEAD, no new requests or merges will be allowed afterwards */
mutex_lock(&q->sysfs_lock);
queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
- mutex_unlock(&q->sysfs_lock);
+
+ spin_lock_irq(lock);
+ queue_flag_set(QUEUE_FLAG_NOMERGES, q);
+ queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
+ queue_flag_set(QUEUE_FLAG_DEAD, q);
if (q->queue_lock != &q->__queue_lock)
q->queue_lock = &q->__queue_lock;
+ spin_unlock_irq(lock);
+ mutex_unlock(&q->sysfs_lock);
+
+ /*
+ * Drain all requests queued before DEAD marking. The caller might
+ * be trying to tear down @q before its elevator is initialized, in
+ * which case we don't want to call into draining.
+ */
+ if (q->elevator)
+ blk_drain_queue(q, true);
+
+ /* @q won't process any more requests, flush async actions */
+ del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
+ blk_sync_queue(q);
+
+ /* @q is and will stay empty, shutdown and put */
blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);
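
For context, the expected ordering in a driver's teardown path would look roughly like the sketch below (a hedged illustration; "mydev" and its fields are invented, not taken from this patch):

	/* illustrative driver teardown order */
	del_gendisk(mydev->disk);		/* no new I/O from userspace */
	blk_cleanup_queue(mydev->queue);	/* queue marked DEAD and drained */
	put_disk(mydev->disk);
	kfree(mydev);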
if (!q)
return NULL;
+ q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL);
+ if (q->id < 0)
+ goto fail_q;
+
q->backing_dev_info.ra_pages =
(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
q->backing_dev_info.state = 0;
q->backing_dev_info.name = "block";
err = bdi_init(&q->backing_dev_info);
- if (err) {
- kmem_cache_free(blk_requestq_cachep, q);
- return NULL;
- }
+ if (err)
+ goto fail_id;
- if (blk_throtl_init(q)) {
- kmem_cache_free(blk_requestq_cachep, q);
- return NULL;
- }
+ if (blk_throtl_init(q))
+ goto fail_id;
setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
laptop_mode_timer_fn, (unsigned long) q);
q->queue_lock = &q->__queue_lock;
return q;
+
+fail_id:
+ ida_simple_remove(&blk_queue_ida, q->id);
+fail_q:
+ kmem_cache_free(blk_requestq_cachep, q);
+ return NULL;
}
EXPORT_SYMBOL(blk_alloc_queue_node);
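
The id allocated in blk_alloc_queue_node() has to be given back when the queue is released; that side is not part of this hunk. A sketch of the expected pairing, assuming the release hook is blk_release_queue() in blk-sysfs.c:

	/* sketch of the matching release path (lives outside this hunk) */
	static void blk_release_queue(struct kobject *kobj)
	{
		struct request_queue *q =
			container_of(kobj, struct request_queue, kobj);

		/* other teardown (elevator, bdi, etc.) elided */
		ida_simple_remove(&blk_queue_ida, q->id);
		kmem_cache_free(blk_requestq_cachep, q);
	}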
}
EXPORT_SYMBOL(blk_init_allocated_queue_node);
-int blk_get_queue(struct request_queue *q)
+bool blk_get_queue(struct request_queue *q)
{
- if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
- kobject_get(&q->kobj);
- return 0;
+ if (likely(!blk_queue_dead(q))) {
+ __blk_get_queue(q);
+ return true;
}
- return 1;
+ return false;
}
EXPORT_SYMBOL(blk_get_queue);
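
Since the return convention flips from 0-on-success to bool, callers now treat %true as "reference taken". A minimal caller sketch (the function name is hypothetical; real lookup paths follow the same pattern):

	/* hypothetical caller of the bool-returning blk_get_queue() */
	static struct request_queue *example_grab_queue(struct request_queue *q)
	{
		if (blk_get_queue(q))	/* true: ref taken, queue not DEAD */
			return q;
		return NULL;		/* queue already marked DEAD */
	}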
const bool is_sync = rw_is_sync(rw_flags) != 0;
int may_queue;
- if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
+ if (unlikely(blk_queue_dead(q)))
return NULL;
may_queue = elv_may_queue(q, rw_flags);
struct io_context *ioc;
struct request_list *rl = &q->rq;
- if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
+ if (unlikely(blk_queue_dead(q)))
return NULL;
prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
__elv_add_request(q, rq, where);
}
-/**
- * blk_insert_request - insert a special request into a request queue
- * @q: request queue where request should be inserted
- * @rq: request to be inserted
- * @at_head: insert request at head or tail of queue
- * @data: private data
- *
- * Description:
- * Many block devices need to execute commands asynchronously, so they don't
- * block the whole kernel from preemption during request execution. This is
- * accomplished normally by inserting aritficial requests tagged as
- * REQ_TYPE_SPECIAL in to the corresponding request queue, and letting them
- * be scheduled for actual execution by the request queue.
- *
- * We have the option of inserting the head or the tail of the queue.
- * Typically we use the tail for new ioctls and so forth. We use the head
- * of the queue for things like a QUEUE_FULL message from a device, or a
- * host that is unable to accept a particular command.
- */
-void blk_insert_request(struct request_queue *q, struct request *rq,
- int at_head, void *data)
-{
- int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
- unsigned long flags;
-
- /*
- * tell I/O scheduler that this isn't a regular read/write (ie it
- * must not attempt merges on this) and that it acts as a soft
- * barrier
- */
- rq->cmd_type = REQ_TYPE_SPECIAL;
-
- rq->special = data;
-
- spin_lock_irqsave(q->queue_lock, flags);
-
- /*
- * If command is tagged, release the tag
- */
- if (blk_rq_tagged(rq))
- blk_queue_end_tag(q, rq);
-
- add_acct_request(q, rq, where);
- __blk_run_queue(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
-}
-EXPORT_SYMBOL(blk_insert_request);
-
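
With blk_insert_request() gone, a driver that still needs to queue a REQ_TYPE_SPECIAL request can build the same behaviour from the remaining primitives. This is only one possible replacement sketch, not something introduced by this patch (the function name, GFP flags and at_head choice are the driver's own):

	/* hedged sketch: queue a special request without blk_insert_request() */
	static int example_queue_special(struct request_queue *q, void *data,
					 rq_end_io_fn *done)
	{
		struct request *rq;

		rq = blk_get_request(q, READ, GFP_KERNEL);
		if (!rq)
			return -ENOMEM;

		rq->cmd_type = REQ_TYPE_SPECIAL;
		rq->special = data;
		blk_execute_rq_nowait(q, NULL, rq, 1 /* at_head */, done);
		return 0;
	}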
static void part_round_stats_single(int cpu, struct hd_struct *part,
unsigned long now)
{
return true;
}
-/*
- * Attempts to merge with the plugged list in the current process. Returns
- * true if merge was successful, otherwise false.
+/**
+ * attempt_plug_merge - try to merge with %current's plugged list
+ * @q: request_queue new bio is being queued at
+ * @bio: new bio being queued
+ * @request_count: out parameter for number of traversed plugged requests
+ *
+ * Determine whether @bio being queued on @q can be merged with a request
+ * on %current's plugged list. Returns %true if merge was successful,
+ * otherwise %false.
+ *
+ * This function is called without @q->queue_lock; however, elevator is
+ * accessed iff there already are requests on the plugged list, which in
+ * turn guarantees validity of the elevator.
+ *
+ * Note that, on successful merge, elevator operation
+ * elevator_bio_merged_fn() will be called without queue lock. Elevator
+ * must be ready for this.
*/
-static bool attempt_plug_merge(struct task_struct *tsk, struct request_queue *q,
- struct bio *bio, unsigned int *request_count)
+static bool attempt_plug_merge(struct request_queue *q, struct bio *bio,
+ unsigned int *request_count)
{
struct blk_plug *plug;
struct request *rq;
bool ret = false;
- plug = tsk->plug;
+ plug = current->plug;
if (!plug)
goto out;
*request_count = 0;
void init_request_from_bio(struct request *req, struct bio *bio)
{
- req->cpu = bio->bi_comp_cpu;
req->cmd_type = REQ_TYPE_FS;
req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK;
* Check if we can merge with the plugged list before grabbing
* any locks.
*/
- if (attempt_plug_merge(current, q, bio, &request_count))
+ if (attempt_plug_merge(q, bio, &request_count))
return;
spin_lock_irq(q->queue_lock);
*/
init_request_from_bio(req, bio);
- if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) ||
- bio_flagged(bio, BIO_CPU_AFFINE))
+ if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags))
req->cpu = raw_smp_processor_id();
plug = current->plug;
*/
if (list_empty(&plug->list))
trace_block_plug(q);
- else if (!plug->should_sort) {
- struct request *__rq;
+ else {
+ if (!plug->should_sort) {
+ struct request *__rq;
- __rq = list_entry_rq(plug->list.prev);
- if (__rq->q != q)
- plug->should_sort = 1;
+ __rq = list_entry_rq(plug->list.prev);
+ if (__rq->q != q)
+ plug->should_sort = 1;
+ }
+ if (request_count >= BLK_MAX_REQUEST_COUNT) {
+ blk_flush_plug_list(plug, false);
+ trace_block_plug(q);
+ }
}
- if (request_count >= BLK_MAX_REQUEST_COUNT)
- blk_flush_plug_list(plug, false);
list_add_tail(&req->queuelist, &plug->list);
drive_stat_acct(req, 1);
} else {
goto end_io;
}
- if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
- goto end_io;
-
part = bio->bi_bdev->bd_part;
if (should_fail_request(part, bio->bi_size) ||
should_fail_request(&part_to_disk(part)->part0,
return -EIO;
spin_lock_irqsave(q->queue_lock, flags);
+ if (unlikely(blk_queue_dead(q))) {
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ return -ENODEV;
+ }
/*
* Submitting request must be dequeued before calling this function
where = ELEVATOR_INSERT_FLUSH;
add_acct_request(q, rq, where);
+ if (where == ELEVATOR_INSERT_FLUSH)
+ __blk_run_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
return 0;
{
trace_block_unplug(q, depth, !from_schedule);
+ /*
+ * Don't mess with a dead queue.
+ */
+ if (unlikely(blk_queue_dead(q))) {
+ spin_unlock(q->queue_lock);
+ return;
+ }
+
/*
* If we are punting this to kblockd, then we can safely drop
* the queue_lock before waking kblockd (which needs to take
depth = 0;
spin_lock(q->queue_lock);
}
+
+ /*
+ * Short-circuit if @q is dead
+ */
+ if (unlikely(blk_queue_dead(q))) {
+ __blk_end_request_all(rq, -ENODEV);
+ continue;
+ }
+
/*
* rq is already accounted, so use raw insert
*/