/*
 * Copyright (C) 2003 Russell King, All Rights Reserved.
 * Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

#include "queue.h"
#include "block.h"
#include "core.h"
#include "card.h"
#include "host.h"

/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue *mq = q->queuedata;

	if (mq && mmc_card_removed(mq->card))
		return BLKPREP_KILL;

	req->rq_flags |= RQF_DONTPREP;
	req_to_mmc_queue_req(req)->retries = 0;

	return BLKPREP_OK;
}

static inline bool mmc_cqe_dcmd_busy(struct mmc_queue *mq)
{
	/* Allow only 1 DCMD at a time */
	return mq->in_flight[MMC_ISSUE_DCMD];
}

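/*
 * Re-evaluate the CQE busy flags: the DCMD slot is free again once the
 * in-flight DCMD completes, and a previously full queue is always worth
 * retrying after a completion.
 */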
void mmc_cqe_check_busy(struct mmc_queue *mq)
{
	if ((mq->cqe_busy & MMC_CQE_DCMD_BUSY) && !mmc_cqe_dcmd_busy(mq))
		mq->cqe_busy &= ~MMC_CQE_DCMD_BUSY;

	mq->cqe_busy &= ~MMC_CQE_QUEUE_FULL;
}

static inline bool mmc_cqe_can_dcmd(struct mmc_host *host)
{
	return host->caps2 & MMC_CAP2_CQE_DCMD;
}

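/*
 * Classify a request for the CQE path: driver-private and discard/erase
 * requests are issued synchronously, a flush becomes a direct command
 * (DCMD) when the host supports it, and reads/writes go down the
 * asynchronous path.
 */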
enum mmc_issue_type mmc_cqe_issue_type(struct mmc_host *host,
				       struct request *req)
{
	switch (req_op(req)) {
	case REQ_OP_DRV_IN:
	case REQ_OP_DRV_OUT:
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		return MMC_ISSUE_SYNC;
	case REQ_OP_FLUSH:
		return mmc_cqe_can_dcmd(host) ? MMC_ISSUE_DCMD : MMC_ISSUE_SYNC;
	default:
		return MMC_ISSUE_ASYNC;
	}
}

enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req)
{
	struct mmc_host *host = mq->card->host;

	if (mq->use_cqe)
		return mmc_cqe_issue_type(host, req);

	if (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_WRITE)
		return MMC_ISSUE_ASYNC;

	return MMC_ISSUE_SYNC;
}

static void __mmc_cqe_recovery_notifier(struct mmc_queue *mq)
{
	if (!mq->recovery_needed) {
		mq->recovery_needed = true;
		schedule_work(&mq->recovery_work);
	}
}

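/*
 * Notify the queue that CQE recovery is needed. The host's CQE code calls
 * this with the failed mmc_request (typically via mrq->recovery_notifier);
 * the queue lock guards recovery_needed so the work is scheduled only once.
 */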
void mmc_cqe_recovery_notifier(struct mmc_request *mrq)
{
	struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
						  brq.mrq);
	struct request *req = mmc_queue_req_to_req(mqrq);
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__mmc_cqe_recovery_notifier(mq);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

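/*
 * Block layer timeout for a CQE request: ask the host whether the request
 * is really still stuck. If it is, trigger recovery if required and rearm
 * the timer so recovery can finish the request; if it has in fact already
 * completed, report the timeout as handled.
 */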
static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_request *mrq = &mqrq->brq.mrq;
	struct mmc_queue *mq = req->q->queuedata;
	struct mmc_host *host = mq->card->host;
	enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
	bool recovery_needed = false;

	switch (issue_type) {
	case MMC_ISSUE_ASYNC:
	case MMC_ISSUE_DCMD:
		if (host->cqe_ops->cqe_timeout(host, mrq, &recovery_needed)) {
			if (recovery_needed)
				__mmc_cqe_recovery_notifier(mq);
			return BLK_EH_RESET_TIMER;
		}
		/* No timeout */
		return BLK_EH_HANDLED;
	default:
		/* Timeout is handled by mmc core */
		return BLK_EH_RESET_TIMER;
	}
}

static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req,
						 bool reserved)
{
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(q->queue_lock, flags);

	if (mq->recovery_needed || !mq->use_cqe)
		ret = BLK_EH_RESET_TIMER;
	else
		ret = mmc_cqe_timed_out(req);

	spin_unlock_irqrestore(q->queue_lock, flags);

	return ret;
}

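/*
 * CQE recovery runs in process context: claim the host, let the block
 * driver recover the CQE and requeue any outstanding requests, then clear
 * recovery_needed under the queue lock and restart dispatch.
 */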
static void mmc_mq_recovery_handler(struct work_struct *work)
{
	struct mmc_queue *mq = container_of(work, struct mmc_queue,
					    recovery_work);
	struct request_queue *q = mq->queue;

	mmc_get_card(mq->card, &mq->ctx);

	mq->in_recovery = true;

	mmc_blk_cqe_recovery(mq);

	mq->in_recovery = false;

	spin_lock_irq(q->queue_lock);
	mq->recovery_needed = false;
	spin_unlock_irq(q->queue_lock);

	mmc_put_card(mq->card, &mq->ctx);

	blk_mq_run_hw_queues(q, true);
}

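/*
 * Legacy (non-blk-mq) path: a per-queue kthread pulls requests off the
 * dispatch queue and hands them to the block driver one at a time, sleeping
 * when there is nothing to do. thread_sem serializes the thread against
 * queue suspend/resume.
 */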
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;
	struct mmc_context_info *cntx = &mq->card->host->context_info;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);
		mq->asleep = false;
		cntx->is_waiting_last_req = false;
		cntx->is_new_req = false;
		if (!req) {
			/*
			 * Dispatch queue is empty so set flags for
			 * mmc_request_fn() to wake us up.
			 */
			if (mq->qcnt)
				cntx->is_waiting_last_req = true;
			else
				mq->asleep = true;
		}
		spin_unlock_irq(q->queue_lock);

		if (req || mq->qcnt) {
			set_current_state(TASK_RUNNING);
			mmc_blk_issue_rq(mq, req);
			cond_resched();
		} else {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
		}
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler. This is called for any queue on a
 * particular host. When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it. This may
 * not be the queue we were asked to process.
 */
static void mmc_request_fn(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	struct mmc_context_info *cntx;

	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->rq_flags |= RQF_QUIET;
			__blk_end_request_all(req, BLK_STS_IOERR);
		}
		return;
	}

	cntx = &mq->card->host->context_info;

	if (cntx->is_waiting_last_req) {
		cntx->is_new_req = true;
		wake_up_interruptible(&cntx->wait);
	}

	if (mq->asleep)
		wake_up_process(mq->thread);
}

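/* Allocate and initialize a scatterlist table with @sg_len entries. */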
static struct scatterlist *mmc_alloc_sg(int sg_len, gfp_t gfp)
{
	struct scatterlist *sg;

	sg = kmalloc_array(sg_len, sizeof(*sg), gfp);
	if (sg)
		sg_init_table(sg, sg_len);

	return sg;
}

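/*
 * Advertise discard (and, where supported, secure erase) to the block
 * layer. pref_erase is in 512-byte sectors, hence the << 9 conversion to
 * bytes; a preferred erase size larger than the maximum discard is
 * reported as zero granularity.
 */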
static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	blk_queue_max_discard_sectors(q, max_discard);
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;
	if (mmc_can_secure_erase_trim(card))
		queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
}

/**
 * __mmc_init_request() - initialize the MMC-specific per-request data
 * @mq: the MMC queue
 * @req: the request
 * @gfp: memory allocation policy
 */
static int __mmc_init_request(struct mmc_queue *mq, struct request *req,
			      gfp_t gfp)
{
	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;

	mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp);
	if (!mq_rq->sg)
		return -ENOMEM;

	return 0;
}

static int mmc_init_request(struct request_queue *q, struct request *req,
			    gfp_t gfp)
{
	return __mmc_init_request(q->queuedata, req, gfp);
}

static void mmc_exit_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);

	kfree(mq_rq->sg);
	mq_rq->sg = NULL;
}

static int mmc_mq_init_request(struct blk_mq_tag_set *set, struct request *req,
			       unsigned int hctx_idx, unsigned int numa_node)
{
	return __mmc_init_request(set->driver_data, req, GFP_KERNEL);
}

static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req,
				unsigned int hctx_idx)
{
	struct mmc_queue *mq = set->driver_data;

	mmc_exit_request(mq->queue, req);
}

/*
 * We use BLK_MQ_F_BLOCKING and have only 1 hardware queue, which means requests
 * will not be dispatched in parallel.
 */
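/*
 * Dispatch path: account the request under the queue lock (per-issue-type
 * in-flight counts and busy flags), claim the host when this is the first
 * in-flight request, then hand the request to the block driver.
 * BLK_STS_RESOURCE tells blk-mq to back off and retry dispatch later.
 */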
static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
				    const struct blk_mq_queue_data *bd)
{
	struct request *req = bd->rq;
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;
	enum mmc_issue_type issue_type;
	enum mmc_issued issued;
	bool get_card, cqe_retune_ok;
	int ret;

	if (mmc_card_removed(mq->card)) {
		req->rq_flags |= RQF_QUIET;
		return BLK_STS_IOERR;
	}

	issue_type = mmc_issue_type(mq, req);

	spin_lock_irq(q->queue_lock);

	if (mq->recovery_needed) {
		spin_unlock_irq(q->queue_lock);
		return BLK_STS_RESOURCE;
	}

	switch (issue_type) {
	case MMC_ISSUE_DCMD:
		if (mmc_cqe_dcmd_busy(mq)) {
			mq->cqe_busy |= MMC_CQE_DCMD_BUSY;
			spin_unlock_irq(q->queue_lock);
			return BLK_STS_RESOURCE;
		}
		break;
	case MMC_ISSUE_ASYNC:
		break;
	default:
		/*
		 * Timeouts are handled by mmc core, and we don't have a host
		 * API to abort requests, so we can't handle the timeout anyway.
		 * However, when the timeout happens, blk_mq_complete_request()
		 * no longer works (to stop the request disappearing under us).
		 * To avoid racing with that, set a large timeout.
		 */
		req->timeout = 600 * HZ;
		break;
	}

	mq->in_flight[issue_type] += 1;
	get_card = (mmc_tot_in_flight(mq) == 1);
	cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1);

	spin_unlock_irq(q->queue_lock);

	if (!(req->rq_flags & RQF_DONTPREP)) {
		req_to_mmc_queue_req(req)->retries = 0;
		req->rq_flags |= RQF_DONTPREP;
	}

	if (get_card)
		mmc_get_card(card, &mq->ctx);

	if (mq->use_cqe) {
		host->retune_now = host->need_retune && cqe_retune_ok &&
				   !host->hold_retune;
	}

	blk_mq_start_request(req);

	issued = mmc_blk_mq_issue_rq(mq, req);

	switch (issued) {
	case MMC_REQ_BUSY:
		ret = BLK_STS_RESOURCE;
		break;
	case MMC_REQ_FAILED_TO_START:
		ret = BLK_STS_IOERR;
		break;
	default:
		ret = BLK_STS_OK;
		break;
	}

	if (issued != MMC_REQ_STARTED) {
		bool put_card = false;

		spin_lock_irq(q->queue_lock);
		mq->in_flight[issue_type] -= 1;
		if (mmc_tot_in_flight(mq) == 0)
			put_card = true;
		spin_unlock_irq(q->queue_lock);
		if (put_card)
			mmc_put_card(card, &mq->ctx);
	}

	return ret;
}

static const struct blk_mq_ops mmc_mq_ops = {
	.queue_rq	= mmc_mq_queue_rq,
	.init_request	= mmc_mq_init_request,
	.exit_request	= mmc_mq_exit_request,
	.complete	= mmc_blk_mq_complete,
	.timeout	= mmc_mq_timed_out,
};

static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

	blk_queue_bounce_limit(mq->queue, limit);
	blk_queue_max_hw_sectors(mq->queue,
		min(host->max_blk_count, host->max_req_size / 512));
	blk_queue_max_segments(mq->queue, host->max_segs);
	blk_queue_max_segment_size(mq->queue, host->max_seg_size);

	/* Initialize thread_sem even if it is not used */
	sema_init(&mq->thread_sem, 1);

	INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
	INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);

	mutex_init(&mq->complete_lock);

	init_waitqueue_head(&mq->wait);
}

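/*
 * Allocate the blk-mq tag set and request queue. BLK_MQ_F_BLOCKING lets
 * ->queue_rq() sleep, e.g. while claiming the host, and the single
 * hardware queue keeps dispatch serialized.
 */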
static int mmc_mq_init_queue(struct mmc_queue *mq, int q_depth,
			     const struct blk_mq_ops *mq_ops, spinlock_t *lock)
{
	int ret;

	memset(&mq->tag_set, 0, sizeof(mq->tag_set));
	mq->tag_set.ops = mq_ops;
	mq->tag_set.queue_depth = q_depth;
	mq->tag_set.numa_node = NUMA_NO_NODE;
	mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE |
			    BLK_MQ_F_BLOCKING;
	mq->tag_set.nr_hw_queues = 1;
	mq->tag_set.cmd_size = sizeof(struct mmc_queue_req);
	mq->tag_set.driver_data = mq;

	ret = blk_mq_alloc_tag_set(&mq->tag_set);
	if (ret)
		return ret;

	mq->queue = blk_mq_init_queue(&mq->tag_set);
	if (IS_ERR(mq->queue)) {
		ret = PTR_ERR(mq->queue);
		goto free_tag_set;
	}

	mq->queue->queue_lock = lock;
	mq->queue->queuedata = mq;

	return 0;

free_tag_set:
	blk_mq_free_tag_set(&mq->tag_set);

	return ret;
}

/* Set queue depth to get a reasonable value for q->nr_requests */
#define MMC_QUEUE_DEPTH 64

static int mmc_mq_init(struct mmc_queue *mq, struct mmc_card *card,
		       spinlock_t *lock)
{
	struct mmc_host *host = card->host;
	int q_depth;
	int ret;

	/*
	 * The queue depth for CQE must match the hardware because the request
	 * tag is used to index the hardware queue.
	 */
	if (mq->use_cqe)
		q_depth = min_t(int, card->ext_csd.cmdq_depth, host->cqe_qdepth);
	else
		q_depth = MMC_QUEUE_DEPTH;

	ret = mmc_mq_init_queue(mq, q_depth, &mmc_mq_ops, lock);
	if (ret)
		return ret;

	blk_queue_rq_timeout(mq->queue, 60 * HZ);

	mmc_setup_queue(mq, card);

	return 0;
}

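/*
 * Typical use from the block driver (an illustrative sketch only; the real
 * call site lives in block.c, and the md->lock/md->queue/err_putdisk names
 * are assumptions based on it):
 *
 *	ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
 *	if (ret)
 *		goto err_putdisk;
 *	md->queue.blkdata = md;
 */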
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	int ret = -ENOMEM;

	mq->card = card;

	mq->use_cqe = host->cqe_enabled;

	if (mq->use_cqe || mmc_host_use_blk_mq(host))
		return mmc_mq_init(mq, card, lock);

	mq->queue = blk_alloc_queue(GFP_KERNEL);
	if (!mq->queue)
		return -ENOMEM;
	mq->queue->queue_lock = lock;
	mq->queue->request_fn = mmc_request_fn;
	mq->queue->init_rq_fn = mmc_init_request;
	mq->queue->exit_rq_fn = mmc_exit_request;
	mq->queue->cmd_size = sizeof(struct mmc_queue_req);
	mq->queue->queuedata = mq;
	mq->qcnt = 0;
	ret = blk_init_allocated_queue(mq->queue);
	if (ret) {
		blk_cleanup_queue(mq->queue);
		return ret;
	}

	blk_queue_prep_rq(mq->queue, mmc_prep_request);

	mmc_setup_queue(mq, card);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
				 host->index, subname ? subname : "");

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto cleanup_queue;
	}

	return 0;

cleanup_queue:
	blk_cleanup_queue(mq->queue);
	return ret;
}

static void mmc_mq_queue_suspend(struct mmc_queue *mq)
{
	blk_mq_quiesce_queue(mq->queue);

	/*
	 * The host remains claimed while there are outstanding requests, so
	 * simply claiming and releasing here ensures there are none.
	 */
	mmc_claim_host(mq->card->host);
	mmc_release_host(mq->card->host);
}

static void mmc_mq_queue_resume(struct mmc_queue *mq)
{
	blk_mq_unquiesce_queue(mq->queue);
}

static void __mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!mq->suspended) {
		mq->suspended = true;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

static void __mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->suspended) {
		mq->suspended = false;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (q->mq_ops) {
		/*
		 * The legacy code handled the possibility of being suspended,
		 * so do that here too.
		 */
		if (blk_queue_quiesced(q))
			blk_mq_unquiesce_queue(q);
		goto out_cleanup;
	}

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

out_cleanup:
	blk_cleanup_queue(q);

	/*
	 * A request can be completed before the next request, potentially
	 * leaving a complete_work with nothing to do. Such a work item might
	 * still be queued at this point. Flush it.
	 */
	flush_work(&mq->complete_work);

	mq->card = NULL;
}

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests. This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;

	if (q->mq_ops)
		mmc_mq_queue_suspend(mq);
	else
		__mmc_queue_suspend(mq);
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;

	if (q->mq_ops)
		mmc_mq_queue_resume(mq);
	else
		__mmc_queue_resume(mq);
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	struct request *req = mmc_queue_req_to_req(mqrq);

	return blk_rq_map_sg(mq->queue, req, mqrq->sg);
}