// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2003 Russell King, All Rights Reserved.
 * Copyright 2006-2007 Pierre Ossman
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

#include "queue.h"
#include "block.h"
#include "core.h"
#include "card.h"
#include "host.h"

static inline bool mmc_cqe_dcmd_busy(struct mmc_queue *mq)
{
	/* Allow only 1 DCMD at a time */
	return mq->in_flight[MMC_ISSUE_DCMD];
}

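/*
 * Clear the queue's busy flags where the underlying condition has passed:
 * the DCMD-busy bit once the in-flight DCMD completes, and the queue-full
 * bit unconditionally so dispatch is retried. Callers are expected to
 * hold mq->lock.
 */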
void mmc_cqe_check_busy(struct mmc_queue *mq)
{
	if ((mq->cqe_busy & MMC_CQE_DCMD_BUSY) && !mmc_cqe_dcmd_busy(mq))
		mq->cqe_busy &= ~MMC_CQE_DCMD_BUSY;

	mq->cqe_busy &= ~MMC_CQE_QUEUE_FULL;
}

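/*
 * A DCMD (direct command) lets a CQE host issue a non-data command, here
 * the cache flush, through the command queue instead of halting it.
 * Host drivers advertise support with MMC_CAP2_CQE_DCMD; an illustrative
 * probe-time sketch:
 *
 *	host->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
 */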
static inline bool mmc_cqe_can_dcmd(struct mmc_host *host)
{
	return host->caps2 & MMC_CAP2_CQE_DCMD;
}

static enum mmc_issue_type mmc_cqe_issue_type(struct mmc_host *host,
					      struct request *req)
{
	switch (req_op(req)) {
	case REQ_OP_DRV_IN:
	case REQ_OP_DRV_OUT:
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		return MMC_ISSUE_SYNC;
	case REQ_OP_FLUSH:
		return mmc_cqe_can_dcmd(host) ? MMC_ISSUE_DCMD : MMC_ISSUE_SYNC;
	default:
		return MMC_ISSUE_ASYNC;
	}
}

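/*
 * Classify how a request will be issued: MMC_ISSUE_ASYNC requests are
 * started and then completed from a separate notification (reads and
 * writes, or whatever the CQE handles itself), while MMC_ISSUE_SYNC
 * requests are issued and waited for inline.
 */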
enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req)
{
	struct mmc_host *host = mq->card->host;

	if (mq->use_cqe)
		return mmc_cqe_issue_type(host, req);

	if (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_WRITE)
		return MMC_ISSUE_ASYNC;

	return MMC_ISSUE_SYNC;
}

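/*
 * Schedule the recovery work at most once. Callers are expected to hold
 * mq->lock, which serializes this against the recovery handler clearing
 * recovery_needed again.
 */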
static void __mmc_cqe_recovery_notifier(struct mmc_queue *mq)
{
	if (!mq->recovery_needed) {
		mq->recovery_needed = true;
		schedule_work(&mq->recovery_work);
	}
}

void mmc_cqe_recovery_notifier(struct mmc_request *mrq)
{
	struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
						  brq.mrq);
	struct request *req = mmc_queue_req_to_req(mqrq);
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	unsigned long flags;

	spin_lock_irqsave(&mq->lock, flags);
	__mmc_cqe_recovery_notifier(mq);
	spin_unlock_irqrestore(&mq->lock, flags);
}

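/*
 * Called under mq->lock when the block layer times out a CQE request.
 * The host's ->cqe_timeout() callback reports whether the request is
 * really still in flight: if so, recovery is triggered as needed and the
 * timer reset; otherwise the request has actually completed and is
 * finished here.
 */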
static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_request *mrq = &mqrq->brq.mrq;
	struct mmc_queue *mq = req->q->queuedata;
	struct mmc_host *host = mq->card->host;
	enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
	bool recovery_needed = false;

	switch (issue_type) {
	case MMC_ISSUE_ASYNC:
	case MMC_ISSUE_DCMD:
		if (host->cqe_ops->cqe_timeout(host, mrq, &recovery_needed)) {
			if (recovery_needed)
				__mmc_cqe_recovery_notifier(mq);
			return BLK_EH_RESET_TIMER;
		}
		/* Not a timeout: the request has completed, so finish it */
		blk_mq_complete_request(req);
		return BLK_EH_DONE;
	default:
		/* Timeout is handled by mmc core */
		return BLK_EH_RESET_TIMER;
	}
}

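/*
 * blk-mq ->timeout() callback. While recovery is in progress, or when
 * there is no CQE, simply keep resetting the timer: non-CQE timeouts are
 * handled by the mmc core itself.
 */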
static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req,
						 bool reserved)
{
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mq->lock, flags);

	if (mq->recovery_needed || !mq->use_cqe)
		ret = BLK_EH_RESET_TIMER;
	else
		ret = mmc_cqe_timed_out(req);

	spin_unlock_irqrestore(&mq->lock, flags);

	return ret;
}

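/*
 * Recovery work for both the CQE and non-CQE paths. While
 * mq->recovery_needed is set, mmc_mq_queue_rq() backs off with
 * BLK_STS_RESOURCE, so recovery runs without new requests being issued;
 * the hardware queues are kicked again afterwards.
 */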
static void mmc_mq_recovery_handler(struct work_struct *work)
{
	struct mmc_queue *mq = container_of(work, struct mmc_queue,
					    recovery_work);
	struct request_queue *q = mq->queue;

	mmc_get_card(mq->card, &mq->ctx);

	mq->in_recovery = true;

	if (mq->use_cqe)
		mmc_blk_cqe_recovery(mq);
	else
		mmc_blk_mq_recovery(mq);

	mq->in_recovery = false;

	spin_lock_irq(&mq->lock);
	mq->recovery_needed = false;
	spin_unlock_irq(&mq->lock);

	mmc_put_card(mq->card, &mq->ctx);

	blk_mq_run_hw_queues(q, true);
}

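/*
 * Allocate and initialize a scatterlist with room for sg_len segments.
 */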
static struct scatterlist *mmc_alloc_sg(int sg_len, gfp_t gfp)
{
	struct scatterlist *sg;

	sg = kmalloc_array(sg_len, sizeof(*sg), gfp);
	if (sg)
		sg_init_table(sg, sg_len);

	return sg;
}

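/*
 * Wire up discard support for the request queue. card->pref_erase is in
 * 512-byte sectors, hence the << 9 conversion to a granularity in bytes
 * below.
 */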
static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
	blk_queue_max_discard_sectors(q, max_discard);
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;
	if (mmc_can_secure_erase_trim(card))
		blk_queue_flag_set(QUEUE_FLAG_SECERASE, q);
}

/**
 * __mmc_init_request() - initialize the MMC-specific per-request data
 * @mq: the MMC queue
 * @req: the request
 * @gfp: memory allocation policy
 */
static int __mmc_init_request(struct mmc_queue *mq, struct request *req,
			      gfp_t gfp)
{
	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;

	mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp);
	if (!mq_rq->sg)
		return -ENOMEM;

	return 0;
}

static void mmc_exit_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);

	kfree(mq_rq->sg);
	mq_rq->sg = NULL;
}

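/*
 * blk-mq ->init_request() and ->exit_request() callbacks, wrapping the
 * helpers above so the per-request scatterlist is allocated once when
 * the tag set is set up rather than on every I/O.
 */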
static int mmc_mq_init_request(struct blk_mq_tag_set *set, struct request *req,
			       unsigned int hctx_idx, unsigned int numa_node)
{
	return __mmc_init_request(set->driver_data, req, GFP_KERNEL);
}

static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req,
				unsigned int hctx_idx)
{
	struct mmc_queue *mq = set->driver_data;

	mmc_exit_request(mq->queue, req);
}

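/*
 * blk-mq ->queue_rq() callback: classify the request, enforce the
 * one-DCMD and no-parallel-dispatch rules under mq->lock, claim the host
 * for the first in-flight request, and hand the request on to
 * mmc_blk_mq_issue_rq().
 */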
static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
				    const struct blk_mq_queue_data *bd)
{
	struct request *req = bd->rq;
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;
	enum mmc_issue_type issue_type;
	enum mmc_issued issued;
	bool get_card, cqe_retune_ok;
	int ret;

	if (mmc_card_removed(mq->card)) {
		req->rq_flags |= RQF_QUIET;
		return BLK_STS_IOERR;
	}

	issue_type = mmc_issue_type(mq, req);

	spin_lock_irq(&mq->lock);

	if (mq->recovery_needed || mq->busy) {
		spin_unlock_irq(&mq->lock);
		return BLK_STS_RESOURCE;
	}

	switch (issue_type) {
	case MMC_ISSUE_DCMD:
		if (mmc_cqe_dcmd_busy(mq)) {
			mq->cqe_busy |= MMC_CQE_DCMD_BUSY;
			spin_unlock_irq(&mq->lock);
			return BLK_STS_RESOURCE;
		}
		break;
	case MMC_ISSUE_ASYNC:
		break;
	default:
		/*
		 * Timeouts are handled by mmc core, and we don't have a host
		 * API to abort requests, so we can't handle the timeout anyway.
		 * However, when the timeout happens, blk_mq_complete_request()
		 * no longer works (to stop the request disappearing under us).
		 * To avoid racing with that, set a large timeout.
		 */
		req->timeout = 600 * HZ;
		break;
	}

	/* Parallel dispatch of requests is not supported at the moment */
	mq->busy = true;

	mq->in_flight[issue_type] += 1;
	get_card = (mmc_tot_in_flight(mq) == 1);
	cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1);

	spin_unlock_irq(&mq->lock);

	if (!(req->rq_flags & RQF_DONTPREP)) {
		req_to_mmc_queue_req(req)->retries = 0;
		req->rq_flags |= RQF_DONTPREP;
	}

	if (get_card)
		mmc_get_card(card, &mq->ctx);

	if (mq->use_cqe) {
		host->retune_now = host->need_retune && cqe_retune_ok &&
				   !host->hold_retune;
	}

	blk_mq_start_request(req);

	issued = mmc_blk_mq_issue_rq(mq, req);

	switch (issued) {
	case MMC_REQ_BUSY:
		ret = BLK_STS_RESOURCE;
		break;
	case MMC_REQ_FAILED_TO_START:
		ret = BLK_STS_IOERR;
		break;
	default:
		ret = BLK_STS_OK;
		break;
	}

	if (issued != MMC_REQ_STARTED) {
		bool put_card = false;

		spin_lock_irq(&mq->lock);
		mq->in_flight[issue_type] -= 1;
		if (mmc_tot_in_flight(mq) == 0)
			put_card = true;
		mq->busy = false;
		spin_unlock_irq(&mq->lock);
		if (put_card)
			mmc_put_card(card, &mq->ctx);
	} else {
		WRITE_ONCE(mq->busy, false);
	}

	return ret;
}

static const struct blk_mq_ops mmc_mq_ops = {
	.queue_rq	= mmc_mq_queue_rq,
	.init_request	= mmc_mq_init_request,
	.exit_request	= mmc_mq_exit_request,
	.complete	= mmc_blk_mq_complete,
	.timeout	= mmc_mq_timed_out,
};

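/*
 * Apply host controller and card limits (bounce limit, maximum sectors,
 * segments and segment size, logical block size) to the request queue.
 */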
static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	unsigned block_size = 512;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

	blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

	blk_queue_bounce_limit(mq->queue, limit);
	blk_queue_max_hw_sectors(mq->queue,
		min(host->max_blk_count, host->max_req_size / 512));
	blk_queue_max_segments(mq->queue, host->max_segs);

	if (mmc_card_mmc(card))
		block_size = card->ext_csd.data_sector_size;

	blk_queue_logical_block_size(mq->queue, block_size);
	blk_queue_max_segment_size(mq->queue,
			round_down(host->max_seg_size, block_size));

	dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue));

	INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
	INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);

	mutex_init(&mq->complete_lock);

	init_waitqueue_head(&mq->wait);
}

/* Set queue depth to get a reasonable value for q->nr_requests */
#define MMC_QUEUE_DEPTH 64

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	int ret;

	mq->card = card;
	mq->use_cqe = host->cqe_enabled;

	spin_lock_init(&mq->lock);

	memset(&mq->tag_set, 0, sizeof(mq->tag_set));
	mq->tag_set.ops = &mmc_mq_ops;
	/*
	 * The queue depth for CQE must match the hardware because the request
	 * tag is used to index the hardware queue.
	 */
	if (mq->use_cqe)
		mq->tag_set.queue_depth =
			min_t(int, card->ext_csd.cmdq_depth, host->cqe_qdepth);
	else
		mq->tag_set.queue_depth = MMC_QUEUE_DEPTH;
	mq->tag_set.numa_node = NUMA_NO_NODE;
	mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
	mq->tag_set.nr_hw_queues = 1;
	mq->tag_set.cmd_size = sizeof(struct mmc_queue_req);
	mq->tag_set.driver_data = mq;

	ret = blk_mq_alloc_tag_set(&mq->tag_set);
	if (ret)
		return ret;

	mq->queue = blk_mq_init_queue(&mq->tag_set);
	if (IS_ERR(mq->queue)) {
		ret = PTR_ERR(mq->queue);
		goto free_tag_set;
	}

	mq->queue->queuedata = mq;
	blk_queue_rq_timeout(mq->queue, 60 * HZ);

	mmc_setup_queue(mq, card);
	return 0;

free_tag_set:
	blk_mq_free_tag_set(&mq->tag_set);
	return ret;
}

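/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Quiesce the queue so that no new requests are dispatched, then make
 * sure any requests already in flight have completed.
 */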
void mmc_queue_suspend(struct mmc_queue *mq)
{
	blk_mq_quiesce_queue(mq->queue);

	/*
	 * The host remains claimed while there are outstanding requests, so
	 * simply claiming and releasing here ensures there are none.
	 */
	mmc_claim_host(mq->card->host);
	mmc_release_host(mq->card->host);
}

void mmc_queue_resume(struct mmc_queue *mq)
{
	blk_mq_unquiesce_queue(mq->queue);
}

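/*
 * Tear the queue down at card removal: unquiesce in case it was
 * suspended, release the block layer resources, then flush any leftover
 * completion work.
 */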
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;

	/*
	 * The legacy code handled the possibility of being suspended,
	 * so do that here too.
	 */
	if (blk_queue_quiesced(q))
		blk_mq_unquiesce_queue(q);

	blk_cleanup_queue(q);
	blk_mq_free_tag_set(&mq->tag_set);

	/*
	 * A request can be completed before the next request, potentially
	 * leaving a complete_work with nothing to do. Such a work item might
	 * still be queued at this point. Flush it.
	 */
	flush_work(&mq->complete_work);

	mq->card = NULL;
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	struct request *req = mmc_queue_req_to_req(mqrq);

	return blk_rq_map_sg(mq->queue, req, mqrq->sg);
}