/*
 * linux/fs/nfs/blocklayout/blocklayout.c
 *
 * Module for the NFSv4.1 pNFS block layout driver.
 *
 * Copyright (c) 2006 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Andy Adamson <andros@citi.umich.edu>
 * Fred Isaman <iisaman@umich.edu>
 *
 * permission is granted to use, copy, create derivative works and
 * redistribute this software and such derivative works for any purpose,
 * so long as the name of the university of michigan is not used in
 * any advertising or publicity pertaining to the use or distribution
 * of this software without specific, written prior authorization. if
 * the above copyright notice or any other identification of the
 * university of michigan is included in any copy of any portion of
 * this software, then the disclaimer below must also be included.
 *
 * this software is provided as is, without representation from the
 * university of michigan as to its fitness for any purpose, and without
 * warranty by the university of michigan of any kind, either express
 * or implied, including without limitation the implied warranties of
 * merchantability and fitness for a particular purpose. the regents
 * of the university of michigan shall not be liable for any damages,
 * including special, indirect, incidental, or consequential damages,
 * with respect to any claim arising out or in connection with the use
 * of the software, even if it has been or is hereafter advised of the
 * possibility of such damages.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/bio.h>		/* struct bio */
#include <linux/prefetch.h>
#include <linux/pagevec.h>

#include "../pnfs.h"
#include "../nfs4session.h"
#include "../internal.h"
#include "blocklayout.h"

#define NFSDBG_FACILITY	NFSDBG_PNFS_LD

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Andy Adamson <andros@citi.umich.edu>");
MODULE_DESCRIPTION("The NFSv4.1 pNFS Block layout driver");

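/*
 * Report whether an extent describes a hole, i.e. whether reads covered
 * by it must be satisfied with zeroes instead of a device access.
 */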
static bool is_hole(struct pnfs_block_extent *be)
{
	switch (be->be_state) {
	case PNFS_BLOCK_NONE_DATA:
		return true;
	case PNFS_BLOCK_INVALID_DATA:
		return be->be_tag ? false : true;
	default:
		return false;
	}
}

/* The data we are handed might be spread across several bios. We need
 * to track when the last one is finished.
 */
struct parallel_io {
	struct kref refcnt;
	void (*pnfs_callback) (void *data);
	void *data;
};

static inline struct parallel_io *alloc_parallel(void *data)
{
	struct parallel_io *rv;

	rv = kmalloc(sizeof(*rv), GFP_NOFS);
	if (rv) {
		rv->data = data;
		kref_init(&rv->refcnt);
	}
	return rv;
}

static inline void get_parallel(struct parallel_io *p)
{
	kref_get(&p->refcnt);
}

static void destroy_parallel(struct kref *kref)
{
	struct parallel_io *p = container_of(kref, struct parallel_io, refcnt);

	dprintk("%s enter\n", __func__);
	p->pnfs_callback(p->data);
	kfree(p);
}

static inline void put_parallel(struct parallel_io *p)
{
	kref_put(&p->refcnt, destroy_parallel);
}

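/*
 * Submit the pending bio, if any.  Each in-flight bio holds a reference
 * on the enclosing parallel_io, dropped from its end_io handler.  Always
 * returns NULL so callers can clear their bio pointer in one statement.
 */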
static struct bio *
bl_submit_bio(struct bio *bio)
{
	if (bio) {
		get_parallel(bio->bi_private);
		dprintk("%s submitting %s bio %u@%llu\n", __func__,
			bio_op(bio) == READ ? "read" : "write",
			bio->bi_iter.bi_size,
			(unsigned long long)bio->bi_iter.bi_sector);
		submit_bio(bio);
	}
	return NULL;
}

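/*
 * Allocate and initialize a bio for up to npg pages, retrying with
 * progressively smaller sizes when allocating from a memory-reclaim
 * context.
 */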
static struct bio *
bl_alloc_init_bio(int npg, struct block_device *bdev, sector_t disk_sector,
		bio_end_io_t end_io, struct parallel_io *par)
{
	struct bio *bio;

	npg = min(npg, BIO_MAX_PAGES);
	bio = bio_alloc(GFP_NOIO, npg);
	if (!bio && (current->flags & PF_MEMALLOC)) {
		while (!bio && (npg /= 2))
			bio = bio_alloc(GFP_NOIO, npg);
	}

	if (bio) {
		bio->bi_iter.bi_sector = disk_sector;
		bio_set_dev(bio, bdev);
		bio->bi_end_io = end_io;
		bio->bi_private = par;
	}
	return bio;
}

static bool offset_in_map(u64 offset, struct pnfs_block_dev_map *map)
{
	return offset >= map->start && offset < map->start + map->len;
}

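/*
 * Add one page to the bio, translating the file-relative sector to a
 * physical disk offset through the extent and the cached device mapping.
 * A full bio, or a change of device mapping, causes the current bio to
 * be submitted and a new one allocated.  *len is trimmed to what the
 * current mapping allows.
 */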
static struct bio *
do_add_page_to_bio(struct bio *bio, int npg, int rw, sector_t isect,
		struct page *page, struct pnfs_block_dev_map *map,
		struct pnfs_block_extent *be, bio_end_io_t end_io,
		struct parallel_io *par, unsigned int offset, int *len)
{
	struct pnfs_block_dev *dev =
		container_of(be->be_device, struct pnfs_block_dev, node);
	u64 disk_addr, end;

	dprintk("%s: npg %d rw %d isect %llu offset %u len %d\n", __func__,
		npg, rw, (unsigned long long)isect, offset, *len);

	/* translate to device offset */
	isect += be->be_v_offset;
	isect -= be->be_f_offset;

	/* translate to physical disk offset */
	disk_addr = (u64)isect << SECTOR_SHIFT;
	if (!offset_in_map(disk_addr, map)) {
		if (!dev->map(dev, disk_addr, map) || !offset_in_map(disk_addr, map))
			return ERR_PTR(-EIO);
		bio = bl_submit_bio(bio);
	}
	disk_addr += map->disk_offset;
	disk_addr -= map->start;

	/* limit length to what the device mapping allows */
	end = disk_addr + *len;
	if (end >= map->start + map->len)
		*len = map->start + map->len - disk_addr;

retry:
	if (!bio) {
		bio = bl_alloc_init_bio(npg, map->bdev,
				disk_addr >> SECTOR_SHIFT, end_io, par);
		if (!bio)
			return ERR_PTR(-ENOMEM);
		bio_set_op_attrs(bio, rw, 0);
	}
	if (bio_add_page(bio, page, *len, offset) < *len) {
		bio = bl_submit_bio(bio);
		goto retry;
	}
	return bio;
}

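/*
 * After an I/O error, mark every deviceid backing the request's byte
 * range as unavailable so that subsequent layouts avoid those devices.
 */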
static void bl_mark_devices_unavailable(struct nfs_pgio_header *header, bool rw)
{
	struct pnfs_block_layout *bl = BLK_LSEG2EXT(header->lseg);
	size_t bytes_left = header->args.count;
	sector_t isect, extent_length = 0;
	struct pnfs_block_extent be;

	isect = header->args.offset >> SECTOR_SHIFT;
	bytes_left += header->args.offset - (isect << SECTOR_SHIFT);

	while (bytes_left > 0) {
		if (!ext_tree_lookup(bl, isect, &be, rw))
			return;
		extent_length = be.be_length - (isect - be.be_f_offset);
		nfs4_mark_deviceid_unavailable(be.be_device);
		isect += extent_length;
		if (bytes_left > extent_length << SECTOR_SHIFT)
			bytes_left -= extent_length << SECTOR_SHIFT;
		else
			bytes_left = 0;
	}
}

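/* bio completion for reads: record any error and drop the parallel_io reference */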
static void bl_end_io_read(struct bio *bio)
{
	struct parallel_io *par = bio->bi_private;

	if (bio->bi_status) {
		struct nfs_pgio_header *header = par->data;

		if (!header->pnfs_error)
			header->pnfs_error = -EIO;
		pnfs_set_lo_fail(header->lseg);
		bl_mark_devices_unavailable(header, false);
	}

	bio_put(bio);
	put_parallel(par);
}

static void bl_read_cleanup(struct work_struct *work)
{
	struct rpc_task *task;
	struct nfs_pgio_header *hdr;
	dprintk("%s enter\n", __func__);
	task = container_of(work, struct rpc_task, u.tk_work);
	hdr = container_of(task, struct nfs_pgio_header, task);
	pnfs_ld_read_done(hdr);
}

static void
bl_end_par_io_read(void *data)
{
	struct nfs_pgio_header *hdr = data;

	hdr->task.tk_status = hdr->pnfs_error;
	INIT_WORK(&hdr->task.u.tk_work, bl_read_cleanup);
	schedule_work(&hdr->task.u.tk_work);
}

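/*
 * Read path: walk the extents covering the request, zero-filling pages
 * that fall into holes and queueing the rest as bios against the
 * underlying block device.
 */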
static enum pnfs_try_status
bl_read_pagelist(struct nfs_pgio_header *header)
{
	struct pnfs_block_layout *bl = BLK_LSEG2EXT(header->lseg);
	struct pnfs_block_dev_map map = { .start = NFS4_MAX_UINT64 };
	struct bio *bio = NULL;
	struct pnfs_block_extent be;
	sector_t isect, extent_length = 0;
	struct parallel_io *par;
	loff_t f_offset = header->args.offset;
	size_t bytes_left = header->args.count;
	unsigned int pg_offset = header->args.pgbase, pg_len;
	struct page **pages = header->args.pages;
	int pg_index = header->args.pgbase >> PAGE_SHIFT;
	const bool is_dio = (header->dreq != NULL);
	struct blk_plug plug;
	int i;

	dprintk("%s enter nr_pages %u offset %lld count %u\n", __func__,
		header->page_array.npages, f_offset,
		(unsigned int)header->args.count);

	par = alloc_parallel(header);
	if (!par)
		return PNFS_NOT_ATTEMPTED;
	par->pnfs_callback = bl_end_par_io_read;

	blk_start_plug(&plug);

	isect = (sector_t) (f_offset >> SECTOR_SHIFT);
	/* Code assumes extents are page-aligned */
	for (i = pg_index; i < header->page_array.npages; i++) {
		if (extent_length <= 0) {
			/* We've used up the previous extent */
			bio = bl_submit_bio(bio);

			/* Get the next one */
			if (!ext_tree_lookup(bl, isect, &be, false)) {
				header->pnfs_error = -EIO;
				goto out;
			}
			extent_length = be.be_length - (isect - be.be_f_offset);
		}

		if (is_dio) {
			if (pg_offset + bytes_left > PAGE_SIZE)
				pg_len = PAGE_SIZE - pg_offset;
			else
				pg_len = bytes_left;
		} else {
			BUG_ON(pg_offset != 0);
			pg_len = PAGE_SIZE;
		}

		if (is_hole(&be)) {
			bio = bl_submit_bio(bio);
			/* Fill hole w/ zeroes w/o accessing device */
			dprintk("%s Zeroing page for hole\n", __func__);
			zero_user_segment(pages[i], pg_offset, pg_len);

			/* invalidate map */
			map.start = NFS4_MAX_UINT64;
		} else {
			bio = do_add_page_to_bio(bio,
						 header->page_array.npages - i,
						 READ,
						 isect, pages[i], &map, &be,
						 bl_end_io_read, par,
						 pg_offset, &pg_len);
			if (IS_ERR(bio)) {
				header->pnfs_error = PTR_ERR(bio);
				bio = NULL;
				goto out;
			}
		}
		isect += (pg_len >> SECTOR_SHIFT);
		extent_length -= (pg_len >> SECTOR_SHIFT);
		f_offset += pg_len;
		bytes_left -= pg_len;
		pg_offset = 0;
	}
	if ((isect << SECTOR_SHIFT) >= header->inode->i_size) {
		header->res.eof = 1;
		header->res.count = header->inode->i_size - header->args.offset;
	} else {
		header->res.count = (isect << SECTOR_SHIFT) - header->args.offset;
	}
out:
	bl_submit_bio(bio);
	blk_finish_plug(&plug);
	put_parallel(par);
	return PNFS_ATTEMPTED;
}

static void bl_end_io_write(struct bio *bio)
{
	struct parallel_io *par = bio->bi_private;
	struct nfs_pgio_header *header = par->data;

	if (bio->bi_status) {
		if (!header->pnfs_error)
			header->pnfs_error = -EIO;
		pnfs_set_lo_fail(header->lseg);
		bl_mark_devices_unavailable(header, true);
	}
	bio_put(bio);
	put_parallel(par);
}

/* Function scheduled for call during bl_end_par_io_write,
 * it marks sectors as written and extends the commitlist.
 */
static void bl_write_cleanup(struct work_struct *work)
{
	struct rpc_task *task = container_of(work, struct rpc_task, u.tk_work);
	struct nfs_pgio_header *hdr =
		container_of(task, struct nfs_pgio_header, task);

	dprintk("%s enter\n", __func__);

	if (likely(!hdr->pnfs_error)) {
		struct pnfs_block_layout *bl = BLK_LSEG2EXT(hdr->lseg);
		u64 start = hdr->args.offset & (loff_t)PAGE_MASK;
		u64 end = (hdr->args.offset + hdr->args.count +
			PAGE_SIZE - 1) & (loff_t)PAGE_MASK;
		u64 lwb = hdr->args.offset + hdr->args.count;

		ext_tree_mark_written(bl, start >> SECTOR_SHIFT,
					(end - start) >> SECTOR_SHIFT, lwb);
	}

	pnfs_ld_write_done(hdr);
}

/* Called when last of bios associated with a bl_write_pagelist call finishes */
static void bl_end_par_io_write(void *data)
{
	struct nfs_pgio_header *hdr = data;

	hdr->task.tk_status = hdr->pnfs_error;
	hdr->verf.committed = NFS_FILE_SYNC;
	INIT_WORK(&hdr->task.u.tk_work, bl_write_cleanup);
	schedule_work(&hdr->task.u.tk_work);
}

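/*
 * Write path: whole pages are always written; each page is mapped
 * through the RW extent tree and queued as a bio against the device.
 */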
static enum pnfs_try_status
bl_write_pagelist(struct nfs_pgio_header *header, int sync)
{
	struct pnfs_block_layout *bl = BLK_LSEG2EXT(header->lseg);
	struct pnfs_block_dev_map map = { .start = NFS4_MAX_UINT64 };
	struct bio *bio = NULL;
	struct pnfs_block_extent be;
	sector_t isect, extent_length = 0;
	struct parallel_io *par = NULL;
	loff_t offset = header->args.offset;
	size_t count = header->args.count;
	struct page **pages = header->args.pages;
	int pg_index = header->args.pgbase >> PAGE_SHIFT;
	unsigned int pg_len;
	struct blk_plug plug;
	int i;

	dprintk("%s enter, %zu@%lld\n", __func__, count, offset);

	/* At this point, header->page_array is a (sequential) list of nfs_pages.
	 * We want to write each, and if there is an error set pnfs_error
	 * to have it redone using nfs.
	 */
	par = alloc_parallel(header);
	if (!par)
		return PNFS_NOT_ATTEMPTED;
	par->pnfs_callback = bl_end_par_io_write;

	blk_start_plug(&plug);

	/* we always write out the whole page */
	offset = offset & (loff_t)PAGE_MASK;
	isect = offset >> SECTOR_SHIFT;

	for (i = pg_index; i < header->page_array.npages; i++) {
		if (extent_length <= 0) {
			/* We've used up the previous extent */
			bio = bl_submit_bio(bio);
			/* Get the next one */
			if (!ext_tree_lookup(bl, isect, &be, true)) {
				header->pnfs_error = -EINVAL;
				goto out;
			}

			extent_length = be.be_length - (isect - be.be_f_offset);
		}

		pg_len = PAGE_SIZE;
		bio = do_add_page_to_bio(bio, header->page_array.npages - i,
					 WRITE, isect, pages[i], &map, &be,
					 bl_end_io_write, par,
					 0, &pg_len);
		if (IS_ERR(bio)) {
			header->pnfs_error = PTR_ERR(bio);
			bio = NULL;
			goto out;
		}

		offset += pg_len;
		count -= pg_len;
		isect += (pg_len >> SECTOR_SHIFT);
		extent_length -= (pg_len >> SECTOR_SHIFT);
	}

	header->res.count = header->args.count;
out:
	bl_submit_bio(bio);
	blk_finish_plug(&plug);
	put_parallel(par);
	return PNFS_ATTEMPTED;
}

static void bl_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct pnfs_block_layout *bl = BLK_LO2EXT(lo);
	int err;

	dprintk("%s enter\n", __func__);

	err = ext_tree_remove(bl, true, 0, LLONG_MAX);
	WARN_ON(err);

	kfree(bl);
}

static struct pnfs_layout_hdr *__bl_alloc_layout_hdr(struct inode *inode,
		gfp_t gfp_flags, bool is_scsi_layout)
{
	struct pnfs_block_layout *bl;

	dprintk("%s enter\n", __func__);
	bl = kzalloc(sizeof(*bl), gfp_flags);
	if (!bl)
		return NULL;

	bl->bl_ext_rw = RB_ROOT;
	bl->bl_ext_ro = RB_ROOT;
	spin_lock_init(&bl->bl_ext_lock);

	bl->bl_scsi_layout = is_scsi_layout;
	return &bl->bl_layout;
}

static struct pnfs_layout_hdr *bl_alloc_layout_hdr(struct inode *inode,
		gfp_t gfp_flags)
{
	return __bl_alloc_layout_hdr(inode, gfp_flags, false);
}

static struct pnfs_layout_hdr *sl_alloc_layout_hdr(struct inode *inode,
		gfp_t gfp_flags)
{
	return __bl_alloc_layout_hdr(inode, gfp_flags, true);
}

static void bl_free_lseg(struct pnfs_layout_segment *lseg)
{
	dprintk("%s enter\n", __func__);
	kfree(lseg);
}

/* Tracks info needed to ensure extents in layout obey constraints of spec */
struct layout_verification {
	u32 mode;	/* R or RW */
	u64 start;	/* Expected start of next non-COW extent */
	u64 inval;	/* Start of INVAL coverage */
	u64 cowread;	/* End of COW read coverage */
};

/* Verify the extent meets the layout requirements of the pnfs-block draft,
 * section 2.3.1.
 */
static int verify_extent(struct pnfs_block_extent *be,
			 struct layout_verification *lv)
{
	if (lv->mode == IOMODE_READ) {
		if (be->be_state == PNFS_BLOCK_READWRITE_DATA ||
		    be->be_state == PNFS_BLOCK_INVALID_DATA)
			return -EIO;
		if (be->be_f_offset != lv->start)
			return -EIO;
		lv->start += be->be_length;
		return 0;
	}
	/* lv->mode == IOMODE_RW */
	if (be->be_state == PNFS_BLOCK_READWRITE_DATA) {
		if (be->be_f_offset != lv->start)
			return -EIO;
		if (lv->cowread > lv->start)
			return -EIO;
		lv->start += be->be_length;
		lv->inval = lv->start;
		return 0;
	} else if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
		if (be->be_f_offset != lv->start)
			return -EIO;
		lv->start += be->be_length;
		return 0;
	} else if (be->be_state == PNFS_BLOCK_READ_DATA) {
		if (be->be_f_offset > lv->start)
			return -EIO;
		if (be->be_f_offset < lv->inval)
			return -EIO;
		if (be->be_f_offset < lv->cowread)
			return -EIO;
		/* It looks like you might want to min this with lv->start,
		 * but you really don't.
		 */
		lv->inval = lv->inval + be->be_length;
		lv->cowread = be->be_f_offset + be->be_length;
		return 0;
	} else
		return -EIO;
}

static int decode_sector_number(__be32 **rp, sector_t *sp)
{
	uint64_t s;

	*rp = xdr_decode_hyper(*rp, &s);
	if (s & 0x1ff) {
		printk(KERN_WARNING "NFS: %s: sector not aligned\n", __func__);
		return -1;
	}
	*sp = s >> SECTOR_SHIFT;
	return 0;
}

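/*
 * Look up (or fetch) the deviceid node for @id.  A cached node that is
 * marked unavailable is discarded and looked up again once its retry
 * timeout has expired; otherwise -ENODEV is returned.
 */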
static struct nfs4_deviceid_node *
bl_find_get_deviceid(struct nfs_server *server,
		const struct nfs4_deviceid *id, const struct cred *cred,
		gfp_t gfp_mask)
{
	struct nfs4_deviceid_node *node;
	unsigned long start, end;

retry:
	node = nfs4_find_get_deviceid(server, id, cred, gfp_mask);
	if (!node)
		return ERR_PTR(-ENODEV);

	if (test_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags) == 0)
		return node;

	end = jiffies;
	start = end - PNFS_DEVICE_RETRY_TIMEOUT;
	if (!time_in_range(node->timestamp_unavailable, start, end)) {
		nfs4_delete_deviceid(node->ld, node->nfs_client, id);
		goto retry;
	}
	return ERR_PTR(-ENODEV);
}

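/*
 * Decode one extent from the layout XDR stream, resolve its deviceid and
 * verify it against the layout constraints before queueing it on the
 * temporary @extents list.
 */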
static int
bl_alloc_extent(struct xdr_stream *xdr, struct pnfs_layout_hdr *lo,
		struct layout_verification *lv, struct list_head *extents,
		gfp_t gfp_mask)
{
	struct pnfs_block_extent *be;
	struct nfs4_deviceid id;
	int error;
	__be32 *p;

	p = xdr_inline_decode(xdr, 28 + NFS4_DEVICEID4_SIZE);
	if (!p)
		return -EIO;

	be = kzalloc(sizeof(*be), GFP_NOFS);
	if (!be)
		return -ENOMEM;

	memcpy(&id, p, NFS4_DEVICEID4_SIZE);
	p += XDR_QUADLEN(NFS4_DEVICEID4_SIZE);

	be->be_device = bl_find_get_deviceid(NFS_SERVER(lo->plh_inode), &id,
						lo->plh_lc_cred, gfp_mask);
	if (IS_ERR(be->be_device)) {
		error = PTR_ERR(be->be_device);
		goto out_free_be;
	}

	/*
	 * The next three values are read in as bytes, but stored in the
	 * extent structure in 512-byte granularity.
	 */
	error = -EIO;
	if (decode_sector_number(&p, &be->be_f_offset) < 0)
		goto out_put_deviceid;
	if (decode_sector_number(&p, &be->be_length) < 0)
		goto out_put_deviceid;
	if (decode_sector_number(&p, &be->be_v_offset) < 0)
		goto out_put_deviceid;
	be->be_state = be32_to_cpup(p++);

	error = verify_extent(be, lv);
	if (error) {
		dprintk("%s: extent verification failed\n", __func__);
		goto out_put_deviceid;
	}

	list_add_tail(&be->be_list, extents);
	return 0;

out_put_deviceid:
	nfs4_put_deviceid_node(be->be_device);
out_free_be:
	kfree(be);
	return error;
}

static struct pnfs_layout_segment *
bl_alloc_lseg(struct pnfs_layout_hdr *lo, struct nfs4_layoutget_res *lgr,
		gfp_t gfp_mask)
{
	struct layout_verification lv = {
		.mode = lgr->range.iomode,
		.start = lgr->range.offset >> SECTOR_SHIFT,
		.inval = lgr->range.offset >> SECTOR_SHIFT,
		.cowread = lgr->range.offset >> SECTOR_SHIFT,
	};
	struct pnfs_block_layout *bl = BLK_LO2EXT(lo);
	struct pnfs_layout_segment *lseg;
	struct xdr_buf buf;
	struct xdr_stream xdr;
	struct page *scratch;
	int status, i;
	uint32_t count;
	__be32 *p;
	LIST_HEAD(extents);

	dprintk("---> %s\n", __func__);

	lseg = kzalloc(sizeof(*lseg), gfp_mask);
	if (!lseg)
		return ERR_PTR(-ENOMEM);

	status = -ENOMEM;
	scratch = alloc_page(gfp_mask);
	if (!scratch)
		goto out;

	xdr_init_decode_pages(&xdr, &buf,
			lgr->layoutp->pages, lgr->layoutp->len);
	xdr_set_scratch_buffer(&xdr, page_address(scratch), PAGE_SIZE);

	status = -EIO;
	p = xdr_inline_decode(&xdr, 4);
	if (unlikely(!p))
		goto out_free_scratch;

	count = be32_to_cpup(p++);
	dprintk("%s: number of extents %d\n", __func__, count);

	/*
	 * Decode individual extents, putting them in temporary staging area
	 * until whole layout is decoded to make error recovery easier.
	 */
	for (i = 0; i < count; i++) {
		status = bl_alloc_extent(&xdr, lo, &lv, &extents, gfp_mask);
		if (status)
			goto process_extents;
	}

	if (lgr->range.offset + lgr->range.length !=
			lv.start << SECTOR_SHIFT) {
		dprintk("%s Final length mismatch\n", __func__);
		status = -EIO;
		goto process_extents;
	}

	if (lv.start < lv.cowread) {
		dprintk("%s Final uncovered COW extent\n", __func__);
		status = -EIO;
	}

process_extents:
	while (!list_empty(&extents)) {
		struct pnfs_block_extent *be =
			list_first_entry(&extents, struct pnfs_block_extent,
					 be_list);
		list_del(&be->be_list);

		if (!status)
			status = ext_tree_insert(bl, be);

		if (status) {
			nfs4_put_deviceid_node(be->be_device);
			kfree(be);
		}
	}

out_free_scratch:
	__free_page(scratch);
out:
	dprintk("%s returns %d\n", __func__, status);
	switch (status) {
	case -ENODEV:
		/* Our extent block devices are unavailable */
		set_bit(NFS_LSEG_UNAVAILABLE, &lseg->pls_flags);
		/* Fall through */
	case 0:
		return lseg;
	default:
		kfree(lseg);
		return ERR_PTR(status);
	}
}

static void
bl_return_range(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_range *range)
{
	struct pnfs_block_layout *bl = BLK_LO2EXT(lo);
	sector_t offset = range->offset >> SECTOR_SHIFT, end;

	if (range->offset % 8) {
		dprintk("%s: offset %lld not block size aligned\n",
			__func__, range->offset);
		return;
	}

	if (range->length != NFS4_MAX_UINT64) {
		if (range->length % 8) {
			dprintk("%s: length %lld not block size aligned\n",
				__func__, range->length);
			return;
		}

		end = offset + (range->length >> SECTOR_SHIFT);
	} else {
		end = round_down(NFS4_MAX_UINT64, PAGE_SIZE);
	}

	ext_tree_remove(bl, range->iomode & IOMODE_RW, offset, end);
}

static int
bl_prepare_layoutcommit(struct nfs4_layoutcommit_args *arg)
{
	return ext_tree_prepare_commit(arg);
}

static void
bl_cleanup_layoutcommit(struct nfs4_layoutcommit_data *lcdata)
{
	ext_tree_mark_committed(&lcdata->args, lcdata->res.status);
}

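/* Sanity-check the server-provided block size; only sizes up to PAGE_SIZE are supported. */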
static int
bl_set_layoutdriver(struct nfs_server *server, const struct nfs_fh *fh)
{
	dprintk("%s enter\n", __func__);

	if (server->pnfs_blksize == 0) {
		dprintk("%s Server did not return blksize\n", __func__);
		return -EINVAL;
	}
	if (server->pnfs_blksize > PAGE_SIZE) {
		printk(KERN_ERR "%s: pNFS blksize %d not supported.\n",
			__func__, server->pnfs_blksize);
		return -EINVAL;
	}

	return 0;
}

static bool
is_aligned_req(struct nfs_pageio_descriptor *pgio,
		struct nfs_page *req, unsigned int alignment, bool is_write)
{
	/*
	 * Always accept buffered writes, higher layers take care of the
	 * right alignment.
	 */
	if (pgio->pg_dreq == NULL)
		return true;

	if (!IS_ALIGNED(req->wb_offset, alignment))
		return false;

	if (IS_ALIGNED(req->wb_bytes, alignment))
		return true;

	if (is_write &&
	    (req_offset(req) + req->wb_bytes == i_size_read(pgio->pg_inode))) {
		/*
		 * If the write goes up to the inode size, just write
		 * the full page.  Data past the inode size is
		 * guaranteed to be zeroed by the higher level client
		 * code, and this behaviour is mandated by RFC 5663
		 * section 2.3.2.
		 */
		return true;
	}

	return false;
}

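/*
 * Set up the pageio descriptor for reads, falling back to the MDS when
 * the request is not sector-aligned or the layout's devices are
 * unavailable.
 */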
static void
bl_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	if (!is_aligned_req(pgio, req, SECTOR_SIZE, false)) {
		nfs_pageio_reset_read_mds(pgio);
		return;
	}

	pnfs_generic_pg_init_read(pgio, req);

	if (pgio->pg_lseg &&
		test_bit(NFS_LSEG_UNAVAILABLE, &pgio->pg_lseg->pls_flags)) {
		pnfs_error_mark_layout_for_return(pgio->pg_inode, pgio->pg_lseg);
		pnfs_set_lo_fail(pgio->pg_lseg);
		nfs_pageio_reset_read_mds(pgio);
	}
}

/*
 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
 * of bytes (maximum @req->wb_bytes) that can be coalesced.
 */
static size_t
bl_pg_test_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
		struct nfs_page *req)
{
	if (!is_aligned_req(pgio, req, SECTOR_SIZE, false))
		return 0;
	return pnfs_generic_pg_test(pgio, prev, req);
}

/*
 * Return the number of contiguous bytes for a given inode
 * starting at page frame idx.
 */
static u64 pnfs_num_cont_bytes(struct inode *inode, pgoff_t idx)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t end;

	/* Optimize common case that writes from 0 to end of file */
	end = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
	if (end != inode->i_mapping->nrpages) {
		rcu_read_lock();
		end = page_cache_next_miss(mapping, idx + 1, ULONG_MAX);
		rcu_read_unlock();
	}

	if (!end)
		return i_size_read(inode) - (idx << PAGE_SHIFT);
	else
		return (end - idx) << PAGE_SHIFT;
}

static void
bl_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	u64 wb_size;

	if (!is_aligned_req(pgio, req, PAGE_SIZE, true)) {
		nfs_pageio_reset_write_mds(pgio);
		return;
	}

	if (pgio->pg_dreq == NULL)
		wb_size = pnfs_num_cont_bytes(pgio->pg_inode,
					      req->wb_index);
	else
		wb_size = nfs_dreq_bytes_left(pgio->pg_dreq);

	pnfs_generic_pg_init_write(pgio, req, wb_size);

	if (pgio->pg_lseg &&
		test_bit(NFS_LSEG_UNAVAILABLE, &pgio->pg_lseg->pls_flags)) {

		pnfs_error_mark_layout_for_return(pgio->pg_inode, pgio->pg_lseg);
		pnfs_set_lo_fail(pgio->pg_lseg);
		nfs_pageio_reset_write_mds(pgio);
	}
}

/*
 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
 * of bytes (maximum @req->wb_bytes) that can be coalesced.
 */
static size_t
bl_pg_test_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
		 struct nfs_page *req)
{
	if (!is_aligned_req(pgio, req, PAGE_SIZE, true))
		return 0;
	return pnfs_generic_pg_test(pgio, prev, req);
}

static const struct nfs_pageio_ops bl_pg_read_ops = {
	.pg_init = bl_pg_init_read,
	.pg_test = bl_pg_test_read,
	.pg_doio = pnfs_generic_pg_readpages,
	.pg_cleanup = pnfs_generic_pg_cleanup,
};

static const struct nfs_pageio_ops bl_pg_write_ops = {
	.pg_init = bl_pg_init_write,
	.pg_test = bl_pg_test_write,
	.pg_doio = pnfs_generic_pg_writepages,
	.pg_cleanup = pnfs_generic_pg_cleanup,
};

static struct pnfs_layoutdriver_type blocklayout_type = {
	.id				= LAYOUT_BLOCK_VOLUME,
	.name				= "LAYOUT_BLOCK_VOLUME",
	.owner				= THIS_MODULE,
	.flags				= PNFS_LAYOUTRET_ON_SETATTR |
					  PNFS_LAYOUTRET_ON_ERROR |
					  PNFS_READ_WHOLE_PAGE,
	.read_pagelist			= bl_read_pagelist,
	.write_pagelist			= bl_write_pagelist,
	.alloc_layout_hdr		= bl_alloc_layout_hdr,
	.free_layout_hdr		= bl_free_layout_hdr,
	.alloc_lseg			= bl_alloc_lseg,
	.free_lseg			= bl_free_lseg,
	.return_range			= bl_return_range,
	.prepare_layoutcommit		= bl_prepare_layoutcommit,
	.cleanup_layoutcommit		= bl_cleanup_layoutcommit,
	.set_layoutdriver		= bl_set_layoutdriver,
	.alloc_deviceid_node		= bl_alloc_deviceid_node,
	.free_deviceid_node		= bl_free_deviceid_node,
	.pg_read_ops			= &bl_pg_read_ops,
	.pg_write_ops			= &bl_pg_write_ops,
	.sync				= pnfs_generic_sync,
};

static struct pnfs_layoutdriver_type scsilayout_type = {
	.id				= LAYOUT_SCSI,
	.name				= "LAYOUT_SCSI",
	.owner				= THIS_MODULE,
	.flags				= PNFS_LAYOUTRET_ON_SETATTR |
					  PNFS_LAYOUTRET_ON_ERROR |
					  PNFS_READ_WHOLE_PAGE,
	.read_pagelist			= bl_read_pagelist,
	.write_pagelist			= bl_write_pagelist,
	.alloc_layout_hdr		= sl_alloc_layout_hdr,
	.free_layout_hdr		= bl_free_layout_hdr,
	.alloc_lseg			= bl_alloc_lseg,
	.free_lseg			= bl_free_lseg,
	.return_range			= bl_return_range,
	.prepare_layoutcommit		= bl_prepare_layoutcommit,
	.cleanup_layoutcommit		= bl_cleanup_layoutcommit,
	.set_layoutdriver		= bl_set_layoutdriver,
	.alloc_deviceid_node		= bl_alloc_deviceid_node,
	.free_deviceid_node		= bl_free_deviceid_node,
	.pg_read_ops			= &bl_pg_read_ops,
	.pg_write_ops			= &bl_pg_write_ops,
	.sync				= pnfs_generic_sync,
};


static int __init nfs4blocklayout_init(void)
{
	int ret;

	dprintk("%s: NFSv4 Block Layout Driver Registering...\n", __func__);

	ret = bl_init_pipefs();
	if (ret)
		goto out;

	ret = pnfs_register_layoutdriver(&blocklayout_type);
	if (ret)
		goto out_cleanup_pipe;

	ret = pnfs_register_layoutdriver(&scsilayout_type);
	if (ret)
		goto out_unregister_block;
	return 0;

out_unregister_block:
	pnfs_unregister_layoutdriver(&blocklayout_type);
out_cleanup_pipe:
	bl_cleanup_pipefs();
out:
	return ret;
}

static void __exit nfs4blocklayout_exit(void)
{
	dprintk("%s: NFSv4 Block Layout Driver Unregistering...\n",
	       __func__);

	pnfs_unregister_layoutdriver(&scsilayout_type);
	pnfs_unregister_layoutdriver(&blocklayout_type);
	bl_cleanup_pipefs();
}

MODULE_ALIAS("nfs-layouttype4-3");
MODULE_ALIAS("nfs-layouttype4-5");

module_init(nfs4blocklayout_init);
module_exit(nfs4blocklayout_exit);