/*******************************************************************************
 * Filename:  target_core_file.c
 *
 * This file contains the Storage Engine <-> FILEIO transport specific functions
 *
 * Copyright (c) 2005 PyX Technologies, Inc.
 * Copyright (c) 2005-2006 SBE, Inc.  All Rights Reserved.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/version.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_transport.h>

#include "target_core_file.h"

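/*
 * Compile-time switches for the FILEIO cache/FUA debug printk()s used below.
 */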
#if 1
#define DEBUG_FD_CACHE(x...) printk(x)
#else
#define DEBUG_FD_CACHE(x...)
#endif

#if 1
#define DEBUG_FD_FUA(x...) printk(x)
#else
#define DEBUG_FD_FUA(x...)
#endif

static struct se_subsystem_api fileio_template;

/*	fd_attach_hba(): (Part of se_subsystem_api_t template)
 */
static int fd_attach_hba(struct se_hba *hba, u32 host_id)
{
        struct fd_host *fd_host;

        fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL);
        if (!(fd_host)) {
                printk(KERN_ERR "Unable to allocate memory for struct fd_host\n");
                return -ENOMEM;
        }

        fd_host->fd_host_id = host_id;

        atomic_set(&hba->left_queue_depth, FD_HBA_QUEUE_DEPTH);
        atomic_set(&hba->max_queue_depth, FD_HBA_QUEUE_DEPTH);
        hba->hba_ptr = (void *) fd_host;

        printk(KERN_INFO "CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
                " Target Core Stack %s\n", hba->hba_id, FD_VERSION,
                TARGET_CORE_MOD_VERSION);
        printk(KERN_INFO "CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
                " Target Core with TCQ Depth: %d MaxSectors: %u\n",
                hba->hba_id, fd_host->fd_host_id,
                atomic_read(&hba->max_queue_depth), FD_MAX_SECTORS);

        return 0;
}

static void fd_detach_hba(struct se_hba *hba)
{
        struct fd_host *fd_host = hba->hba_ptr;

        printk(KERN_INFO "CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic"
                " Target Core\n", hba->hba_id, fd_host->fd_host_id);

        kfree(fd_host);
        hba->hba_ptr = NULL;
}

static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name)
{
        struct fd_dev *fd_dev;
        struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr;

        fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL);
        if (!(fd_dev)) {
                printk(KERN_ERR "Unable to allocate memory for struct fd_dev\n");
                return NULL;
        }

        fd_dev->fd_host = fd_host;

        printk(KERN_INFO "FILEIO: Allocated fd_dev for %s\n", name);

        return fd_dev;
}

/*	fd_create_virtdevice(): (Part of se_subsystem_api_t template)
 */
static struct se_device *fd_create_virtdevice(
        struct se_hba *hba,
        struct se_subsystem_dev *se_dev,
        void *p)
{
        char *dev_p = NULL;
        struct se_device *dev;
        struct se_dev_limits dev_limits;
        struct queue_limits *limits;
        struct fd_dev *fd_dev = (struct fd_dev *) p;
        struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr;
        mm_segment_t old_fs;
        struct file *file;
        struct inode *inode = NULL;
        int dev_flags = 0, flags, ret = -EINVAL;

        memset(&dev_limits, 0, sizeof(struct se_dev_limits));

        old_fs = get_fs();
        set_fs(get_ds());
        dev_p = getname(fd_dev->fd_dev_name);
        set_fs(old_fs);

        if (IS_ERR(dev_p)) {
                printk(KERN_ERR "getname(%s) failed: %lu\n",
                        fd_dev->fd_dev_name, IS_ERR(dev_p));
                ret = PTR_ERR(dev_p);
                goto fail;
        }
#if 0
        if (di->no_create_file)
                flags = O_RDWR | O_LARGEFILE;
        else
                flags = O_RDWR | O_CREAT | O_LARGEFILE;
#else
        flags = O_RDWR | O_CREAT | O_LARGEFILE;
#endif
/*	flags |= O_DIRECT; */
        /*
         * If fd_buffered_io=1 has not been set explicitly (the default),
         * use O_SYNC to force FILEIO writes to disk.
         */
        if (!(fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO))
                flags |= O_SYNC;

        file = filp_open(dev_p, flags, 0600);
        if (IS_ERR(file)) {
                printk(KERN_ERR "filp_open(%s) failed\n", dev_p);
                ret = PTR_ERR(file);
                goto fail;
        }
        if (!file || !file->f_dentry) {
                printk(KERN_ERR "filp_open(%s) failed\n", dev_p);
                goto fail;
        }
        fd_dev->fd_file = file;
        /*
         * If using a block backend with this struct file, we extract
         * fd_dev->fd_[block,dev]_size from struct block_device.
         *
         * Otherwise, we use the passed fd_size= from configfs
         */
        inode = file->f_mapping->host;
        if (S_ISBLK(inode->i_mode)) {
                struct request_queue *q;
                /*
                 * Setup the local scope queue_limits from struct request_queue->limits
                 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
                 */
                q = bdev_get_queue(inode->i_bdev);
                limits = &dev_limits.limits;
                limits->logical_block_size = bdev_logical_block_size(inode->i_bdev);
                limits->max_hw_sectors = queue_max_hw_sectors(q);
                limits->max_sectors = queue_max_sectors(q);
                /*
                 * Determine the number of bytes from i_size_read() minus
                 * one (1) logical sector from underlying struct block_device
                 */
                fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
                fd_dev->fd_dev_size = (i_size_read(file->f_mapping->host) -
                                        fd_dev->fd_block_size);

                printk(KERN_INFO "FILEIO: Using size: %llu bytes from struct"
                        " block_device blocks: %llu logical_block_size: %d\n",
                        fd_dev->fd_dev_size,
                        div_u64(fd_dev->fd_dev_size, fd_dev->fd_block_size),
                        fd_dev->fd_block_size);
        } else {
                if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
                        printk(KERN_ERR "FILEIO: Missing fd_dev_size="
                                " parameter, and no backing struct"
                                " block_device\n");
                        goto fail;
                }

                limits = &dev_limits.limits;
                limits->logical_block_size = FD_BLOCKSIZE;
                limits->max_hw_sectors = FD_MAX_SECTORS;
                limits->max_sectors = FD_MAX_SECTORS;
                fd_dev->fd_block_size = FD_BLOCKSIZE;
        }

        dev_limits.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
        dev_limits.queue_depth = FD_DEVICE_QUEUE_DEPTH;

        dev = transport_add_device_to_core_hba(hba, &fileio_template,
                                se_dev, dev_flags, (void *)fd_dev,
                                &dev_limits, "FILEIO", FD_VERSION);
        if (!(dev))
                goto fail;

        fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
        fd_dev->fd_queue_depth = dev->queue_depth;

        printk(KERN_INFO "CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
                " %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
                        fd_dev->fd_dev_name, fd_dev->fd_dev_size);

        putname(dev_p);
        return dev;
fail:
        if (fd_dev->fd_file) {
                filp_close(fd_dev->fd_file, NULL);
                fd_dev->fd_file = NULL;
        }
        putname(dev_p);
        return ERR_PTR(ret);
}

/*	fd_free_device(): (Part of se_subsystem_api_t template)
 */
static void fd_free_device(void *p)
{
        struct fd_dev *fd_dev = (struct fd_dev *) p;

        if (fd_dev->fd_file) {
                filp_close(fd_dev->fd_file, NULL);
                fd_dev->fd_file = NULL;
        }

        kfree(fd_dev);
}

static inline struct fd_request *FILE_REQ(struct se_task *task)
{
        return container_of(task, struct fd_request, fd_task);
}

static struct se_task *
fd_alloc_task(struct se_cmd *cmd)
{
        struct fd_request *fd_req;

        fd_req = kzalloc(sizeof(struct fd_request), GFP_KERNEL);
        if (!(fd_req)) {
                printk(KERN_ERR "Unable to allocate struct fd_request\n");
                return NULL;
        }

        fd_req->fd_dev = SE_DEV(cmd)->dev_ptr;

        return &fd_req->fd_task;
}

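/*
 * Map the struct scatterlist entries for this task into a kernel iovec and
 * read from the backing struct file with vfs_readv(), starting at the byte
 * offset task_lba * block_size.
 */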
static int fd_do_readv(struct se_task *task)
{
        struct fd_request *req = FILE_REQ(task);
        struct file *fd = req->fd_dev->fd_file;
        struct scatterlist *sg = task->task_sg;
        struct iovec *iov;
        mm_segment_t old_fs;
        loff_t pos = (task->task_lba * DEV_ATTRIB(task->se_dev)->block_size);
        int ret = 0, i;

        iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL);
        if (!(iov)) {
                printk(KERN_ERR "Unable to allocate fd_do_readv iov[]\n");
                return -1;
        }
        for (i = 0; i < task->task_sg_num; i++) {
                iov[i].iov_len = sg[i].length;
                iov[i].iov_base = sg_virt(&sg[i]);
        }

        old_fs = get_fs();
        set_fs(get_ds());
        ret = vfs_readv(fd, &iov[0], task->task_sg_num, &pos);
        set_fs(old_fs);
        kfree(iov);
        /*
         * Return zeros and GOOD status even if the READ did not return
         * the expected virt_size for struct file w/o a backing struct
         * block_device.
         */
        if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) {
                if (ret < 0 || ret != task->task_size) {
                        printk(KERN_ERR "vfs_readv() returned %d,"
                                " expecting %d for S_ISBLK\n", ret,
                                (int)task->task_size);
                        return -1;
                }
        } else {
                if (ret < 0) {
                        printk(KERN_ERR "vfs_readv() returned %d for non"
                                " S_ISBLK\n", ret);
                        return -1;
                }
        }
        return 1;
}

static int fd_do_writev(struct se_task *task)
{
        struct fd_request *req = FILE_REQ(task);
        struct file *fd = req->fd_dev->fd_file;
        struct scatterlist *sg = task->task_sg;
        struct iovec *iov;
        mm_segment_t old_fs;
        loff_t pos = (task->task_lba * DEV_ATTRIB(task->se_dev)->block_size);
        int ret, i = 0;

        iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL);
        if (!(iov)) {
                printk(KERN_ERR "Unable to allocate fd_do_writev iov[]\n");
                return -1;
        }
        for (i = 0; i < task->task_sg_num; i++) {
                iov[i].iov_len = sg[i].length;
                iov[i].iov_base = sg_virt(&sg[i]);
        }

        old_fs = get_fs();
        set_fs(get_ds());
        ret = vfs_writev(fd, &iov[0], task->task_sg_num, &pos);
        set_fs(old_fs);
        kfree(iov);

        if (ret < 0 || ret != task->task_size) {
                printk(KERN_ERR "vfs_writev() returned %d\n", ret);
                return -1;
        }
        return 1;
}

static void fd_emulate_sync_cache(struct se_task *task)
{
        struct se_cmd *cmd = TASK_CMD(task);
        struct se_device *dev = cmd->se_dev;
        struct fd_dev *fd_dev = dev->dev_ptr;
        int immed = (cmd->t_task->t_task_cdb[1] & 0x2);
        loff_t start, end;
        int ret;

        /*
         * If the Immediate bit is set, queue up the GOOD response
         * for this SYNCHRONIZE_CACHE op
         */
        if (immed)
                transport_complete_sync_cache(cmd, 1);

        /*
         * Determine if we will be flushing the entire device.
         */
        if (cmd->t_task->t_task_lba == 0 && cmd->data_length == 0) {
                start = 0;
                end = LLONG_MAX;
        } else {
                start = cmd->t_task->t_task_lba * DEV_ATTRIB(dev)->block_size;
                if (cmd->data_length)
                        end = start + cmd->data_length;
                else
                        end = LLONG_MAX;
        }

        ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
        if (ret != 0)
                printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", ret);

        if (!immed)
                transport_complete_sync_cache(cmd, ret == 0);
}

/*
 * Tell TCM Core that we are capable of WriteCache emulation for
 * an underlying struct se_device.
 */
static int fd_emulated_write_cache(struct se_device *dev)
{
        return 1;
}

static int fd_emulated_dpo(struct se_device *dev)
{
        return 0;
}

/*
 * Tell TCM Core that we will be emulating Forced Unit Access (FUA) for WRITEs
 * for TYPE_DISK.
 */
static int fd_emulated_fua_write(struct se_device *dev)
{
        return 1;
}

static int fd_emulated_fua_read(struct se_device *dev)
{
        return 0;
}

/*
 * WRITE Force Unit Access (FUA) emulation on a per struct se_task
 * LBA range basis.
 */
static void fd_emulate_write_fua(struct se_cmd *cmd, struct se_task *task)
{
        struct se_device *dev = cmd->se_dev;
        struct fd_dev *fd_dev = dev->dev_ptr;
        loff_t start = task->task_lba * DEV_ATTRIB(dev)->block_size;
        loff_t end = start + task->task_size;
        int ret;

        DEBUG_FD_CACHE("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n",
                task->task_lba, task->task_size);

        ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
        if (ret != 0)
                printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", ret);
}

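/*
 * Main I/O entry point called by the TCM Core for each struct se_task,
 * dispatching to fd_do_readv()/fd_do_writev() and applying FUA emulation
 * when required by the device attributes.
 */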
static int fd_do_task(struct se_task *task)
{
        struct se_cmd *cmd = task->task_se_cmd;
        struct se_device *dev = cmd->se_dev;
        int ret = 0;

        /*
         * Call vectorized fileio functions to map struct scatterlist
         * physical memory addresses to struct iovec virtual memory.
         */
        if (task->task_data_direction == DMA_FROM_DEVICE) {
                ret = fd_do_readv(task);
        } else {
                ret = fd_do_writev(task);

                if (ret > 0 &&
                    DEV_ATTRIB(dev)->emulate_write_cache > 0 &&
                    DEV_ATTRIB(dev)->emulate_fua_write > 0 &&
                    T_TASK(cmd)->t_tasks_fua) {
                        /*
                         * We might need to be a bit smarter here
                         * and return some sense data to let the initiator
                         * know the FUA WRITE cache sync failed..?
                         */
                        fd_emulate_write_fua(cmd, task);
                }
        }

        if (ret < 0)
                return ret;
        if (ret) {
                task->task_scsi_status = GOOD;
                transport_complete_task(task, 1);
        }
        return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}

/*	fd_free_task(): (Part of se_subsystem_api_t template)
 */
static void fd_free_task(struct se_task *task)
{
        struct fd_request *req = FILE_REQ(task);

        kfree(req);
}

enum {
        Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io, Opt_err
};

static match_table_t tokens = {
        {Opt_fd_dev_name, "fd_dev_name=%s"},
        {Opt_fd_dev_size, "fd_dev_size=%s"},
        {Opt_fd_buffered_io, "fd_buffered_io=%d"},
        {Opt_err, NULL}
};

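/*
 * Parse the configfs device attribute string written to the fd_dev control
 * file. An illustrative (hypothetical) example of such a string would be:
 *
 *   fd_dev_name=/tmp/fileio_backstore,fd_dev_size=4194304,fd_buffered_io=1
 *
 * fd_dev_name= is always required; fd_dev_size= is required unless the path
 * points at a block device, and fd_buffered_io=1 optionally switches from
 * O_SYNC to buffered writes.
 */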
static ssize_t fd_set_configfs_dev_params(
        struct se_hba *hba,
        struct se_subsystem_dev *se_dev,
        const char *page, ssize_t count)
{
        struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
        char *orig, *ptr, *arg_p, *opts;
        substring_t args[MAX_OPT_ARGS];
        int ret = 0, arg, token;

        opts = kstrdup(page, GFP_KERNEL);
        if (!opts)
                return -ENOMEM;

        orig = opts;

        while ((ptr = strsep(&opts, ",")) != NULL) {
                if (!*ptr)
                        continue;

                token = match_token(ptr, tokens, args);
                switch (token) {
                case Opt_fd_dev_name:
                        arg_p = match_strdup(&args[0]);
                        if (!arg_p) {
                                ret = -ENOMEM;
                                break;
                        }
                        snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME,
                                        "%s", arg_p);
                        kfree(arg_p);
                        printk(KERN_INFO "FILEIO: Referencing Path: %s\n",
                                        fd_dev->fd_dev_name);
                        fd_dev->fbd_flags |= FBDF_HAS_PATH;
                        break;
                case Opt_fd_dev_size:
                        arg_p = match_strdup(&args[0]);
                        if (!arg_p) {
                                ret = -ENOMEM;
                                break;
                        }
                        ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size);
                        kfree(arg_p);
                        if (ret < 0) {
                                printk(KERN_ERR "strict_strtoull() failed for"
                                                " fd_dev_size=\n");
                                goto out;
                        }
                        printk(KERN_INFO "FILEIO: Referencing Size: %llu"
                                        " bytes\n", fd_dev->fd_dev_size);
                        fd_dev->fbd_flags |= FBDF_HAS_SIZE;
                        break;
                case Opt_fd_buffered_io:
                        match_int(args, &arg);
                        if (arg != 1) {
                                printk(KERN_ERR "bogus fd_buffered_io=%d value\n", arg);
                                ret = -EINVAL;
                                goto out;
                        }

                        printk(KERN_INFO "FILEIO: Using buffered I/O"
                                " operations for struct fd_dev\n");

                        fd_dev->fbd_flags |= FDBD_USE_BUFFERED_IO;
                        break;
                default:
                        break;
                }
        }

out:
        kfree(orig);
        return (!ret) ? count : ret;
}

static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
{
        struct fd_dev *fd_dev = (struct fd_dev *) se_dev->se_dev_su_ptr;

        if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
                printk(KERN_ERR "Missing fd_dev_name=\n");
                return -EINVAL;
        }

        return 0;
}

static ssize_t fd_show_configfs_dev_params(
        struct se_hba *hba,
        struct se_subsystem_dev *se_dev,
        char *b)
{
        struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
        ssize_t bl = 0;

        bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
        bl += sprintf(b + bl, " File: %s Size: %llu Mode: %s\n",
                fd_dev->fd_dev_name, fd_dev->fd_dev_size,
                (fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO) ?
                "Buffered" : "Synchronous");
        return bl;
}

/*	fd_get_cdb(): (Part of se_subsystem_api_t template)
 */
static unsigned char *fd_get_cdb(struct se_task *task)
{
        struct fd_request *req = FILE_REQ(task);

        return req->fd_scsi_cdb;
}

/*	fd_get_device_rev(): (Part of se_subsystem_api_t template)
 */
static u32 fd_get_device_rev(struct se_device *dev)
{
        return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
}

/*	fd_get_device_type(): (Part of se_subsystem_api_t template)
 */
static u32 fd_get_device_type(struct se_device *dev)
{
        return TYPE_DISK;
}

static sector_t fd_get_blocks(struct se_device *dev)
{
        struct fd_dev *fd_dev = dev->dev_ptr;
        unsigned long long blocks_long = div_u64(fd_dev->fd_dev_size,
                        DEV_ATTRIB(dev)->block_size);

        return blocks_long;
}

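/*
 * se_subsystem_api callback template for FILEIO, registered with the
 * generic Target Core in fileio_module_init() below.
 */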
static struct se_subsystem_api fileio_template = {
        .name			= "fileio",
        .owner			= THIS_MODULE,
        .transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
        .attach_hba		= fd_attach_hba,
        .detach_hba		= fd_detach_hba,
        .allocate_virtdevice	= fd_allocate_virtdevice,
        .create_virtdevice	= fd_create_virtdevice,
        .free_device		= fd_free_device,
        .dpo_emulated		= fd_emulated_dpo,
        .fua_write_emulated	= fd_emulated_fua_write,
        .fua_read_emulated	= fd_emulated_fua_read,
        .write_cache_emulated	= fd_emulated_write_cache,
        .alloc_task		= fd_alloc_task,
        .do_task		= fd_do_task,
        .do_sync_cache		= fd_emulate_sync_cache,
        .free_task		= fd_free_task,
        .check_configfs_dev_params = fd_check_configfs_dev_params,
        .set_configfs_dev_params = fd_set_configfs_dev_params,
        .show_configfs_dev_params = fd_show_configfs_dev_params,
        .get_cdb		= fd_get_cdb,
        .get_device_rev		= fd_get_device_rev,
        .get_device_type	= fd_get_device_type,
        .get_blocks		= fd_get_blocks,
};

static int __init fileio_module_init(void)
{
        return transport_subsystem_register(&fileio_template);
}

static void fileio_module_exit(void)
{
        transport_subsystem_release(&fileio_template);
}

MODULE_DESCRIPTION("TCM FILEIO subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(fileio_module_init);
module_exit(fileio_module_exit);