#include <linux/mount.h>
#include <linux/fs.h>
#include <linux/gfs2_ondisk.h>
-#include <linux/ext2_fs.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/crc32.h>
struct gfs2_holder i_gh;
loff_t error;
- if (origin == 2) {
+ switch (origin) {
+ case SEEK_END: /* These reference inode->i_size */
+ case SEEK_DATA:
+ case SEEK_HOLE:
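+ /* A shared glock keeps the cached i_size up to date across the cluster */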
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
&i_gh);
if (!error) {
- error = generic_file_llseek_unlocked(file, offset, origin);
+ error = generic_file_llseek(file, offset, origin);
gfs2_glock_dq_uninit(&i_gh);
}
- } else
- error = generic_file_llseek_unlocked(file, offset, origin);
+ break;
+ case SEEK_CUR:
+ case SEEK_SET:
+ error = generic_file_llseek(file, offset, origin);
+ break;
+ default:
+ error = -EINVAL;
+ }
return error;
}
return error;
}
- error = gfs2_dir_read(dir, &offset, dirent, filldir);
+ error = gfs2_dir_read(dir, &offset, dirent, filldir, &file->f_ra);
gfs2_glock_dq_uninit(&d_gh);
[7] = GFS2_DIF_NOATIME,
[12] = GFS2_DIF_EXHASH,
[14] = GFS2_DIF_INHERIT_JDATA,
+ [17] = GFS2_DIF_TOPDIR,
};
static const u32 gfs2_to_fsflags[32] = {
[gfs2fl_AppendOnly] = FS_APPEND_FL,
[gfs2fl_NoAtime] = FS_NOATIME_FL,
[gfs2fl_ExHash] = FS_INDEX_FL,
+ [gfs2fl_TopLevel] = FS_TOPDIR_FL,
[gfs2fl_InheritJdata] = FS_JOURNAL_DATA_FL,
};
struct gfs2_inode *ip = GFS2_I(inode);
unsigned int flags = inode->i_flags;
- flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
+ flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_NOSEC);
+ if ((ip->i_eattr == 0) && !is_sxid(inode->i_mode))
+ flags |= S_NOSEC;
if (ip->i_diskflags & GFS2_DIF_IMMUTABLE)
flags |= S_IMMUTABLE;
if (ip->i_diskflags & GFS2_DIF_APPENDONLY)
GFS2_DIF_NOATIME| \
GFS2_DIF_SYNC| \
GFS2_DIF_SYSTEM| \
+ GFS2_DIF_TOPDIR| \
GFS2_DIF_INHERIT_JDATA)
/**
int error;
u32 new_flags, flags;
- error = mnt_want_write(filp->f_path.mnt);
+ error = mnt_want_write_file(filp);
if (error)
return error;
!capable(CAP_LINUX_IMMUTABLE))
goto out;
if (!IS_IMMUTABLE(inode)) {
- error = gfs2_permission(inode, MAY_WRITE, 0);
+ error = gfs2_permission(inode, MAY_WRITE);
if (error)
goto out;
}
out:
gfs2_glock_dq_uninit(&gh);
out_drop_write:
- mnt_drop_write(filp->f_path.mnt);
+ mnt_drop_write_file(filp);
return error;
}
gfsflags = fsflags_cvt(fsflags_to_gfs2, fsflags);
if (!S_ISDIR(inode->i_mode)) {
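+ /* Directory-only flags: drop TOPDIR and convert inherit-jdata to jdata */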
+ gfsflags &= ~GFS2_DIF_TOPDIR;
if (gfsflags & GFS2_DIF_INHERIT_JDATA)
gfsflags ^= (GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA);
return do_gfs2_set_flags(filp, gfsflags, ~0);
return gfs2_get_flags(filp, (u32 __user *)arg);
case FS_IOC_SETFLAGS:
return gfs2_set_flags(filp, (u32 __user *)arg);
+ case FITRIM:
+ return gfs2_fitrim(filp, (void __user *)arg);
}
return -ENOTTY;
}
/**
+ * gfs2_size_hint - Give a hint to the size of a write request
+ * @filep: The struct file
+ * @offset: The file offset of the write
+ * @size: The length of the write
+ *
+ * When we are about to do a write, this function records the total
+ * write size in order to provide a suitable hint to the lower layers
+ * about how many blocks will be required.
+ *
+ */
+
+static void gfs2_size_hint(struct file *filep, loff_t offset, size_t size)
+{
+ struct inode *inode = filep->f_dentry->d_inode;
+ struct gfs2_sbd *sdp = GFS2_SB(inode);
+ struct gfs2_inode *ip = GFS2_I(inode);
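+ /* Round the write size up to a whole number of fs blocks, clamping the hint to INT_MAX */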
+ size_t blks = (size + sdp->sd_sb.sb_bsize - 1) >> sdp->sd_sb.sb_bsize_shift;
+ int hint = min_t(size_t, INT_MAX, blks);
+
+ atomic_set(&ip->i_res->rs_sizehint, hint);
+}
+
+/**
* gfs2_allocate_page_backing - Use bmap to allocate blocks
* @page: The (locked) page to allocate backing for
*
u64 pos = page->index << PAGE_CACHE_SHIFT;
unsigned int data_blocks, ind_blocks, rblocks;
struct gfs2_holder gh;
- struct gfs2_alloc *al;
+ loff_t size;
int ret;
+ sb_start_pagefault(inode->i_sb);
+
+ /* Update file times before taking page lock */
+ file_update_time(vma->vm_file);
+
+ ret = gfs2_rs_alloc(ip);
+ if (ret) {
+ sb_end_pagefault(inode->i_sb); /* pair with sb_start_pagefault above */
+ return block_page_mkwrite_return(ret);
+ }
+
+ gfs2_size_hint(vma->vm_file, pos, PAGE_CACHE_SIZE);
+
gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
ret = gfs2_glock_nq(&gh);
if (ret)
set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
set_bit(GIF_SW_PAGED, &ip->i_flags);
- if (!gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE))
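+ /* No allocation required: just check the page is still valid and return it locked */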
+ if (!gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE)) {
+ lock_page(page);
+ if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
+ ret = -EAGAIN;
+ unlock_page(page);
+ }
goto out_unlock;
- ret = -ENOMEM;
- al = gfs2_alloc_get(ip);
- if (al == NULL)
+ }
+
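+ /* Make sure our view of the resource group index is up to date */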
+ ret = gfs2_rindex_update(sdp);
+ if (ret)
goto out_unlock;
ret = gfs2_quota_lock_check(ip);
if (ret)
- goto out_alloc_put;
+ goto out_unlock;
gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
- al->al_requested = data_blocks + ind_blocks;
- ret = gfs2_inplace_reserve(ip);
+ ret = gfs2_inplace_reserve(ip, data_blocks + ind_blocks);
if (ret)
goto out_quota_unlock;
rblocks += data_blocks ? data_blocks : 1;
if (ind_blocks || data_blocks) {
rblocks += RES_STATFS + RES_QUOTA;
- rblocks += gfs2_rg_blocks(al);
+ rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);
}
ret = gfs2_trans_begin(sdp, rblocks, 0);
if (ret)
lock_page(page);
ret = -EINVAL;
- last_index = ip->i_inode.i_size >> PAGE_CACHE_SHIFT;
- if (page->index > last_index)
- goto out_unlock_page;
+ size = i_size_read(inode);
+ last_index = (size - 1) >> PAGE_CACHE_SHIFT;
+ /* Check page index against inode size */
+ if (size == 0 || (page->index > last_index))
+ goto out_trans_end;
+
+ ret = -EAGAIN;
+ /* If the page was truncated, we must retry the operation; we may
+ * have raced with the glock demotion code.
+ */
+ if (!PageUptodate(page) || page->mapping != inode->i_mapping)
+ goto out_trans_end;
+
+ /* Unstuff, if required, and allocate backing blocks for page */
ret = 0;
- if (!PageUptodate(page) || page->mapping != ip->i_inode.i_mapping)
- goto out_unlock_page;
- if (gfs2_is_stuffed(ip)) {
+ if (gfs2_is_stuffed(ip))
ret = gfs2_unstuff_dinode(ip, page);
- if (ret)
- goto out_unlock_page;
- }
- ret = gfs2_allocate_page_backing(page);
+ if (ret == 0)
+ ret = gfs2_allocate_page_backing(page);
-out_unlock_page:
- unlock_page(page);
+out_trans_end:
+ if (ret)
+ unlock_page(page);
gfs2_trans_end(sdp);
out_trans_fail:
gfs2_inplace_release(ip);
out_quota_unlock:
gfs2_quota_unlock(ip);
-out_alloc_put:
- gfs2_alloc_put(ip);
out_unlock:
gfs2_glock_dq(&gh);
out:
gfs2_holder_uninit(&gh);
- if (ret == -ENOMEM)
- ret = VM_FAULT_OOM;
- else if (ret)
- ret = VM_FAULT_SIGBUS;
- return ret;
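+ /* On success the page is returned locked, dirty and not under writeback */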
+ if (ret == 0) {
+ set_page_dirty(page);
+ wait_on_page_writeback(page);
+ }
+ sb_end_pagefault(inode->i_sb);
+ return block_page_mkwrite_return(ret);
}
static const struct vm_operations_struct gfs2_vm_ops = {
.fault = filemap_fault,
.page_mkwrite = gfs2_page_mkwrite,
+ .remap_pages = generic_file_remap_pages,
};
/**
return error;
}
vma->vm_ops = &gfs2_vm_ops;
- vma->vm_flags |= VM_CAN_NONLINEAR;
return 0;
}
}
/**
- * gfs2_close - called to close a struct file
+ * gfs2_release - called to close a struct file
* @inode: the inode the struct file belongs to
* @file: the struct file being closed
*
* Returns: errno
*/
-static int gfs2_close(struct inode *inode, struct file *file)
+static int gfs2_release(struct inode *inode, struct file *file)
{
- struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
- struct gfs2_file *fp;
+ struct gfs2_inode *ip = GFS2_I(inode);
- fp = file->private_data;
+ kfree(file->private_data);
file->private_data = NULL;
- if (gfs2_assert_warn(sdp, fp))
- return -EIO;
-
- kfree(fp);
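+ /* If this was the last writer, drop the inode's block reservation */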
+ if ((file->f_mode & FMODE_WRITE) &&
+ (atomic_read(&inode->i_writecount) == 1))
+ gfs2_rs_delete(ip);
return 0;
}
/**
* gfs2_fsync - sync the dirty data for a file (across the cluster)
- * @file: the file that points to the dentry (we ignore this)
+ * @file: the file that points to the dentry
+ * @start: the start position in the file to sync
+ * @end: the end position in the file to sync
* @datasync: set if we can ignore timestamp changes
*
- * The VFS will flush data for us. We only need to worry
- * about metadata here.
+ * We split the data flushing here so that we don't wait for the data
+ * until after we've also sent the metadata to disk. Note that for
+ * data=ordered, we will write & wait for the data at the log flush
+ * stage anyway, so this is unlikely to make much of a difference
+ * except in the data=writeback case.
+ *
+ * If the fdatawrite fails due to any reason except -EIO, we will
+ * continue the remainder of the fsync, although we'll still report
+ * the error at the end. This is to match filemap_write_and_wait_range()
+ * behaviour.
*
* Returns: errno
*/
-static int gfs2_fsync(struct file *file, int datasync)
+static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
+ int datasync)
{
- struct inode *inode = file->f_mapping->host;
+ struct address_space *mapping = file->f_mapping;
+ struct inode *inode = mapping->host;
int sync_state = inode->i_state & (I_DIRTY_SYNC|I_DIRTY_DATASYNC);
struct gfs2_inode *ip = GFS2_I(inode);
- int ret;
+ int ret = 0, ret1 = 0;
+
+ if (mapping->nrpages) {
+ ret1 = filemap_fdatawrite_range(mapping, start, end);
+ if (ret1 == -EIO)
+ return ret1;
+ }
if (datasync)
sync_state &= ~I_DIRTY_SYNC;
ret = sync_inode_metadata(inode, 1);
if (ret)
return ret;
- gfs2_ail_flush(ip->i_gl);
+ if (gfs2_is_jdata(ip))
+ filemap_write_and_wait(mapping);
+ gfs2_ail_flush(ip->i_gl, 1);
}
- return 0;
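+ /* Now wait for the data writeback we started at the top of the fsync */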
+ if (mapping->nrpages)
+ ret = filemap_fdatawait_range(mapping, start, end);
+
+ return ret ? ret : ret1;
}
/**
unsigned long nr_segs, loff_t pos)
{
struct file *file = iocb->ki_filp;
+ size_t writesize = iov_length(iov, nr_segs);
+ struct dentry *dentry = file->f_dentry;
+ struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
+ int ret;
+
+ ret = gfs2_rs_alloc(ip);
+ if (ret)
+ return ret;
+
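+ /* Let the allocator know how large the overall write is likely to be */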
+ gfs2_size_hint(file, pos, writesize);
if (file->f_flags & O_APPEND) {
- struct dentry *dentry = file->f_dentry;
- struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
struct gfs2_holder gh;
- int ret;
ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
if (ret)
return generic_file_aio_write(iocb, iov, nr_segs, pos);
}
-static int empty_write_end(struct page *page, unsigned from,
- unsigned to, int mode)
-{
- struct inode *inode = page->mapping->host;
- struct gfs2_inode *ip = GFS2_I(inode);
- struct buffer_head *bh;
- unsigned offset, blksize = 1 << inode->i_blkbits;
- pgoff_t end_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
-
- zero_user(page, from, to-from);
- mark_page_accessed(page);
-
- if (page->index < end_index || !(mode & FALLOC_FL_KEEP_SIZE)) {
- if (!gfs2_is_writeback(ip))
- gfs2_page_add_databufs(ip, page, from, to);
-
- block_commit_write(page, from, to);
- return 0;
- }
-
- offset = 0;
- bh = page_buffers(page);
- while (offset < to) {
- if (offset >= from) {
- set_buffer_uptodate(bh);
- mark_buffer_dirty(bh);
- clear_buffer_new(bh);
- write_dirty_buffer(bh, WRITE);
- }
- offset += blksize;
- bh = bh->b_this_page;
- }
-
- offset = 0;
- bh = page_buffers(page);
- while (offset < to) {
- if (offset >= from) {
- wait_on_buffer(bh);
- if (!buffer_uptodate(bh))
- return -EIO;
- }
- offset += blksize;
- bh = bh->b_this_page;
- }
- return 0;
-}
-
-static int needs_empty_write(sector_t block, struct inode *inode)
-{
- int error;
- struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };
-
- bh_map.b_size = 1 << inode->i_blkbits;
- error = gfs2_block_map(inode, block, &bh_map, 0);
- if (unlikely(error))
- return error;
- return !buffer_mapped(&bh_map);
-}
-
-static int write_empty_blocks(struct page *page, unsigned from, unsigned to,
- int mode)
-{
- struct inode *inode = page->mapping->host;
- unsigned start, end, next, blksize;
- sector_t block = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
- int ret;
-
- blksize = 1 << inode->i_blkbits;
- next = end = 0;
- while (next < from) {
- next += blksize;
- block++;
- }
- start = next;
- do {
- next += blksize;
- ret = needs_empty_write(block, inode);
- if (unlikely(ret < 0))
- return ret;
- if (ret == 0) {
- if (end) {
- ret = __block_write_begin(page, start, end - start,
- gfs2_block_map);
- if (unlikely(ret))
- return ret;
- ret = empty_write_end(page, start, end, mode);
- if (unlikely(ret))
- return ret;
- end = 0;
- }
- start = next;
- }
- else
- end = next;
- block++;
- } while (next < to);
-
- if (end) {
- ret = __block_write_begin(page, start, end - start, gfs2_block_map);
- if (unlikely(ret))
- return ret;
- ret = empty_write_end(page, start, end, mode);
- if (unlikely(ret))
- return ret;
- }
-
- return 0;
-}
-
static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
int mode)
{
struct gfs2_inode *ip = GFS2_I(inode);
struct buffer_head *dibh;
int error;
- u64 start = offset >> PAGE_CACHE_SHIFT;
- unsigned int start_offset = offset & ~PAGE_CACHE_MASK;
- u64 end = (offset + len - 1) >> PAGE_CACHE_SHIFT;
- pgoff_t curr;
- struct page *page;
- unsigned int end_offset = (offset + len) & ~PAGE_CACHE_MASK;
- unsigned int from, to;
-
- if (!end_offset)
- end_offset = PAGE_CACHE_SIZE;
+ loff_t size = len;
+ unsigned int nr_blks;
+ sector_t lblock = offset >> inode->i_blkbits;
error = gfs2_meta_inode_buffer(ip, &dibh);
if (unlikely(error))
- goto out;
+ return error;
gfs2_trans_add_bh(ip->i_gl, dibh, 1);
goto out;
}
- curr = start;
- offset = start << PAGE_CACHE_SHIFT;
- from = start_offset;
- to = PAGE_CACHE_SIZE;
- while (curr <= end) {
- page = grab_cache_page_write_begin(inode->i_mapping, curr,
- AOP_FLAG_NOFS);
- if (unlikely(!page)) {
- error = -ENOMEM;
- goto out;
- }
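+ /* Map/allocate as large an extent as possible each time round the
+ * loop; zeronew requests that newly allocated blocks be zeroed.
+ */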
+ while (len) {
+ struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };
+ bh_map.b_size = len;
+ set_buffer_zeronew(&bh_map);
- if (curr == end)
- to = end_offset;
- error = write_empty_blocks(page, from, to, mode);
- if (!error && offset + to > inode->i_size &&
- !(mode & FALLOC_FL_KEEP_SIZE)) {
- i_size_write(inode, offset + to);
- }
- unlock_page(page);
- page_cache_release(page);
- if (error)
+ error = gfs2_block_map(inode, lblock, &bh_map, 1);
+ if (unlikely(error))
+ goto out;
+ len -= bh_map.b_size;
+ nr_blks = bh_map.b_size >> inode->i_blkbits;
+ lblock += nr_blks;
+ if (!buffer_new(&bh_map))
+ continue;
+ if (unlikely(!buffer_zeronew(&bh_map))) {
+ error = -EIO;
goto out;
- curr++;
- offset += PAGE_CACHE_SIZE;
- from = 0;
+ }
}
+ if (offset + size > inode->i_size && !(mode & FALLOC_FL_KEEP_SIZE))
+ i_size_write(inode, offset + size);
- gfs2_dinode_out(ip, dibh->b_data);
mark_inode_dirty(inode);
- brelse(dibh);
-
out:
+ brelse(dibh);
return error;
}
unsigned int *data_blocks, unsigned int *ind_blocks)
{
const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
- unsigned int max_blocks = ip->i_alloc->al_rgd->rd_free_clone;
+ unsigned int max_blocks = ip->i_rgd->rd_free_clone;
unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);
for (tmp = max_data; tmp > sdp->sd_diptrs;) {
struct gfs2_inode *ip = GFS2_I(inode);
unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
loff_t bytes, max_bytes;
- struct gfs2_alloc *al;
int error;
+ const loff_t pos = offset;
+ const loff_t count = len;
+ loff_t bsize_mask = ~((loff_t)sdp->sd_sb.sb_bsize - 1);
loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
+ loff_t max_chunk_size = UINT_MAX & bsize_mask;
next = (next + 1) << sdp->sd_sb.sb_bsize_shift;
/* We only support the FALLOC_FL_KEEP_SIZE mode */
if (mode & ~FALLOC_FL_KEEP_SIZE)
return -EOPNOTSUPP;
- offset = (offset >> sdp->sd_sb.sb_bsize_shift) <<
- sdp->sd_sb.sb_bsize_shift;
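+ /* Round the start offset down to a block boundary */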
+ offset &= bsize_mask;
len = next - offset;
bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
if (!bytes)
bytes = UINT_MAX;
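+ /* Keep the chunk size block-aligned and at least one block */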
+ bytes &= bsize_mask;
+ if (bytes == 0)
+ bytes = sdp->sd_sb.sb_bsize;
+
+ error = gfs2_rs_alloc(ip);
+ if (error)
+ return error;
gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
error = gfs2_glock_nq(&ip->i_gh);
if (unlikely(error))
goto out_uninit;
- if (!gfs2_write_alloc_required(ip, offset, len))
- goto out_unlock;
+ gfs2_size_hint(file, offset, len);
while (len > 0) {
if (len < bytes)
bytes = len;
- al = gfs2_alloc_get(ip);
- if (!al) {
- error = -ENOMEM;
- goto out_unlock;
+ if (!gfs2_write_alloc_required(ip, offset, bytes)) {
+ len -= bytes;
+ offset += bytes;
+ continue;
}
-
error = gfs2_quota_lock_check(ip);
if (error)
- goto out_alloc_put;
+ goto out_unlock;
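+ /* On -ENOSPC, gfs2_inplace_reserve is retried with a halved chunk size */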
retry:
gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
- al->al_requested = data_blocks + ind_blocks;
- error = gfs2_inplace_reserve(ip);
+ error = gfs2_inplace_reserve(ip, data_blocks + ind_blocks);
if (error) {
if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) {
bytes >>= 1;
+ bytes &= bsize_mask;
+ if (bytes == 0)
+ bytes = sdp->sd_sb.sb_bsize;
goto retry;
}
goto out_qunlock;
}
max_bytes = bytes;
- calc_max_reserv(ip, len, &max_bytes, &data_blocks, &ind_blocks);
- al->al_requested = data_blocks + ind_blocks;
+ calc_max_reserv(ip, (len > max_chunk_size) ? max_chunk_size : len,
+ &max_bytes, &data_blocks, &ind_blocks);
rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
- RES_RG_HDR + gfs2_rg_blocks(al);
+ RES_RG_HDR + gfs2_rg_blocks(ip, data_blocks + ind_blocks);
if (gfs2_is_jdata(ip))
rblocks += data_blocks ? data_blocks : 1;
offset += max_bytes;
gfs2_inplace_release(ip);
gfs2_quota_unlock(ip);
- gfs2_alloc_put(ip);
}
+
+ if (error == 0)
+ error = generic_write_sync(file, pos, count);
goto out_unlock;
out_trans_fail:
gfs2_inplace_release(ip);
out_qunlock:
gfs2_quota_unlock(ip);
-out_alloc_put:
- gfs2_alloc_put(ip);
out_unlock:
gfs2_glock_dq(&ip->i_gh);
out_uninit:
.unlocked_ioctl = gfs2_ioctl,
.mmap = gfs2_mmap,
.open = gfs2_open,
- .release = gfs2_close,
+ .release = gfs2_release,
.fsync = gfs2_fsync,
.lock = gfs2_lock,
.flock = gfs2_flock,
.readdir = gfs2_readdir,
.unlocked_ioctl = gfs2_ioctl,
.open = gfs2_open,
- .release = gfs2_close,
+ .release = gfs2_release,
.fsync = gfs2_fsync,
.lock = gfs2_lock,
.flock = gfs2_flock,
.unlocked_ioctl = gfs2_ioctl,
.mmap = gfs2_mmap,
.open = gfs2_open,
- .release = gfs2_close,
+ .release = gfs2_release,
.fsync = gfs2_fsync,
.splice_read = generic_file_splice_read,
.splice_write = generic_file_splice_write,
.readdir = gfs2_readdir,
.unlocked_ioctl = gfs2_ioctl,
.open = gfs2_open,
- .release = gfs2_close,
+ .release = gfs2_release,
.fsync = gfs2_fsync,
.llseek = default_llseek,
};