/*
 *   fs/cifs/file.c
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"

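/*
 * Map POSIX open flags to the NT desired access bits sent in an SMB open
 * request. For example, O_RDWR becomes GENERIC_READ | GENERIC_WRITE rather
 * than GENERIC_ALL, since requesting GENERIC_ALL can cause unnecessary
 * access-denied errors on create.
 */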
static inline int cifs_convert_flags(unsigned int flags)
{
        if ((flags & O_ACCMODE) == O_RDONLY)
                return GENERIC_READ;
        else if ((flags & O_ACCMODE) == O_WRONLY)
                return GENERIC_WRITE;
        else if ((flags & O_ACCMODE) == O_RDWR) {
                /* GENERIC_ALL is too much permission to request; it can
                   cause unnecessary access-denied errors on create */
                /* return GENERIC_ALL; */
                return (GENERIC_READ | GENERIC_WRITE);
        }

        return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
                FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
                FILE_READ_DATA);
}

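/*
 * Translate POSIX open flags into the SMB_O_* flags used by the CIFS POSIX
 * open protocol extension, e.g. O_CREAT -> SMB_O_CREAT. O_DSYNC is widened
 * to SMB_O_SYNC since there is no separate data-only sync flag on the wire.
 */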
static u32 cifs_posix_convert_flags(unsigned int flags)
{
        u32 posix_flags = 0;

        if ((flags & O_ACCMODE) == O_RDONLY)
                posix_flags = SMB_O_RDONLY;
        else if ((flags & O_ACCMODE) == O_WRONLY)
                posix_flags = SMB_O_WRONLY;
        else if ((flags & O_ACCMODE) == O_RDWR)
                posix_flags = SMB_O_RDWR;

        if (flags & O_CREAT)
                posix_flags |= SMB_O_CREAT;
        if (flags & O_EXCL)
                posix_flags |= SMB_O_EXCL;
        if (flags & O_TRUNC)
                posix_flags |= SMB_O_TRUNC;
        /* be safe and imply O_SYNC for O_DSYNC */
        if (flags & O_DSYNC)
                posix_flags |= SMB_O_SYNC;
        if (flags & O_DIRECTORY)
                posix_flags |= SMB_O_DIRECTORY;
        if (flags & O_NOFOLLOW)
                posix_flags |= SMB_O_NOFOLLOW;
        if (flags & O_DIRECT)
                posix_flags |= SMB_O_DIRECT;

        return posix_flags;
}

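/*
 * Derive the CIFS create disposition from the POSIX open flags; see the
 * mapping table in cifs_nt_open() below. The O_CREAT | O_EXCL and
 * O_CREAT | O_TRUNC combinations must be checked before plain O_CREAT.
 */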
static inline int cifs_get_disposition(unsigned int flags)
{
        if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
                return FILE_CREATE;
        else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
                return FILE_OVERWRITE_IF;
        else if ((flags & O_CREAT) == O_CREAT)
                return FILE_OPEN_IF;
        else if ((flags & O_TRUNC) == O_TRUNC)
                return FILE_OVERWRITE;
        else
                return FILE_OPEN;
}

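/*
 * Open a file using the CIFS POSIX protocol extensions. On success the
 * server returns FILE_UNIX_BASIC_INFO for the new handle; if pinode is
 * non-NULL, the inode is either instantiated from that data (when *pinode
 * is NULL) or refreshed in place.
 */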
int cifs_posix_open(char *full_path, struct inode **pinode,
                        struct super_block *sb, int mode, unsigned int f_flags,
                        __u32 *poplock, __u16 *pnetfid, int xid)
{
        int rc;
        FILE_UNIX_BASIC_INFO *presp_data;
        __u32 posix_flags = 0;
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
        struct cifs_fattr fattr;
        struct tcon_link *tlink;
        struct cifs_tcon *tcon;

        cFYI(1, "posix open %s", full_path);

        presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
        if (presp_data == NULL)
                return -ENOMEM;

        tlink = cifs_sb_tlink(cifs_sb);
        if (IS_ERR(tlink)) {
                rc = PTR_ERR(tlink);
                goto posix_open_ret;
        }

        tcon = tlink_tcon(tlink);
        mode &= ~current_umask();

        posix_flags = cifs_posix_convert_flags(f_flags);
        rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
                             poplock, full_path, cifs_sb->local_nls,
                             cifs_sb->mnt_cifs_flags &
                                        CIFS_MOUNT_MAP_SPECIAL_CHR);
        cifs_put_tlink(tlink);

        if (rc)
                goto posix_open_ret;

        if (presp_data->Type == cpu_to_le32(-1))
                goto posix_open_ret; /* open ok, caller does qpathinfo */

        if (!pinode)
                goto posix_open_ret; /* caller does not need info */

        cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

        /* get new inode and set it up */
        if (*pinode == NULL) {
                cifs_fill_uniqueid(sb, &fattr);
                *pinode = cifs_iget(sb, &fattr);
                if (!*pinode) {
                        rc = -ENOMEM;
                        goto posix_open_ret;
                }
        } else {
                cifs_fattr_to_inode(*pinode, &fattr);
        }

posix_open_ret:
        kfree(presp_data);
        return rc;
}

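/*
 * Open a file the traditional (non-POSIX) way: convert the VFS open flags
 * into an NT desired access / create disposition pair, issue the SMB open,
 * then refresh the inode metadata from the info the server returned.
 */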
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
             struct cifs_tcon *tcon, unsigned int f_flags, __u32 *poplock,
             __u16 *pnetfid, int xid)
{
        int rc;
        int desiredAccess;
        int disposition;
        int create_options = CREATE_NOT_DIR;
        FILE_ALL_INFO *buf;

        desiredAccess = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *      POSIX Flag            CIFS Disposition
 *      ----------            ----------------
 *      O_CREAT               FILE_OPEN_IF
 *      O_CREAT | O_EXCL      FILE_CREATE
 *      O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *      O_TRUNC               FILE_OVERWRITE
 *      none of the above     FILE_OPEN
 *
 *      Note that no POSIX flag maps directly to the disposition
 *      FILE_SUPERSEDE (ie create whether or not the file exists).
 *      O_CREAT | O_TRUNC is similar, but it truncates an existing
 *      file rather than replacing it with a new file as FILE_SUPERSEDE
 *      does (using the attributes / metadata passed in on the open call).
 *
 *      O_SYNC is a reasonable match to the CIFS writethrough flag,
 *      and the read/write flags match reasonably.  O_LARGEFILE
 *      is irrelevant because largefile support is always used
 *      by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *      O_FASYNC, O_NOFOLLOW and O_NONBLOCK need further investigation.
 *********************************************************************/

        disposition = cifs_get_disposition(f_flags);

        /* BB pass O_SYNC flag through on file attributes .. BB */

        buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        if (backup_cred(cifs_sb))
                create_options |= CREATE_OPEN_BACKUP_INTENT;

        if (tcon->ses->capabilities & CAP_NT_SMBS)
                rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
                         desiredAccess, create_options, pnetfid, poplock, buf,
                         cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
                                 & CIFS_MOUNT_MAP_SPECIAL_CHR);
        else
                rc = SMBLegacyOpen(xid, tcon, full_path, disposition,
                        desiredAccess, CREATE_NOT_DIR, pnetfid, poplock, buf,
                        cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
                                & CIFS_MOUNT_MAP_SPECIAL_CHR);

        if (rc)
                goto out;

        if (tcon->unix_ext)
                rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
                                              xid);
        else
                rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
                                         xid, pnetfid);

out:
        kfree(buf);
        return rc;
}

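/*
 * Allocate and initialize the cifsFileInfo for a newly opened handle, take
 * references on the dentry and tcon link, and add it to the per-tcon and
 * per-inode open file lists (readable instances are kept at the head of
 * the inode list). Also records the oplock level granted by the server.
 */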
struct cifsFileInfo *
cifs_new_fileinfo(__u16 fileHandle, struct file *file,
                  struct tcon_link *tlink, __u32 oplock)
{
        struct dentry *dentry = file->f_path.dentry;
        struct inode *inode = dentry->d_inode;
        struct cifsInodeInfo *pCifsInode = CIFS_I(inode);
        struct cifsFileInfo *pCifsFile;

        pCifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
        if (pCifsFile == NULL)
                return pCifsFile;

        pCifsFile->count = 1;
        pCifsFile->netfid = fileHandle;
        pCifsFile->pid = current->tgid;
        pCifsFile->uid = current_fsuid();
        pCifsFile->dentry = dget(dentry);
        pCifsFile->f_flags = file->f_flags;
        pCifsFile->invalidHandle = false;
        pCifsFile->tlink = cifs_get_tlink(tlink);
        mutex_init(&pCifsFile->fh_mutex);
        INIT_WORK(&pCifsFile->oplock_break, cifs_oplock_break);

        spin_lock(&cifs_file_list_lock);
        list_add(&pCifsFile->tlist, &(tlink_tcon(tlink)->openFileList));
        /* if this is a readable file instance, put it first in the list */
        if (file->f_mode & FMODE_READ)
                list_add(&pCifsFile->flist, &pCifsInode->openFileList);
        else
                list_add_tail(&pCifsFile->flist, &pCifsInode->openFileList);
        spin_unlock(&cifs_file_list_lock);

        cifs_set_oplock_level(pCifsInode, oplock);
        pCifsInode->can_cache_brlcks = pCifsInode->clientCanCacheAll;

        file->private_data = pCifsFile;
        return pCifsFile;
}

static void cifs_del_lock_waiters(struct cifsLockInfo *lock);

/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * cifs_file_list_lock.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
        struct inode *inode = cifs_file->dentry->d_inode;
        struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
        struct cifsInodeInfo *cifsi = CIFS_I(inode);
        struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
        struct cifsLockInfo *li, *tmp;

        spin_lock(&cifs_file_list_lock);
        if (--cifs_file->count > 0) {
                spin_unlock(&cifs_file_list_lock);
                return;
        }

        /* remove it from the lists */
        list_del(&cifs_file->flist);
        list_del(&cifs_file->tlist);

        if (list_empty(&cifsi->openFileList)) {
                cFYI(1, "closing last open instance for inode %p",
                        cifs_file->dentry->d_inode);

                /* in strict cache mode we need to invalidate the mapping on
                   the last close because it may cause an error when we open
                   this file again and get at least a level II oplock */
                if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
                        CIFS_I(inode)->invalid_mapping = true;

                cifs_set_oplock_level(cifsi, 0);
        }
        spin_unlock(&cifs_file_list_lock);

        cancel_work_sync(&cifs_file->oplock_break);

        if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
                int xid, rc;

                xid = GetXid();
                rc = CIFSSMBClose(xid, tcon, cifs_file->netfid);
                FreeXid(xid);
        }

        /* Delete any outstanding lock records. We'll lose them when the file
         * is closed anyway.
         */
        mutex_lock(&cifsi->lock_mutex);
        list_for_each_entry_safe(li, tmp, &cifsi->llist, llist) {
                if (li->netfid != cifs_file->netfid)
                        continue;
                list_del(&li->llist);
                cifs_del_lock_waiters(li);
                kfree(li);
        }
        mutex_unlock(&cifsi->lock_mutex);

        cifs_put_tlink(cifs_file->tlink);
        dput(cifs_file->dentry);
        kfree(cifs_file);
}

int cifs_open(struct inode *inode, struct file *file)
{
        int rc = -EACCES;
        int xid;
        __u32 oplock;
        struct cifs_sb_info *cifs_sb;
        struct cifs_tcon *tcon;
        struct tcon_link *tlink;
        struct cifsFileInfo *pCifsFile = NULL;
        char *full_path = NULL;
        bool posix_open_ok = false;
        __u16 netfid;

        xid = GetXid();

        cifs_sb = CIFS_SB(inode->i_sb);
        tlink = cifs_sb_tlink(cifs_sb);
        if (IS_ERR(tlink)) {
                FreeXid(xid);
                return PTR_ERR(tlink);
        }
        tcon = tlink_tcon(tlink);

        full_path = build_path_from_dentry(file->f_path.dentry);
        if (full_path == NULL) {
                rc = -ENOMEM;
                goto out;
        }

        cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
                 inode, file->f_flags, full_path);

        if (tcon->ses->server->oplocks)
                oplock = REQ_OPLOCK;
        else
                oplock = 0;

        if (!tcon->broken_posix_open && tcon->unix_ext &&
            (tcon->ses->capabilities & CAP_UNIX) &&
            (CIFS_UNIX_POSIX_PATH_OPS_CAP &
                        le64_to_cpu(tcon->fsUnixInfo.Capability))) {
                /* can not refresh inode info since size could be stale */
                rc = cifs_posix_open(full_path, &inode, inode->i_sb,
                                cifs_sb->mnt_file_mode /* ignored */,
                                file->f_flags, &oplock, &netfid, xid);
                if (rc == 0) {
                        cFYI(1, "posix open succeeded");
                        posix_open_ok = true;
                } else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
                        if (tcon->ses->serverNOS)
                                cERROR(1, "server %s of type %s returned"
                                           " unexpected error on SMB posix open"
                                           ", disabling posix open support."
                                           " Check if server update available.",
                                           tcon->ses->serverName,
                                           tcon->ses->serverNOS);
                        tcon->broken_posix_open = true;
                } else if ((rc != -EIO) && (rc != -EREMOTE) &&
                         (rc != -EOPNOTSUPP)) /* path not found or net err */
                        goto out;
                /* else fall through to retry the open the old way on network
                   i/o or DFS errors */
        }

        if (!posix_open_ok) {
                rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
                                  file->f_flags, &oplock, &netfid, xid);
                if (rc)
                        goto out;
        }

        pCifsFile = cifs_new_fileinfo(netfid, file, tlink, oplock);
        if (pCifsFile == NULL) {
                CIFSSMBClose(xid, tcon, netfid);
                rc = -ENOMEM;
                goto out;
        }

        cifs_fscache_set_inode_cookie(inode, file);

        if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
                /* time to set the mode, which we could not set earlier due
                   to problems creating new read-only files */
                struct cifs_unix_set_info_args args = {
                        .mode   = inode->i_mode,
                        .uid    = NO_CHANGE_64,
                        .gid    = NO_CHANGE_64,
                        .ctime  = NO_CHANGE_64,
                        .atime  = NO_CHANGE_64,
                        .mtime  = NO_CHANGE_64,
                        .device = 0,
                };
                CIFSSMBUnixSetFileInfo(xid, tcon, &args, netfid,
                                        pCifsFile->pid);
        }

out:
        kfree(full_path);
        FreeXid(xid);
        cifs_put_tlink(tlink);
        return rc;
}

/*
 * Try to reacquire byte range locks that were released when the session
 * to the server was lost.
 */
static int cifs_relock_file(struct cifsFileInfo *cifsFile)
{
        int rc = 0;

/* BB list all locks open on this file and relock */

        return rc;
}

static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush)
{
        int rc = -EACCES;
        int xid;
        __u32 oplock;
        struct cifs_sb_info *cifs_sb;
        struct cifs_tcon *tcon;
        struct cifsInodeInfo *pCifsInode;
        struct inode *inode;
        char *full_path = NULL;
        int desiredAccess;
        int disposition = FILE_OPEN;
        int create_options = CREATE_NOT_DIR;
        __u16 netfid;

        xid = GetXid();
        mutex_lock(&pCifsFile->fh_mutex);
        if (!pCifsFile->invalidHandle) {
                mutex_unlock(&pCifsFile->fh_mutex);
                rc = 0;
                FreeXid(xid);
                return rc;
        }

        inode = pCifsFile->dentry->d_inode;
        cifs_sb = CIFS_SB(inode->i_sb);
        tcon = tlink_tcon(pCifsFile->tlink);

/* can not grab the rename sem here because various ops, including those
   that already have the rename sem, can end up causing writepage to get
   called, and if the server was down that means we end up here; we can
   never tell if the caller already has the rename_sem */
        full_path = build_path_from_dentry(pCifsFile->dentry);
        if (full_path == NULL) {
                rc = -ENOMEM;
                mutex_unlock(&pCifsFile->fh_mutex);
                FreeXid(xid);
                return rc;
        }

        cFYI(1, "inode = 0x%p file flags 0x%x for %s",
                 inode, pCifsFile->f_flags, full_path);

        if (tcon->ses->server->oplocks)
                oplock = REQ_OPLOCK;
        else
                oplock = 0;

        if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
            (CIFS_UNIX_POSIX_PATH_OPS_CAP &
                        le64_to_cpu(tcon->fsUnixInfo.Capability))) {

                /*
                 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
                 * original open. Must mask them off for a reopen.
                 */
                unsigned int oflags = pCifsFile->f_flags &
                                                ~(O_CREAT | O_EXCL | O_TRUNC);

                rc = cifs_posix_open(full_path, NULL, inode->i_sb,
                                cifs_sb->mnt_file_mode /* ignored */,
                                oflags, &oplock, &netfid, xid);
                if (rc == 0) {
                        cFYI(1, "posix reopen succeeded");
                        goto reopen_success;
                }
                /* fall through to retry the open the old way on errors;
                   especially in the reconnect path it is important to retry
                   hard */
        }

        desiredAccess = cifs_convert_flags(pCifsFile->f_flags);

        if (backup_cred(cifs_sb))
                create_options |= CREATE_OPEN_BACKUP_INTENT;

        /* Can not refresh the inode by passing in a file_info buf to be
           returned by SMBOpen and then calling get_inode_info with the
           returned buf, since the file might have write-behind data that
           needs to be flushed and the server's version of the file size can
           be stale. If we knew for sure that the inode was not dirty locally
           we could do this. */

        rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess,
                         create_options, &netfid, &oplock, NULL,
                         cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
                                CIFS_MOUNT_MAP_SPECIAL_CHR);
        if (rc) {
                mutex_unlock(&pCifsFile->fh_mutex);
                cFYI(1, "cifs_open returned 0x%x", rc);
                cFYI(1, "oplock: %d", oplock);
                goto reopen_error_exit;
        }

reopen_success:
        pCifsFile->netfid = netfid;
        pCifsFile->invalidHandle = false;
        mutex_unlock(&pCifsFile->fh_mutex);
        pCifsInode = CIFS_I(inode);

        if (can_flush) {
                rc = filemap_write_and_wait(inode->i_mapping);
                mapping_set_error(inode->i_mapping, rc);

                if (tcon->unix_ext)
                        rc = cifs_get_inode_info_unix(&inode,
                                full_path, inode->i_sb, xid);
                else
                        rc = cifs_get_inode_info(&inode,
                                full_path, NULL, inode->i_sb,
                                xid, NULL);
        } /* else we are writing out data to the server already and could
             deadlock if we tried to flush data; and since we do not know if
             we have data that would invalidate the current end of file on
             the server, we can not go to the server to get the new inode
             info */

        cifs_set_oplock_level(pCifsInode, oplock);

        cifs_relock_file(pCifsFile);

reopen_error_exit:
        kfree(full_path);
        FreeXid(xid);
        return rc;
}

int cifs_close(struct inode *inode, struct file *file)
{
        if (file->private_data != NULL) {
                cifsFileInfo_put(file->private_data);
                file->private_data = NULL;
        }

        /* return code from the ->release op is always ignored */
        return 0;
}

int cifs_closedir(struct inode *inode, struct file *file)
{
        int rc = 0;
        int xid;
        struct cifsFileInfo *pCFileStruct = file->private_data;
        char *ptmp;

        cFYI(1, "Closedir inode = 0x%p", inode);

        xid = GetXid();

        if (pCFileStruct) {
                struct cifs_tcon *pTcon = tlink_tcon(pCFileStruct->tlink);

                cFYI(1, "Freeing private data in close dir");
                spin_lock(&cifs_file_list_lock);
                if (!pCFileStruct->srch_inf.endOfSearch &&
                    !pCFileStruct->invalidHandle) {
                        pCFileStruct->invalidHandle = true;
                        spin_unlock(&cifs_file_list_lock);
                        rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
                        cFYI(1, "Closing uncompleted readdir with rc %d",
                                 rc);
                        /* not much we can do if it fails anyway, ignore rc */
                        rc = 0;
                } else
                        spin_unlock(&cifs_file_list_lock);
                ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
                if (ptmp) {
                        cFYI(1, "closedir free smb buf in srch struct");
                        pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
                        if (pCFileStruct->srch_inf.smallBuf)
                                cifs_small_buf_release(ptmp);
                        else
                                cifs_buf_release(ptmp);
                }
                cifs_put_tlink(pCFileStruct->tlink);
                kfree(file->private_data);
                file->private_data = NULL;
        }
        /* BB can we lock the filestruct while this is going on? */
        FreeXid(xid);
        return rc;
}

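/*
 * Allocate a cifsLockInfo describing one byte range held under the given
 * netfid, tagged with the current thread group id for conflict checks.
 */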
static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 netfid)
{
        struct cifsLockInfo *lock =
                kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
        if (!lock)
                return lock;
        lock->offset = offset;
        lock->length = length;
        lock->type = type;
        lock->netfid = netfid;
        lock->pid = current->tgid;
        INIT_LIST_HEAD(&lock->blist);
        init_waitqueue_head(&lock->block_q);
        return lock;
}

static void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
        struct cifsLockInfo *li, *tmp;
        list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
                list_del_init(&li->blist);
                wake_up(&li->block_q);
        }
}

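/*
 * Scan the inode's lock list for a range overlapping
 * [offset, offset + length). A shared lock request does not conflict with
 * the caller's own locks on the same netfid, nor with other locks of the
 * same type. The caller must hold lock_mutex.
 */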
static bool
__cifs_find_lock_conflict(struct cifsInodeInfo *cinode, __u64 offset,
                        __u64 length, __u8 type, __u16 netfid,
                        struct cifsLockInfo **conf_lock)
{
        struct cifsLockInfo *li, *tmp;

        list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
                if (offset + length <= li->offset ||
                    offset >= li->offset + li->length)
                        continue;
                else if ((type & LOCKING_ANDX_SHARED_LOCK) &&
                         ((netfid == li->netfid && current->tgid == li->pid) ||
                          type == li->type))
                        continue;
                else {
                        *conf_lock = li;
                        return true;
                }
        }
        return false;
}

static bool
cifs_find_lock_conflict(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock,
                        struct cifsLockInfo **conf_lock)
{
        return __cifs_find_lock_conflict(cinode, lock->offset, lock->length,
                                         lock->type, lock->netfid, conf_lock);
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (mandatory style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks, or leave it unchanged if we can't. Returns 0 if we don't need to
 * send a request to the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsInodeInfo *cinode, __u64 offset, __u64 length,
               __u8 type, __u16 netfid, struct file_lock *flock)
{
        int rc = 0;
        struct cifsLockInfo *conf_lock;
        bool exist;

        mutex_lock(&cinode->lock_mutex);

        exist = __cifs_find_lock_conflict(cinode, offset, length, type, netfid,
                                          &conf_lock);
        if (exist) {
                flock->fl_start = conf_lock->offset;
                flock->fl_end = conf_lock->offset + conf_lock->length - 1;
                flock->fl_pid = conf_lock->pid;
                if (conf_lock->type & LOCKING_ANDX_SHARED_LOCK)
                        flock->fl_type = F_RDLCK;
                else
                        flock->fl_type = F_WRLCK;
        } else if (!cinode->can_cache_brlcks)
                rc = 1;
        else
                flock->fl_type = F_UNLCK;

        mutex_unlock(&cinode->lock_mutex);
        return rc;
}

static void
cifs_lock_add(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock)
{
        mutex_lock(&cinode->lock_mutex);
        list_add_tail(&lock->llist, &cinode->llist);
        mutex_unlock(&cinode->lock_mutex);
}

/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to send a request to the server;
 * 2) 1, if no locks prevent us but we need to send a request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock,
                 bool wait)
{
        struct cifsLockInfo *conf_lock;
        bool exist;
        int rc = 0;

try_again:
        exist = false;
        mutex_lock(&cinode->lock_mutex);

        exist = cifs_find_lock_conflict(cinode, lock, &conf_lock);
        if (!exist && cinode->can_cache_brlcks) {
                list_add_tail(&lock->llist, &cinode->llist);
                mutex_unlock(&cinode->lock_mutex);
                return rc;
        }

        if (!exist)
                rc = 1;
        else if (!wait)
                rc = -EACCES;
        else {
                list_add_tail(&lock->blist, &conf_lock->blist);
                mutex_unlock(&cinode->lock_mutex);
                rc = wait_event_interruptible(lock->block_q,
                                        (lock->blist.prev == &lock->blist) &&
                                        (lock->blist.next == &lock->blist));
                if (!rc)
                        goto try_again;
                mutex_lock(&cinode->lock_mutex);
                list_del_init(&lock->blist);
        }

        mutex_unlock(&cinode->lock_mutex);
        return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (posix style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks, or leave it unchanged if we can't. Returns 0 if we don't need to
 * send a request to the server or 1 otherwise.
 */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
        int rc = 0;
        struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
        unsigned char saved_type = flock->fl_type;

        if ((flock->fl_flags & FL_POSIX) == 0)
                return 1;

        mutex_lock(&cinode->lock_mutex);
        posix_test_lock(file, flock);

        if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
                flock->fl_type = saved_type;
                rc = 1;
        }

        mutex_unlock(&cinode->lock_mutex);
        return rc;
}

/*
 * Set the byte-range lock (posix style). Returns:
 * 1) 0, if we set the lock and don't need to send a request to the server;
 * 2) 1, if we need to send a request to the server;
 * 3) <0, if an error occurs while setting the lock.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
        struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
        int rc = 1;

        if ((flock->fl_flags & FL_POSIX) == 0)
                return rc;

try_again:
        mutex_lock(&cinode->lock_mutex);
        if (!cinode->can_cache_brlcks) {
                mutex_unlock(&cinode->lock_mutex);
                return rc;
        }

        rc = posix_lock_file(file, flock, NULL);
        mutex_unlock(&cinode->lock_mutex);
        if (rc == FILE_LOCK_DEFERRED) {
                rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
                if (!rc)
                        goto try_again;
                locks_delete_block(flock);
        }
        return rc;
}

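/*
 * Flush all cached byte-range locks for this handle out to the server as
 * LOCKING_ANDX requests, packing as many ranges per request as fit in the
 * negotiated buffer; exclusive and shared locks are sent in separate
 * passes. After this, can_cache_brlcks is cleared so later lock requests
 * go straight to the server.
 */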
static int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
        int xid, rc = 0, stored_rc;
        struct cifsLockInfo *li, *tmp;
        struct cifs_tcon *tcon;
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
        unsigned int num, max_num;
        LOCKING_ANDX_RANGE *buf, *cur;
        int types[] = {LOCKING_ANDX_LARGE_FILES,
                       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
        int i;

        xid = GetXid();
        tcon = tlink_tcon(cfile->tlink);

        mutex_lock(&cinode->lock_mutex);
        if (!cinode->can_cache_brlcks) {
                mutex_unlock(&cinode->lock_mutex);
                FreeXid(xid);
                return rc;
        }

        max_num = (tcon->ses->server->maxBuf - sizeof(struct smb_hdr)) /
                  sizeof(LOCKING_ANDX_RANGE);
        buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
        if (!buf) {
                mutex_unlock(&cinode->lock_mutex);
                FreeXid(xid);
                return rc;
        }

        for (i = 0; i < 2; i++) {
                cur = buf;
                num = 0;
                list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
                        if (li->type != types[i])
                                continue;
                        cur->Pid = cpu_to_le16(li->pid);
                        cur->LengthLow = cpu_to_le32((u32)li->length);
                        cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
                        cur->OffsetLow = cpu_to_le32((u32)li->offset);
                        cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
                        if (++num == max_num) {
                                stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
                                                       li->type, 0, num, buf);
                                if (stored_rc)
                                        rc = stored_rc;
                                cur = buf;
                                num = 0;
                        } else
                                cur++;
                }

                if (num) {
                        stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
                                               types[i], 0, num, buf);
                        if (stored_rc)
                                rc = stored_rc;
                }
        }

        cinode->can_cache_brlcks = false;
        mutex_unlock(&cinode->lock_mutex);

        kfree(buf);
        FreeXid(xid);
        return rc;
}

/* copied from fs/locks.c with a name change */
#define cifs_for_each_lock(inode, lockp) \
        for (lockp = &inode->i_flock; *lockp != NULL; \
             lockp = &(*lockp)->fl_next)

struct lock_to_push {
        struct list_head llist;
        __u64 offset;
        __u64 length;
        __u32 pid;
        __u16 netfid;
        __u8 type;
};

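/*
 * Push cached POSIX (fcntl) locks out to the server. The inode's i_flock
 * list is walked once under lock_flocks() just to count the FL_POSIX
 * entries, then again to copy them into preallocated lock_to_push
 * structures, since CIFSSMBPosixLock() can block and must be called
 * outside that lock.
 */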
static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
        struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
        struct file_lock *flock, **before;
        unsigned int count = 0, i = 0;
        int rc = 0, xid, type;
        struct list_head locks_to_send, *el;
        struct lock_to_push *lck, *tmp;
        __u64 length;

        xid = GetXid();

        mutex_lock(&cinode->lock_mutex);
        if (!cinode->can_cache_brlcks) {
                mutex_unlock(&cinode->lock_mutex);
                FreeXid(xid);
                return rc;
        }

        lock_flocks();
        cifs_for_each_lock(cfile->dentry->d_inode, before) {
                if ((*before)->fl_flags & FL_POSIX)
                        count++;
        }
        unlock_flocks();

        INIT_LIST_HEAD(&locks_to_send);

        /*
         * Allocating count locks is enough because no FL_POSIX locks can be
         * added to the list while we are holding cinode->lock_mutex that
         * protects locking operations of this inode.
         */
        for (; i < count; i++) {
                lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
                if (!lck) {
                        rc = -ENOMEM;
                        goto err_out;
                }
                list_add_tail(&lck->llist, &locks_to_send);
        }

        el = locks_to_send.next;
        lock_flocks();
        cifs_for_each_lock(cfile->dentry->d_inode, before) {
                flock = *before;
                if ((flock->fl_flags & FL_POSIX) == 0)
                        continue;
                if (el == &locks_to_send) {
                        /*
                         * The list ended. We don't have enough allocated
                         * structures - something is really wrong.
                         */
                        cERROR(1, "Can't push all brlocks!");
                        break;
                }
                length = 1 + flock->fl_end - flock->fl_start;
                if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
                        type = CIFS_RDLCK;
                else
                        type = CIFS_WRLCK;
                lck = list_entry(el, struct lock_to_push, llist);
                lck->pid = flock->fl_pid;
                lck->netfid = cfile->netfid;
                lck->length = length;
                lck->type = type;
                lck->offset = flock->fl_start;
                el = el->next;
        }
        unlock_flocks();

        list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
                struct file_lock tmp_lock;
                int stored_rc;

                tmp_lock.fl_start = lck->offset;
                stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
                                             0, lck->length, &tmp_lock,
                                             lck->type, 0);
                if (stored_rc)
                        rc = stored_rc;
                list_del(&lck->llist);
                kfree(lck);
        }

out:
        cinode->can_cache_brlcks = false;
        mutex_unlock(&cinode->lock_mutex);

        FreeXid(xid);
        return rc;
err_out:
        list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
                list_del(&lck->llist);
                kfree(lck);
        }
        goto out;
}

static int
cifs_push_locks(struct cifsFileInfo *cfile)
{
        struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
        struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);

        if ((tcon->ses->capabilities & CAP_UNIX) &&
            (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
            ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
                return cifs_push_posix_locks(cfile);

        return cifs_push_mandatory_locks(cfile);
}

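/*
 * Decode a file_lock into what the SMB locking code needs: the
 * LOCKING_ANDX type bits, whether this is a lock or an unlock request,
 * and whether the caller is willing to block (FL_SLEEP).
 */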
static void
cifs_read_flock(struct file_lock *flock, __u8 *type, int *lock, int *unlock,
                bool *wait_flag)
{
        if (flock->fl_flags & FL_POSIX)
                cFYI(1, "Posix");
        if (flock->fl_flags & FL_FLOCK)
                cFYI(1, "Flock");
        if (flock->fl_flags & FL_SLEEP) {
                cFYI(1, "Blocking lock");
                *wait_flag = true;
        }
        if (flock->fl_flags & FL_ACCESS)
                cFYI(1, "Process suspended by mandatory locking - "
                        "not implemented yet");
        if (flock->fl_flags & FL_LEASE)
                cFYI(1, "Lease on file - not implemented yet");
        if (flock->fl_flags &
            (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
                cFYI(1, "Unknown lock flags 0x%x", flock->fl_flags);

        *type = LOCKING_ANDX_LARGE_FILES;
        if (flock->fl_type == F_WRLCK) {
                cFYI(1, "F_WRLCK");
                *lock = 1;
        } else if (flock->fl_type == F_UNLCK) {
                cFYI(1, "F_UNLCK");
                *unlock = 1;
                /* Check if unlock includes more than one lock range */
        } else if (flock->fl_type == F_RDLCK) {
                cFYI(1, "F_RDLCK");
                *type |= LOCKING_ANDX_SHARED_LOCK;
                *lock = 1;
        } else if (flock->fl_type == F_EXLCK) {
                cFYI(1, "F_EXLCK");
                *lock = 1;
        } else if (flock->fl_type == F_SHLCK) {
                cFYI(1, "F_SHLCK");
                *type |= LOCKING_ANDX_SHARED_LOCK;
                *lock = 1;
        } else
                cFYI(1, "Unknown type of lock");
}

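/*
 * Handle a lock query (F_GETLK). On POSIX-capable mounts the query is
 * sent to the server as-is; otherwise conflicts are probed by trying to
 * set and immediately release a mandatory lock of the requested type,
 * with a second shared-lock probe to tell read conflicts from write
 * conflicts.
 */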
static int
cifs_getlk(struct file *file, struct file_lock *flock, __u8 type,
           bool wait_flag, bool posix_lck, int xid)
{
        int rc = 0;
        __u64 length = 1 + flock->fl_end - flock->fl_start;
        struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
        struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
        __u16 netfid = cfile->netfid;

        if (posix_lck) {
                int posix_lock_type;

                rc = cifs_posix_lock_test(file, flock);
                if (!rc)
                        return rc;

                if (type & LOCKING_ANDX_SHARED_LOCK)
                        posix_lock_type = CIFS_RDLCK;
                else
                        posix_lock_type = CIFS_WRLCK;
                rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
                                      1 /* get */, length, flock,
                                      posix_lock_type, wait_flag);
                return rc;
        }

        rc = cifs_lock_test(cinode, flock->fl_start, length, type, netfid,
                            flock);
        if (!rc)
                return rc;

        /* BB we could chain these into one lock request BB */
        rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
                         flock->fl_start, 0, 1, type, 0, 0);
        if (rc == 0) {
                rc = CIFSSMBLock(xid, tcon, netfid, current->tgid,
                                 length, flock->fl_start, 1, 0,
                                 type, 0, 0);
                flock->fl_type = F_UNLCK;
                if (rc != 0)
                        cERROR(1, "Error unlocking previously locked "
                                   "range during test of lock, rc = %d", rc);
                return 0;
        }

        if (type & LOCKING_ANDX_SHARED_LOCK) {
                flock->fl_type = F_WRLCK;
                return 0;
        }

        rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
                         flock->fl_start, 0, 1,
                         type | LOCKING_ANDX_SHARED_LOCK, 0, 0);
        if (rc == 0) {
                rc = CIFSSMBLock(xid, tcon, netfid, current->tgid,
                                 length, flock->fl_start, 1, 0,
                                 type | LOCKING_ANDX_SHARED_LOCK,
                                 0, 0);
                flock->fl_type = F_RDLCK;
                if (rc != 0)
                        cERROR(1, "Error unlocking previously locked "
                                  "range during test of lock, rc = %d", rc);
        } else
                flock->fl_type = F_WRLCK;

        return 0;
}

static void
cifs_move_llist(struct list_head *source, struct list_head *dest)
{
        struct list_head *li, *tmp;
        list_for_each_safe(li, tmp, source)
                list_move(li, dest);
}

static void
cifs_free_llist(struct list_head *llist)
{
        struct cifsLockInfo *li, *tmp;
        list_for_each_entry_safe(li, tmp, llist, llist) {
                cifs_del_lock_waiters(li);
                list_del(&li->llist);
                kfree(li);
        }
}

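/*
 * Send unlock requests for every cached lock that falls inside the flock
 * range, batched the same way as cifs_push_mandatory_locks(). Ranges are
 * first moved to a temporary list so they can be restored to the inode
 * list if the server rejects the unlock request.
 */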
static int
cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
{
        int rc = 0, stored_rc;
        int types[] = {LOCKING_ANDX_LARGE_FILES,
                       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
        unsigned int i;
        unsigned int max_num, num;
        LOCKING_ANDX_RANGE *buf, *cur;
        struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
        struct cifsLockInfo *li, *tmp;
        __u64 length = 1 + flock->fl_end - flock->fl_start;
        struct list_head tmp_llist;

        INIT_LIST_HEAD(&tmp_llist);

        max_num = (tcon->ses->server->maxBuf - sizeof(struct smb_hdr)) /
                  sizeof(LOCKING_ANDX_RANGE);
        buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        mutex_lock(&cinode->lock_mutex);
        for (i = 0; i < 2; i++) {
                cur = buf;
                num = 0;
                list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
                        if (flock->fl_start > li->offset ||
                            (flock->fl_start + length) <
                            (li->offset + li->length))
                                continue;
                        if (current->tgid != li->pid)
                                continue;
                        if (cfile->netfid != li->netfid)
                                continue;
                        if (types[i] != li->type)
                                continue;
                        if (!cinode->can_cache_brlcks) {
                                cur->Pid = cpu_to_le16(li->pid);
                                cur->LengthLow = cpu_to_le32((u32)li->length);
                                cur->LengthHigh =
                                        cpu_to_le32((u32)(li->length>>32));
                                cur->OffsetLow = cpu_to_le32((u32)li->offset);
                                cur->OffsetHigh =
                                        cpu_to_le32((u32)(li->offset>>32));
                                /*
                                 * We need to save a lock here to let us add
                                 * it again to the inode list if the unlock
                                 * range request fails on the server.
                                 */
                                list_move(&li->llist, &tmp_llist);
                                if (++num == max_num) {
                                        stored_rc = cifs_lockv(xid, tcon,
                                                               cfile->netfid,
                                                               li->type, num,
                                                               0, buf);
                                        if (stored_rc) {
                                                /*
                                                 * We failed on the unlock range
                                                 * request - add all locks from
                                                 * the tmp list to the head of
                                                 * the inode list.
                                                 */
                                                cifs_move_llist(&tmp_llist,
                                                                &cinode->llist);
                                                rc = stored_rc;
                                        } else
                                                /*
                                                 * The unlock range request
                                                 * succeeded - free the tmp
                                                 * list.
                                                 */
                                                cifs_free_llist(&tmp_llist);
                                        cur = buf;
                                        num = 0;
                                } else
                                        cur++;
                        } else {
                                /*
                                 * We can cache brlock requests - simply remove
                                 * a lock from the inode list.
                                 */
                                list_del(&li->llist);
                                cifs_del_lock_waiters(li);
                                kfree(li);
                        }
                }
                if (num) {
                        stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
                                               types[i], num, 0, buf);
                        if (stored_rc) {
                                cifs_move_llist(&tmp_llist, &cinode->llist);
                                rc = stored_rc;
                        } else
                                cifs_free_llist(&tmp_llist);
                }
        }

        mutex_unlock(&cinode->lock_mutex);
        kfree(buf);
        return rc;
}

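/*
 * Handle a lock or unlock request (F_SETLK/F_SETLKW). On POSIX-capable
 * mounts the request is translated into a single CIFSSMBPosixLock() call;
 * otherwise the lock is checked against (and recorded in) the local lock
 * list before being sent to the server as a mandatory byte-range lock.
 */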
static int
cifs_setlk(struct file *file, struct file_lock *flock, __u8 type,
           bool wait_flag, bool posix_lck, int lock, int unlock, int xid)
{
        int rc = 0;
        __u64 length = 1 + flock->fl_end - flock->fl_start;
        struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
        struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
        struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
        __u16 netfid = cfile->netfid;

        if (posix_lck) {
                int posix_lock_type;

                rc = cifs_posix_lock_set(file, flock);
                if (!rc || rc < 0)
                        return rc;

                if (type & LOCKING_ANDX_SHARED_LOCK)
                        posix_lock_type = CIFS_RDLCK;
                else
                        posix_lock_type = CIFS_WRLCK;

                if (unlock == 1)
                        posix_lock_type = CIFS_UNLCK;

                rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
                                      0 /* set */, length, flock,
                                      posix_lock_type, wait_flag);
                goto out;
        }

        if (lock) {
                struct cifsLockInfo *lock;

                lock = cifs_lock_init(flock->fl_start, length, type, netfid);
                if (!lock)
                        return -ENOMEM;

                rc = cifs_lock_add_if(cinode, lock, wait_flag);
                if (rc < 0)
                        kfree(lock);
                if (rc <= 0)
                        goto out;

                rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
                                 flock->fl_start, 0, 1, type, wait_flag, 0);
                if (rc) {
                        kfree(lock);
                        goto out;
                }

                cifs_lock_add(cinode, lock);
        } else if (unlock)
                rc = cifs_unlock_range(cfile, flock, xid);

out:
        if (flock->fl_flags & FL_POSIX)
                posix_lock_file_wait(file, flock);
        return rc;
}

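/*
 * Entry point for the ->lock() file operation: classify the request,
 * choose between POSIX and mandatory lock semantics based on the mount
 * options and server capabilities, then dispatch to cifs_getlk() or
 * cifs_setlk().
 */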
int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
{
        int rc, xid;
        int lock = 0, unlock = 0;
        bool wait_flag = false;
        bool posix_lck = false;
        struct cifs_sb_info *cifs_sb;
        struct cifs_tcon *tcon;
        struct cifsInodeInfo *cinode;
        struct cifsFileInfo *cfile;
        __u16 netfid;
        __u8 type;

        rc = -EACCES;
        xid = GetXid();

        cFYI(1, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld "
                "end: %lld", cmd, flock->fl_flags, flock->fl_type,
                flock->fl_start, flock->fl_end);

        cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag);

        cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
        cfile = (struct cifsFileInfo *)file->private_data;
        tcon = tlink_tcon(cfile->tlink);
        netfid = cfile->netfid;
        cinode = CIFS_I(file->f_path.dentry->d_inode);

        if ((tcon->ses->capabilities & CAP_UNIX) &&
            (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
            ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
                posix_lck = true;
        /*
         * BB add code here to normalize offset and length to account for
         * negative length which we can not accept over the wire.
         */
        if (IS_GETLK(cmd)) {
                rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
                FreeXid(xid);
                return rc;
        }

        if (!lock && !unlock) {
                /*
                 * if this is neither a lock nor an unlock request, we do not
                 * know what it is and there is nothing to do
                 */
                FreeXid(xid);
                return -EOPNOTSUPP;
        }

        rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
                        xid);
        FreeXid(xid);
        return rc;
}

1410 /*
1411  * update the file size (if needed) after a write. Should be called with
1412  * the inode->i_lock held
1413  */
1414 void
1415 cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1416                       unsigned int bytes_written)
1417 {
1418         loff_t end_of_write = offset + bytes_written;
1419
1420         if (end_of_write > cifsi->server_eof)
1421                 cifsi->server_eof = end_of_write;
1422 }
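
/*
 * Illustration (editor's sketch, not part of the original file): the
 * comment above requires callers to hold inode->i_lock around this
 * helper. A hypothetical caller, mirroring cifs_write() below:
 */
#if 0	/* example only */
	spin_lock(&inode->i_lock);
	cifs_update_eof(CIFS_I(inode), offset, bytes_written);
	spin_unlock(&inode->i_lock);
#endif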
1423
1424 static ssize_t cifs_write(struct cifsFileInfo *open_file, __u32 pid,
1425                           const char *write_data, size_t write_size,
1426                           loff_t *poffset)
1427 {
1428         int rc = 0;
1429         unsigned int bytes_written = 0;
1430         unsigned int total_written;
1431         struct cifs_sb_info *cifs_sb;
1432         struct cifs_tcon *pTcon;
1433         int xid;
1434         struct dentry *dentry = open_file->dentry;
1435         struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
1436         struct cifs_io_parms io_parms;
1437
1438         cifs_sb = CIFS_SB(dentry->d_sb);
1439
1440         cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
1441            *poffset, dentry->d_name.name);
1442
1443         pTcon = tlink_tcon(open_file->tlink);
1444
1445         xid = GetXid();
1446
1447         for (total_written = 0; write_size > total_written;
1448              total_written += bytes_written) {
1449                 rc = -EAGAIN;
1450                 while (rc == -EAGAIN) {
1451                         struct kvec iov[2];
1452                         unsigned int len;
1453
1454                         if (open_file->invalidHandle) {
1455                                 /* we could deadlock if we called
1456                                    filemap_fdatawait from here so tell
1457                                    reopen_file not to flush data to
1458                                    server now */
1459                                 rc = cifs_reopen_file(open_file, false);
1460                                 if (rc != 0)
1461                                         break;
1462                         }
1463
1464                         len = min((size_t)cifs_sb->wsize,
1465                                   write_size - total_written);
1466                         /* iov[0] is reserved for smb header */
1467                         iov[1].iov_base = (char *)write_data + total_written;
1468                         iov[1].iov_len = len;
1469                         io_parms.netfid = open_file->netfid;
1470                         io_parms.pid = pid;
1471                         io_parms.tcon = pTcon;
1472                         io_parms.offset = *poffset;
1473                         io_parms.length = len;
1474                         rc = CIFSSMBWrite2(xid, &io_parms, &bytes_written, iov,
1475                                            1, 0);
1476                 }
1477                 if (rc || (bytes_written == 0)) {
1478                         if (total_written)
1479                                 break;
1480                         else {
1481                                 FreeXid(xid);
1482                                 return rc;
1483                         }
1484                 } else {
1485                         spin_lock(&dentry->d_inode->i_lock);
1486                         cifs_update_eof(cifsi, *poffset, bytes_written);
1487                         spin_unlock(&dentry->d_inode->i_lock);
1488                         *poffset += bytes_written;
1489                 }
1490         }
1491
1492         cifs_stats_bytes_written(pTcon, total_written);
1493
1494         if (total_written > 0) {
1495                 spin_lock(&dentry->d_inode->i_lock);
1496                 if (*poffset > dentry->d_inode->i_size)
1497                         i_size_write(dentry->d_inode, *poffset);
1498                 spin_unlock(&dentry->d_inode->i_lock);
1499         }
1500         mark_inode_dirty_sync(dentry->d_inode);
1501         FreeXid(xid);
1502         return total_written;
1503 }
1504
1505 struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1506                                         bool fsuid_only)
1507 {
1508         struct cifsFileInfo *open_file = NULL;
1509         struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1510
1511         /* only filter by fsuid on multiuser mounts */
1512         if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1513                 fsuid_only = false;
1514
1515         spin_lock(&cifs_file_list_lock);
1516         /* we could simply take the first list entry, since write-only
1517            entries are always at the end of the list, but the first entry
1518            might have a close pending, so we walk the whole list */
1519         list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
1520                 if (fsuid_only && open_file->uid != current_fsuid())
1521                         continue;
1522                 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
1523                         if (!open_file->invalidHandle) {
1524                                 /* found a good file */
1525                                 /* lock it so it will not be closed on us */
1526                                 cifsFileInfo_get(open_file);
1527                                 spin_unlock(&cifs_file_list_lock);
1528                                 return open_file;
1529                         } /* else might as well continue, and look for
1530                              another, or simply have the caller reopen it
1531                              again rather than trying to fix this handle */
1532                 } else /* write only file */
1533                         break; /* write only files are last so must be done */
1534         }
1535         spin_unlock(&cifs_file_list_lock);
1536         return NULL;
1537 }
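
/*
 * Illustration (editor's sketch, not part of the original file): the
 * handle returned above carries a reference taken via cifsFileInfo_get(),
 * so a hypothetical caller must drop it with cifsFileInfo_put():
 */
#if 0	/* example only */
	struct cifsFileInfo *open_file;

	open_file = find_readable_file(CIFS_I(inode), false);
	if (open_file) {
		/* ... issue reads using open_file->netfid ... */
		cifsFileInfo_put(open_file);
	}
#endif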
1538
1539 struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1540                                         bool fsuid_only)
1541 {
1542         struct cifsFileInfo *open_file;
1543         struct cifs_sb_info *cifs_sb;
1544         bool any_available = false;
1545         int rc;
1546
1547         /* Having a null inode here (because mapping->host was set to zero by
1548         the VFS or MM) should not happen, but we had reports of an oops (due to
1549         it being zero) during stress test cases, so we need to check for it */
1550
1551         if (cifs_inode == NULL) {
1552                 cERROR(1, "Null inode passed to find_writable_file");
1553                 dump_stack();
1554                 return NULL;
1555         }
1556
1557         cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1558
1559         /* only filter by fsuid on multiuser mounts */
1560         if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1561                 fsuid_only = false;
1562
1563         spin_lock(&cifs_file_list_lock);
1564 refind_writable:
1565         list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
1566                 if (!any_available && open_file->pid != current->tgid)
1567                         continue;
1568                 if (fsuid_only && open_file->uid != current_fsuid())
1569                         continue;
1570                 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
1571                         cifsFileInfo_get(open_file);
1572
1573                         if (!open_file->invalidHandle) {
1574                                 /* found a good writable file */
1575                                 spin_unlock(&cifs_file_list_lock);
1576                                 return open_file;
1577                         }
1578
1579                         spin_unlock(&cifs_file_list_lock);
1580
1581                         /* Had to unlock since following call can block */
1582                         rc = cifs_reopen_file(open_file, false);
1583                         if (!rc)
1584                                 return open_file;
1585
1586                         /* if it fails, try another handle if possible */
1587                         cFYI(1, "wp failed on reopen file");
1588                         cifsFileInfo_put(open_file);
1589
1590                         spin_lock(&cifs_file_list_lock);
1591
1592                         /* else we simply continue to the next entry, so
1593                            we do not loop on reopen errors. If we cannot
1594                            reopen the file (for example, after we have
1595                            reconnected to a server while another client
1596                            races to delete or lock the file), restarting
1597                            from the beginning of the loop here would make
1598                            no progress. */
1599                 }
1600         }
1601         /* couldn't find a usable FH with the same pid, try any available */
1602         if (!any_available) {
1603                 any_available = true;
1604                 goto refind_writable;
1605         }
1606         spin_unlock(&cifs_file_list_lock);
1607         return NULL;
1608 }
1609
1610 static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1611 {
1612         struct address_space *mapping = page->mapping;
1613         loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1614         char *write_data;
1615         int rc = -EFAULT;
1616         int bytes_written = 0;
1617         struct inode *inode;
1618         struct cifsFileInfo *open_file;
1619
1620         if (!mapping || !mapping->host)
1621                 return -EFAULT;
1622
1623         inode = page->mapping->host;
1624
1625         offset += (loff_t)from;
1626         write_data = kmap(page);
1627         write_data += from;
1628
1629         if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1630                 kunmap(page);
1631                 return -EIO;
1632         }
1633
1634         /* racing with truncate? */
1635         if (offset > mapping->host->i_size) {
1636                 kunmap(page);
1637                 return 0; /* don't care */
1638         }
1639
1640         /* check to make sure that we are not extending the file */
1641         if (mapping->host->i_size - offset < (loff_t)to)
1642                 to = (unsigned)(mapping->host->i_size - offset);
1643
1644         open_file = find_writable_file(CIFS_I(mapping->host), false);
1645         if (open_file) {
1646                 bytes_written = cifs_write(open_file, open_file->pid,
1647                                            write_data, to - from, &offset);
1648                 cifsFileInfo_put(open_file);
1649                 /* Does mm or vfs already set times? */
1650                 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
1651                 if ((bytes_written > 0) && (offset))
1652                         rc = 0;
1653                 else if (bytes_written < 0)
1654                         rc = bytes_written;
1655         } else {
1656                 cFYI(1, "No writeable filehandles for inode");
1657                 rc = -EIO;
1658         }
1659
1660         kunmap(page);
1661         return rc;
1662 }
1663
1664 /*
1665  * Marshal up the iov array, reserving the first one for the header. Also,
1666  * set wdata->bytes.
1667  */
1668 static void
1669 cifs_writepages_marshal_iov(struct kvec *iov, struct cifs_writedata *wdata)
1670 {
1671         int i;
1672         struct inode *inode = wdata->cfile->dentry->d_inode;
1673         loff_t size = i_size_read(inode);
1674
1675         /* marshal up the pages into iov array */
1676         wdata->bytes = 0;
1677         for (i = 0; i < wdata->nr_pages; i++) {
1678                 iov[i + 1].iov_len = min(size - page_offset(wdata->pages[i]),
1679                                         (loff_t)PAGE_CACHE_SIZE);
1680                 iov[i + 1].iov_base = kmap(wdata->pages[i]);
1681                 wdata->bytes += iov[i + 1].iov_len;
1682         }
1683 }
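
/*
 * Editor's sketch (assumption, not part of the original file): every page
 * is kmap()ed into the iov above, so the send path (cifs_async_writev) is
 * expected to balance it by kunmapping each page once the request has
 * been transmitted, roughly:
 */
#if 0	/* example only */
	for (i = 0; i < wdata->nr_pages; i++)
		kunmap(wdata->pages[i]);
#endif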
1684
1685 static int cifs_writepages(struct address_space *mapping,
1686                            struct writeback_control *wbc)
1687 {
1688         struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
1689         bool done = false, scanned = false, range_whole = false;
1690         pgoff_t end, index;
1691         struct cifs_writedata *wdata;
1692         struct page *page;
1693         int rc = 0;
1694
1695         /*
1696          * If wsize is smaller than the page cache size, default to writing
1697          * one page at a time via cifs_writepage
1698          */
1699         if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1700                 return generic_writepages(mapping, wbc);
1701
1702         if (wbc->range_cyclic) {
1703                 index = mapping->writeback_index; /* Start from prev offset */
1704                 end = -1;
1705         } else {
1706                 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1707                 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1708                 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
1709                         range_whole = true;
1710                 scanned = true;
1711         }
1712 retry:
1713         while (!done && index <= end) {
1714                 unsigned int i, nr_pages, found_pages;
1715                 pgoff_t next = 0, tofind;
1716                 struct page **pages;
1717
1718                 tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
1719                                 end - index) + 1;
1720
1721                 wdata = cifs_writedata_alloc((unsigned int)tofind,
1722                                              cifs_writev_complete);
1723                 if (!wdata) {
1724                         rc = -ENOMEM;
1725                         break;
1726                 }
1727
1728                 /*
1729                  * find_get_pages_tag seems to return a max of 256 on each
1730                  * iteration, so we must call it several times in order to
1731                  * fill the array or the wsize is effectively limited to
1732                  * 256 * PAGE_CACHE_SIZE.
1733                  */
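                /*
                 * Worked example (editor's note, values assumed for
                 * illustration): with 4KB pages and a 2MB wsize, tofind
                 * above can be as large as 512 pages, while each
                 * find_get_pages_tag call returns at most ~256, so the
                 * loop below may need several passes to fill the array.
                 */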
1734                 found_pages = 0;
1735                 pages = wdata->pages;
1736                 do {
1737                         nr_pages = find_get_pages_tag(mapping, &index,
1738                                                         PAGECACHE_TAG_DIRTY,
1739                                                         tofind, pages);
1740                         found_pages += nr_pages;
1741                         tofind -= nr_pages;
1742                         pages += nr_pages;
1743                 } while (nr_pages && tofind && index <= end);
1744
1745                 if (found_pages == 0) {
1746                         kref_put(&wdata->refcount, cifs_writedata_release);
1747                         break;
1748                 }
1749
1750                 nr_pages = 0;
1751                 for (i = 0; i < found_pages; i++) {
1752                         page = wdata->pages[i];
1753                         /*
1754                          * At this point we hold neither mapping->tree_lock nor
1755                          * lock on the page itself: the page may be truncated or
1756                          * invalidated (changing page->mapping to NULL), or even
1757                          * swizzled back from swapper_space to tmpfs file
1758                          * mapping
1759                          */
1760
1761                         if (nr_pages == 0)
1762                                 lock_page(page);
1763                         else if (!trylock_page(page))
1764                                 break;
1765
1766                         if (unlikely(page->mapping != mapping)) {
1767                                 unlock_page(page);
1768                                 break;
1769                         }
1770
1771                         if (!wbc->range_cyclic && page->index > end) {
1772                                 done = true;
1773                                 unlock_page(page);
1774                                 break;
1775                         }
1776
1777                         if (next && (page->index != next)) {
1778                                 /* Not next consecutive page */
1779                                 unlock_page(page);
1780                                 break;
1781                         }
1782
1783                         if (wbc->sync_mode != WB_SYNC_NONE)
1784                                 wait_on_page_writeback(page);
1785
1786                         if (PageWriteback(page) ||
1787                                         !clear_page_dirty_for_io(page)) {
1788                                 unlock_page(page);
1789                                 break;
1790                         }
1791
1792                         /*
1793                          * This actually clears the dirty bit in the radix tree.
1794                          * See cifs_writepage() for more commentary.
1795                          */
1796                         set_page_writeback(page);
1797
1798                         if (page_offset(page) >= mapping->host->i_size) {
1799                                 done = true;
1800                                 unlock_page(page);
1801                                 end_page_writeback(page);
1802                                 break;
1803                         }
1804
1805                         wdata->pages[i] = page;
1806                         next = page->index + 1;
1807                         ++nr_pages;
1808                 }
1809
1810                 /* reset index to refind any pages skipped */
1811                 if (nr_pages == 0)
1812                         index = wdata->pages[0]->index + 1;
1813
1814                 /* put any pages we aren't going to use */
1815                 for (i = nr_pages; i < found_pages; i++) {
1816                         page_cache_release(wdata->pages[i]);
1817                         wdata->pages[i] = NULL;
1818                 }
1819
1820                 /* nothing to write? */
1821                 if (nr_pages == 0) {
1822                         kref_put(&wdata->refcount, cifs_writedata_release);
1823                         continue;
1824                 }
1825
1826                 wdata->sync_mode = wbc->sync_mode;
1827                 wdata->nr_pages = nr_pages;
1828                 wdata->offset = page_offset(wdata->pages[0]);
1829                 wdata->marshal_iov = cifs_writepages_marshal_iov;
1830
1831                 do {
1832                         if (wdata->cfile != NULL)
1833                                 cifsFileInfo_put(wdata->cfile);
1834                         wdata->cfile = find_writable_file(CIFS_I(mapping->host),
1835                                                           false);
1836                         if (!wdata->cfile) {
1837                                 cERROR(1, "No writable handles for inode");
1838                                 rc = -EBADF;
1839                                 break;
1840                         }
1841                         wdata->pid = wdata->cfile->pid;
1842                         rc = cifs_async_writev(wdata);
1843                 } while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);
1844
1845                 for (i = 0; i < nr_pages; ++i)
1846                         unlock_page(wdata->pages[i]);
1847
1848                 /* send failure -- clean up the mess */
1849                 if (rc != 0) {
1850                         for (i = 0; i < nr_pages; ++i) {
1851                                 if (rc == -EAGAIN)
1852                                         redirty_page_for_writepage(wbc,
1853                                                            wdata->pages[i]);
1854                                 else
1855                                         SetPageError(wdata->pages[i]);
1856                                 end_page_writeback(wdata->pages[i]);
1857                                 page_cache_release(wdata->pages[i]);
1858                         }
1859                         if (rc != -EAGAIN)
1860                                 mapping_set_error(mapping, rc);
1861                 }
1862                 kref_put(&wdata->refcount, cifs_writedata_release);
1863
1864                 wbc->nr_to_write -= nr_pages;
1865                 if (wbc->nr_to_write <= 0)
1866                         done = true;
1867
1868                 index = next;
1869         }
1870
1871         if (!scanned && !done) {
1872                 /*
1873                  * We hit the last page and there is more work to be done: wrap
1874                  * back to the start of the file
1875                  */
1876                 scanned = true;
1877                 index = 0;
1878                 goto retry;
1879         }
1880
1881         if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
1882                 mapping->writeback_index = index;
1883
1884         return rc;
1885 }
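
/*
 * Illustration (editor's sketch, not part of the original file): a
 * writeback_control like the following, as the VFS might build it for a
 * whole-file data sync, drives the loop above with range_whole == true:
 */
#if 0	/* example only */
	struct writeback_control wbc = {
		.sync_mode   = WB_SYNC_ALL,
		.range_start = 0,
		.range_end   = LLONG_MAX,
		.nr_to_write = LONG_MAX,
	};

	rc = cifs_writepages(mapping, &wbc);
#endif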
1886
1887 static int
1888 cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
1889 {
1890         int rc;
1891         int xid;
1892
1893         xid = GetXid();
1894 /* BB add check for wbc flags */
1895         page_cache_get(page);
1896         if (!PageUptodate(page))
1897                 cFYI(1, "ppw - page not up to date");
1898
1899         /*
1900          * Set the "writeback" flag, and clear "dirty" in the radix tree.
1901          *
1902          * A writepage() implementation always needs to do either this,
1903          * or re-dirty the page with "redirty_page_for_writepage()" in
1904          * the case of a failure.
1905          *
1906          * Just unlocking the page will cause the radix tree tag-bits
1907          * to fail to update with the state of the page correctly.
1908          */
1909         set_page_writeback(page);
1910 retry_write:
1911         rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
1912         if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
1913                 goto retry_write;
1914         else if (rc == -EAGAIN)
1915                 redirty_page_for_writepage(wbc, page);
1916         else if (rc != 0)
1917                 SetPageError(page);
1918         else
1919                 SetPageUptodate(page);
1920         end_page_writeback(page);
1921         page_cache_release(page);
1922         FreeXid(xid);
1923         return rc;
1924 }
1925
1926 static int cifs_writepage(struct page *page, struct writeback_control *wbc)
1927 {
1928         int rc = cifs_writepage_locked(page, wbc);
1929         unlock_page(page);
1930         return rc;
1931 }
1932
1933 static int cifs_write_end(struct file *file, struct address_space *mapping,
1934                         loff_t pos, unsigned len, unsigned copied,
1935                         struct page *page, void *fsdata)
1936 {
1937         int rc;
1938         struct inode *inode = mapping->host;
1939         struct cifsFileInfo *cfile = file->private_data;
1940         struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1941         __u32 pid;
1942
1943         if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
1944                 pid = cfile->pid;
1945         else
1946                 pid = current->tgid;
1947
1948         cFYI(1, "write_end for page %p from pos %lld with %d bytes",
1949                  page, pos, copied);
1950
1951         if (PageChecked(page)) {
1952                 if (copied == len)
1953                         SetPageUptodate(page);
1954                 ClearPageChecked(page);
1955         } else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
1956                 SetPageUptodate(page);
1957
1958         if (!PageUptodate(page)) {
1959                 char *page_data;
1960                 unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
1961                 int xid;
1962
1963                 xid = GetXid();
1964                 /* this is probably better than directly calling
1965                    cifs_partialpagewrite since here the file handle is
1966                    already known, so we might as well leverage it */
1967                 /* BB check if anything else missing out of ppw
1968                    such as updating last write time */
1969                 page_data = kmap(page);
1970                 rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
1971                 /* if (rc < 0) should we set writebehind rc? */
1972                 kunmap(page);
1973
1974                 FreeXid(xid);
1975         } else {
1976                 rc = copied;
1977                 pos += copied;
1978                 set_page_dirty(page);
1979         }
1980
1981         if (rc > 0) {
1982                 spin_lock(&inode->i_lock);
1983                 if (pos > inode->i_size)
1984                         i_size_write(inode, pos);
1985                 spin_unlock(&inode->i_lock);
1986         }
1987
1988         unlock_page(page);
1989         page_cache_release(page);
1990
1991         return rc;
1992 }
1993
1994 int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
1995                       int datasync)
1996 {
1997         int xid;
1998         int rc = 0;
1999         struct cifs_tcon *tcon;
2000         struct cifsFileInfo *smbfile = file->private_data;
2001         struct inode *inode = file->f_path.dentry->d_inode;
2002         struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2003
2004         rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2005         if (rc)
2006                 return rc;
2007         mutex_lock(&inode->i_mutex);
2008
2009         xid = GetXid();
2010
2011         cFYI(1, "Sync file - name: %s datasync: 0x%x",
2012                 file->f_path.dentry->d_name.name, datasync);
2013
2014         if (!CIFS_I(inode)->clientCanCacheRead) {
2015                 rc = cifs_invalidate_mapping(inode);
2016                 if (rc) {
2017                         cFYI(1, "rc: %d during invalidate phase", rc);
2018                         rc = 0; /* don't care about it in fsync */
2019                 }
2020         }
2021
2022         tcon = tlink_tcon(smbfile->tlink);
2023         if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
2024                 rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
2025
2026         FreeXid(xid);
2027         mutex_unlock(&inode->i_mutex);
2028         return rc;
2029 }
2030
2031 int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2032 {
2033         int xid;
2034         int rc = 0;
2035         struct cifs_tcon *tcon;
2036         struct cifsFileInfo *smbfile = file->private_data;
2037         struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
2038         struct inode *inode = file->f_mapping->host;
2039
2040         rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2041         if (rc)
2042                 return rc;
2043         mutex_lock(&inode->i_mutex);
2044
2045         xid = GetXid();
2046
2047         cFYI(1, "Sync file - name: %s datasync: 0x%x",
2048                 file->f_path.dentry->d_name.name, datasync);
2049
2050         tcon = tlink_tcon(smbfile->tlink);
2051         if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
2052                 rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
2053
2054         FreeXid(xid);
2055         mutex_unlock(&inode->i_mutex);
2056         return rc;
2057 }
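
/*
 * Editor's note: cifs_strict_fsync() above differs from cifs_fsync() only
 * in that it first invalidates the page cache when the client holds no
 * read oplock (clientCanCacheRead); mounts using strict cache semantics
 * are expected to wire up the strict variant in their file operations.
 */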
2058
2059 /*
2060  * As the file closes, flush all cached write data for this inode, checking
2061  * for write-behind errors.
2062  */
2063 int cifs_flush(struct file *file, fl_owner_t id)
2064 {
2065         struct inode *inode = file->f_path.dentry->d_inode;
2066         int rc = 0;
2067
2068         if (file->f_mode & FMODE_WRITE)
2069                 rc = filemap_write_and_wait(inode->i_mapping);
2070
2071         cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);
2072
2073         return rc;
2074 }
2075
2076 static int
2077 cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2078 {
2079         int rc = 0;
2080         unsigned long i;
2081
2082         for (i = 0; i < num_pages; i++) {
2083                 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2084                 if (!pages[i]) {
2085                         /*
2086                          * save number of pages we have already allocated and
2087                          * return with ENOMEM error
2088                          */
2089                         num_pages = i;
2090                         rc = -ENOMEM;
2091                         break;
2092                 }
2093         }
2094
2095         if (rc) {
2096                 for (i = 0; i < num_pages; i++)
2097                         put_page(pages[i]);
2098         }
2099         return rc;
2100 }
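
/*
 * Editor's note: on failure the helper above puts every page it already
 * allocated, so a hypothetical caller only needs to free its own wrapper,
 * matching the pattern used in cifs_iovec_write() below:
 */
#if 0	/* example only */
	rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
	if (rc) {
		kfree(wdata);	/* pages were already released by the helper */
		break;
	}
#endif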
2101
2102 static inline
2103 size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2104 {
2105         size_t num_pages;
2106         size_t clen;
2107
2108         clen = min_t(const size_t, len, wsize);
2109         num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
2110
2111         if (cur_len)
2112                 *cur_len = clen;
2113
2114         return num_pages;
2115 }
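
/*
 * Worked example (editor's note, values assumed for illustration): with
 * wsize = 65536, len = 100000 and 4KB pages, clen = min(100000, 65536) =
 * 65536, so num_pages = DIV_ROUND_UP(65536, 4096) = 16 and *cur_len is
 * set to 65536.
 */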
2116
2117 static void
2118 cifs_uncached_marshal_iov(struct kvec *iov, struct cifs_writedata *wdata)
2119 {
2120         int i;
2121         size_t bytes = wdata->bytes;
2122
2123         /* marshal up the pages into iov array */
2124         for (i = 0; i < wdata->nr_pages; i++) {
2125                 iov[i + 1].iov_len = min_t(size_t, bytes, PAGE_SIZE);
2126                 iov[i + 1].iov_base = kmap(wdata->pages[i]);
2127                 bytes -= iov[i + 1].iov_len;
2128         }
2129 }
2130
2131 static void
2132 cifs_uncached_writev_complete(struct work_struct *work)
2133 {
2134         int i;
2135         struct cifs_writedata *wdata = container_of(work,
2136                                         struct cifs_writedata, work);
2137         struct inode *inode = wdata->cfile->dentry->d_inode;
2138         struct cifsInodeInfo *cifsi = CIFS_I(inode);
2139
2140         spin_lock(&inode->i_lock);
2141         cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
2142         if (cifsi->server_eof > inode->i_size)
2143                 i_size_write(inode, cifsi->server_eof);
2144         spin_unlock(&inode->i_lock);
2145
2146         complete(&wdata->done);
2147
2148         if (wdata->result != -EAGAIN) {
2149                 for (i = 0; i < wdata->nr_pages; i++)
2150                         put_page(wdata->pages[i]);
2151         }
2152
2153         kref_put(&wdata->refcount, cifs_writedata_release);
2154 }
2155
2156 /* attempt to send write to server, retry on any -EAGAIN errors */
2157 static int
2158 cifs_uncached_retry_writev(struct cifs_writedata *wdata)
2159 {
2160         int rc;
2161
2162         do {
2163                 if (wdata->cfile->invalidHandle) {
2164                         rc = cifs_reopen_file(wdata->cfile, false);
2165                         if (rc != 0)
2166                                 continue;
2167                 }
2168                 rc = cifs_async_writev(wdata);
2169         } while (rc == -EAGAIN);
2170
2171         return rc;
2172 }
2173
2174 static ssize_t
2175 cifs_iovec_write(struct file *file, const struct iovec *iov,
2176                  unsigned long nr_segs, loff_t *poffset)
2177 {
2178         unsigned long nr_pages, i;
2179         size_t copied, len, cur_len;
2180         ssize_t total_written = 0;
2181         loff_t offset = *poffset;
2182         struct iov_iter it;
2183         struct cifsFileInfo *open_file;
2184         struct cifs_tcon *tcon;
2185         struct cifs_sb_info *cifs_sb;
2186         struct cifs_writedata *wdata, *tmp;
2187         struct list_head wdata_list;
2188         int rc;
2189         pid_t pid;
2190
2191         len = iov_length(iov, nr_segs);
2192         if (!len)
2193                 return 0;
2194
2195         rc = generic_write_checks(file, poffset, &len, 0);
2196         if (rc)
2197                 return rc;
2198
2199         INIT_LIST_HEAD(&wdata_list);
2200         cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
2201         open_file = file->private_data;
2202         tcon = tlink_tcon(open_file->tlink);
2203
2204         if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2205                 pid = open_file->pid;
2206         else
2207                 pid = current->tgid;
2208
2209         iov_iter_init(&it, iov, nr_segs, len, 0);
2210         do {
2211                 size_t save_len;
2212
2213                 nr_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
2214                 wdata = cifs_writedata_alloc(nr_pages,
2215                                              cifs_uncached_writev_complete);
2216                 if (!wdata) {
2217                         rc = -ENOMEM;
2218                         break;
2219                 }
2220
2221                 rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
2222                 if (rc) {
2223                         kfree(wdata);
2224                         break;
2225                 }
2226
2227                 save_len = cur_len;
2228                 for (i = 0; i < nr_pages; i++) {
2229                         copied = min_t(const size_t, cur_len, PAGE_SIZE);
2230                         copied = iov_iter_copy_from_user(wdata->pages[i], &it,
2231                                                          0, copied);
2232                         cur_len -= copied;
2233                         iov_iter_advance(&it, copied);
2234                 }
2235                 cur_len = save_len - cur_len;
2236
2237                 wdata->sync_mode = WB_SYNC_ALL;
2238                 wdata->nr_pages = nr_pages;
2239                 wdata->offset = (__u64)offset;
2240                 wdata->cfile = cifsFileInfo_get(open_file);
2241                 wdata->pid = pid;
2242                 wdata->bytes = cur_len;
2243                 wdata->marshal_iov = cifs_uncached_marshal_iov;
2244                 rc = cifs_uncached_retry_writev(wdata);
2245                 if (rc) {
2246                         kref_put(&wdata->refcount, cifs_writedata_release);
2247                         break;
2248                 }
2249
2250                 list_add_tail(&wdata->list, &wdata_list);
2251                 offset += cur_len;
2252                 len -= cur_len;
2253         } while (len > 0);
2254
2255         /*
2256          * If at least one write was successfully sent, then discard any rc
2257          * value from the failed later send that ended the loop. If the
2258          * earlier writes succeed, we'll end up returning whatever was
2259          * written. If one fails, we'll get a new rc value from it.
2260          */
2261         if (!list_empty(&wdata_list))
2262                 rc = 0;
2263
2264         /*
2265          * Wait for and collect replies for any successful sends in order of
2266          * increasing offset. Once an error is hit or we get a fatal signal
2267          * while waiting, then return without waiting for any more replies.
2268          */
2269 restart_loop:
2270         list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
2271                 if (!rc) {
2272                         /* FIXME: freezable too? */
2273                         rc = wait_for_completion_killable(&wdata->done);
2274                         if (rc)
2275                                 rc = -EINTR;
2276                         else if (wdata->result)
2277                                 rc = wdata->result;
2278                         else
2279                                 total_written += wdata->bytes;
2280
2281                         /* resend call if it's a retryable error */
2282                         if (rc == -EAGAIN) {
2283                                 rc = cifs_uncached_retry_writev(wdata);
2284                                 goto restart_loop;
2285                         }
2286                 }
2287                 list_del_init(&wdata->list);
2288                 kref_put(&wdata->refcount, cifs_writedata_release);
2289         }
2290
2291         if (total_written > 0)
2292                 *poffset += total_written;
2293
2294         cifs_stats_bytes_written(tcon, total_written);
2295         return total_written ? total_written : (ssize_t)rc;
2296 }
2297
2298 ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
2299                                 unsigned long nr_segs, loff_t pos)
2300 {
2301         ssize_t written;
2302         struct inode *inode;
2303
2304         inode = iocb->ki_filp->f_path.dentry->d_inode;
2305
2306         /*
2307          * BB - optimize for the case when signing is disabled. We can drop
2308          * this extra memory-to-memory copying and use the iovec buffers
2309          * directly to construct the write request.
2310          */
2311
2312         written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
2313         if (written > 0) {
2314                 CIFS_I(inode)->invalid_mapping = true;
2315                 iocb->ki_pos = pos;
2316         }
2317
2318         return written;
2319 }
2320
2321 ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
2322                            unsigned long nr_segs, loff_t pos)
2323 {
2324         struct inode *inode;
2325
2326         inode = iocb->ki_filp->f_path.dentry->d_inode;
2327
2328         if (CIFS_I(inode)->clientCanCacheAll)
2329                 return generic_file_aio_write(iocb, iov, nr_segs, pos);
2330
2331         /*
2332          * In strict cache mode we need to write the data to the server exactly
2333          * from the pos to pos+len-1 rather than flush all affected pages
2334          * because flushing may cause an error with mandatory locks on these
2335          * pages but not on the region from pos to pos+len-1.
2336          */
2337
2338         return cifs_user_writev(iocb, iov, nr_segs, pos);
2339 }
2340
2341 static ssize_t
2342 cifs_iovec_read(struct file *file, const struct iovec *iov,
2343                  unsigned long nr_segs, loff_t *poffset)
2344 {
2345         int rc;
2346         int xid;
2347         ssize_t total_read;
2348         unsigned int bytes_read = 0;
2349         size_t len, cur_len;
2350         int iov_offset = 0;
2351         struct cifs_sb_info *cifs_sb;
2352         struct cifs_tcon *pTcon;
2353         struct cifsFileInfo *open_file;
2354         struct smb_com_read_rsp *pSMBr;
2355         struct cifs_io_parms io_parms;
2356         char *read_data;
2357         unsigned int rsize;
2358         __u32 pid;
2359
2360         if (!nr_segs)
2361                 return 0;
2362
2363         len = iov_length(iov, nr_segs);
2364         if (!len)
2365                 return 0;
2366
2367         xid = GetXid();
2368         cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
2369
2370         /* FIXME: set up handlers for larger reads and/or convert to async */
2371         rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
2372
2373         open_file = file->private_data;
2374         pTcon = tlink_tcon(open_file->tlink);
2375
2376         if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2377                 pid = open_file->pid;
2378         else
2379                 pid = current->tgid;
2380
2381         if ((file->f_flags & O_ACCMODE) == O_WRONLY)
2382                 cFYI(1, "attempting read on write only file instance");
2383
2384         for (total_read = 0; total_read < len; total_read += bytes_read) {
2385                 cur_len = min_t(const size_t, len - total_read, rsize);
2386                 rc = -EAGAIN;
2387                 read_data = NULL;
2388
2389                 while (rc == -EAGAIN) {
2390                         int buf_type = CIFS_NO_BUFFER;
2391                         if (open_file->invalidHandle) {
2392                                 rc = cifs_reopen_file(open_file, true);
2393                                 if (rc != 0)
2394                                         break;
2395                         }
2396                         io_parms.netfid = open_file->netfid;
2397                         io_parms.pid = pid;
2398                         io_parms.tcon = pTcon;
2399                         io_parms.offset = *poffset;
2400                         io_parms.length = cur_len;
2401                         rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
2402                                          &read_data, &buf_type);
2403                         pSMBr = (struct smb_com_read_rsp *)read_data;
2404                         if (read_data) {
2405                                 char *data_offset = read_data + 4 +
2406                                                 le16_to_cpu(pSMBr->DataOffset);
2407                                 if (memcpy_toiovecend(iov, data_offset,
2408                                                       iov_offset, bytes_read))
2409                                         rc = -EFAULT;
2410                                 if (buf_type == CIFS_SMALL_BUFFER)
2411                                         cifs_small_buf_release(read_data);
2412                                 else if (buf_type == CIFS_LARGE_BUFFER)
2413                                         cifs_buf_release(read_data);
2414                                 read_data = NULL;
2415                                 iov_offset += bytes_read;
2416                         }
2417                 }
2418
2419                 if (rc || (bytes_read == 0)) {
2420                         if (total_read) {
2421                                 break;
2422                         } else {
2423                                 FreeXid(xid);
2424                                 return rc;
2425                         }
2426                 } else {
2427                         cifs_stats_bytes_read(pTcon, bytes_read);
2428                         *poffset += bytes_read;
2429                 }
2430         }
2431
2432         FreeXid(xid);
2433         return total_read;
2434 }
2435
2436 ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
2437                                unsigned long nr_segs, loff_t pos)
2438 {
2439         ssize_t read;
2440
2441         read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
2442         if (read > 0)
2443                 iocb->ki_pos = pos;
2444
2445         return read;
2446 }
2447
2448 ssize_t cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
2449                           unsigned long nr_segs, loff_t pos)
2450 {
2451         struct inode *inode;
2452
2453         inode = iocb->ki_filp->f_path.dentry->d_inode;
2454
2455         if (CIFS_I(inode)->clientCanCacheRead)
2456                 return generic_file_aio_read(iocb, iov, nr_segs, pos);
2457
2458         /*
2459          * In strict cache mode we need to read from the server all the time
2460          * if we don't have level II oplock because the server can delay mtime
2461          * change - so we can't make a decision about inode invalidating.
2462          * And we can also fail with pagereading if there are mandatory locks
2463          * on pages affected by this read but not on the region from pos to
2464          * pos+len-1.
2465          */
2466
2467         return cifs_user_readv(iocb, iov, nr_segs, pos);
2468 }
2469
2470 static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
2471                          loff_t *poffset)
2472 {
2473         int rc = -EACCES;
2474         unsigned int bytes_read = 0;
2475         unsigned int total_read;
2476         unsigned int current_read_size;
2477         unsigned int rsize;
2478         struct cifs_sb_info *cifs_sb;
2479         struct cifs_tcon *pTcon;
2480         int xid;
2481         char *current_offset;
2482         struct cifsFileInfo *open_file;
2483         struct cifs_io_parms io_parms;
2484         int buf_type = CIFS_NO_BUFFER;
2485         __u32 pid;
2486
2487         xid = GetXid();
2488         cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
2489
2490         /* FIXME: set up handlers for larger reads and/or convert to async */
2491         rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
2492
2493         if (file->private_data == NULL) {
2494                 rc = -EBADF;
2495                 FreeXid(xid);
2496                 return rc;
2497         }
2498         open_file = file->private_data;
2499         pTcon = tlink_tcon(open_file->tlink);
2500
2501         if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2502                 pid = open_file->pid;
2503         else
2504                 pid = current->tgid;
2505
2506         if ((file->f_flags & O_ACCMODE) == O_WRONLY)
2507                 cFYI(1, "attempting read on write only file instance");
2508
2509         for (total_read = 0, current_offset = read_data;
2510              read_size > total_read;
2511              total_read += bytes_read, current_offset += bytes_read) {
2512                 current_read_size = min_t(uint, read_size - total_read, rsize);
2513
2514                 /* For Windows ME and 9x we do not want to request more
2515                 than was negotiated since the server will refuse the read */
2516                 if ((pTcon->ses) &&
2517                         !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
2518                         current_read_size = min_t(uint, current_read_size,
2519                                         CIFSMaxBufSize);
2520                 }
2521                 rc = -EAGAIN;
2522                 while (rc == -EAGAIN) {
2523                         if (open_file->invalidHandle) {
2524                                 rc = cifs_reopen_file(open_file, true);
2525                                 if (rc != 0)
2526                                         break;
2527                         }
2528                         io_parms.netfid = open_file->netfid;
2529                         io_parms.pid = pid;
2530                         io_parms.tcon = pTcon;
2531                         io_parms.offset = *poffset;
2532                         io_parms.length = current_read_size;
2533                         rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
2534                                          &current_offset, &buf_type);
2535                 }
2536                 if (rc || (bytes_read == 0)) {
2537                         if (total_read) {
2538                                 break;
2539                         } else {
2540                                 FreeXid(xid);
2541                                 return rc;
2542                         }
2543                 } else {
2544                         cifs_stats_bytes_read(pTcon, bytes_read);
2545                         *poffset += bytes_read;
2546                 }
2547         }
2548         FreeXid(xid);
2549         return total_read;
2550 }
2551
2552 /*
2553  * If the page is mmap'ed into a process' page tables, then we need to make
2554  * sure that it doesn't change while being written back.
2555  */
2556 static int
2557 cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
2558 {
2559         struct page *page = vmf->page;
2560
2561         lock_page(page);
2562         return VM_FAULT_LOCKED;
2563 }
2564
2565 static struct vm_operations_struct cifs_file_vm_ops = {
2566         .fault = filemap_fault,
2567         .page_mkwrite = cifs_page_mkwrite,
2568 };
2569
2570 int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
2571 {
2572         int rc, xid;
2573         struct inode *inode = file->f_path.dentry->d_inode;
2574
2575         xid = GetXid();
2576
2577         if (!CIFS_I(inode)->clientCanCacheRead) {
2578                 rc = cifs_invalidate_mapping(inode);
2579                 if (rc)
2580                         goto out;
2581         }
2582         rc = generic_file_mmap(file, vma);
2583         if (rc == 0)
2584                 vma->vm_ops = &cifs_file_vm_ops;
2585 out:
2586         FreeXid(xid);
2587         return rc;
2588 }
2589
2590 int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
2591 {
2592         int rc, xid;
2593
2594         xid = GetXid();
2595         rc = cifs_revalidate_file(file);
2596         if (rc) {
2597                 cFYI(1, "Validation prior to mmap failed, error=%d", rc);
2598                 FreeXid(xid);
2599                 return rc;
2600         }
2601         rc = generic_file_mmap(file, vma);
2602         if (rc == 0)
2603                 vma->vm_ops = &cifs_file_vm_ops;
2604         FreeXid(xid);
2605         return rc;
2606 }
2607
2608 static int cifs_readpages(struct file *file, struct address_space *mapping,
2609         struct list_head *page_list, unsigned num_pages)
2610 {
2611         int rc;
2612         struct list_head tmplist;
2613         struct cifsFileInfo *open_file = file->private_data;
2614         struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
2615         unsigned int rsize = cifs_sb->rsize;
2616         pid_t pid;
2617
2618         /*
2619          * Give up immediately if rsize is too small to read an entire page.
2620          * The VFS will fall back to readpage. We should never reach this
2621          * point however since we set ra_pages to 0 when the rsize is smaller
2622          * than a cache page.
2623          */
2624         if (unlikely(rsize < PAGE_CACHE_SIZE))
2625                 return 0;
2626
2627         /*
2628          * Reads as many pages as possible from fscache. Returns -ENOBUFS
2629          * immediately if the cookie is negative.
2630          */
2631         rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
2632                                          &num_pages);
2633         if (rc == 0)
2634                 return rc;
2635
2636         if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2637                 pid = open_file->pid;
2638         else
2639                 pid = current->tgid;
2640
2641         rc = 0;
2642         INIT_LIST_HEAD(&tmplist);
2643
2644         cFYI(1, "%s: file=%p mapping=%p num_pages=%u", __func__, file,
2645                 mapping, num_pages);
2646
2647         /*
2648          * Start with the page at end of list and move it to private
2649          * list. Do the same with any following pages until we hit
2650          * the rsize limit, hit an index discontinuity, or run out of
2651          * pages. Issue the async read and then start the loop again
2652          * until the list is empty.
2653          *
2654          * Note that list order is important. The page_list is in
2655          * the order of declining indexes. When we put the pages in
2656          * the rdata->pages, then we want them in increasing order.
2657          */
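        /*
         * Worked example (editor's note, indexes assumed): if page_list
         * holds indexes 7, 6, 5, 4 (declining), taking pages from the
         * tail yields tmplist 4, 5, 6, 7, so rdata->pages ends up in
         * increasing index order.
         */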
2658         while (!list_empty(page_list)) {
2659                 unsigned int bytes = PAGE_CACHE_SIZE;
2660                 unsigned int expected_index;
2661                 unsigned int nr_pages = 1;
2662                 loff_t offset;
2663                 struct page *page, *tpage;
2664                 struct cifs_readdata *rdata;
2665
2666                 page = list_entry(page_list->prev, struct page, lru);
2667
2668                 /*
2669                  * Lock the page and put it in the cache. Since no one else
2670                  * should have access to this page, we're safe to simply set
2671                  * PG_locked without checking it first.
2672                  */
2673                 __set_page_locked(page);
2674                 rc = add_to_page_cache_locked(page, mapping,
2675                                               page->index, GFP_KERNEL);
2676
2677                 /* give up if we can't stick it in the cache */
2678                 if (rc) {
2679                         __clear_page_locked(page);
2680                         break;
2681                 }
2682
2683                 /* move first page to the tmplist */
2684                 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
2685                 list_move_tail(&page->lru, &tmplist);
2686
2687                 /* now try and add more pages onto the request */
2688                 expected_index = page->index + 1;
2689                 list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
2690                         /* discontinuity ? */
2691                         if (page->index != expected_index)
2692                                 break;
2693
2694                         /* would this page push the read over the rsize? */
2695                         if (bytes + PAGE_CACHE_SIZE > rsize)
2696                                 break;
2697
2698                         __set_page_locked(page);
2699                         if (add_to_page_cache_locked(page, mapping,
2700                                                 page->index, GFP_KERNEL)) {
2701                                 __clear_page_locked(page);
2702                                 break;
2703                         }
2704                         list_move_tail(&page->lru, &tmplist);
2705                         bytes += PAGE_CACHE_SIZE;
2706                         expected_index++;
2707                         nr_pages++;
2708                 }
2709
2710                 rdata = cifs_readdata_alloc(nr_pages);
2711                 if (!rdata) {
2712                         /* best to give up if we're out of mem */
2713                         list_for_each_entry_safe(page, tpage, &tmplist, lru) {
2714                                 list_del(&page->lru);
2715                                 lru_cache_add_file(page);
2716                                 unlock_page(page);
2717                                 page_cache_release(page);
2718                         }
2719                         rc = -ENOMEM;
2720                         break;
2721                 }
2722
2723                 spin_lock(&cifs_file_list_lock);
2724                 cifsFileInfo_get(open_file);
2725                 spin_unlock(&cifs_file_list_lock);
2726                 rdata->cfile = open_file;
2727                 rdata->mapping = mapping;
2728                 rdata->offset = offset;
2729                 rdata->bytes = bytes;
2730                 rdata->pid = pid;
2731                 list_splice_init(&tmplist, &rdata->pages);
2732
2733                 do {
2734                         if (open_file->invalidHandle) {
2735                                 rc = cifs_reopen_file(open_file, true);
2736                                 if (rc != 0)
2737                                         continue;
2738                         }
2739                         rc = cifs_async_readv(rdata);
2740                 } while (rc == -EAGAIN);
2741
2742                 if (rc != 0) {
2743                         list_for_each_entry_safe(page, tpage, &rdata->pages,
2744                                                  lru) {
2745                                 list_del(&page->lru);
2746                                 lru_cache_add_file(page);
2747                                 unlock_page(page);
2748                                 page_cache_release(page);
2749                         }
2750                         cifs_readdata_free(rdata);
2751                         break;
2752                 }
2753         }
2754
2755         return rc;
2756 }
2757
2758 static int cifs_readpage_worker(struct file *file, struct page *page,
2759         loff_t *poffset)
2760 {
2761         char *read_data;
2762         int rc;
2763
2764         /* Is the page cached? */
2765         rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
2766         if (rc == 0)
2767                 goto read_complete;
2768
2769         page_cache_get(page);
2770         read_data = kmap(page);
2771         /* for reads over a certain size we could initiate async read-ahead */
2772
2773         rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
2774
2775         if (rc < 0)
2776                 goto io_error;
2777         else
2778                 cFYI(1, "Bytes read %d", rc);
2779
2780         file->f_path.dentry->d_inode->i_atime =
2781                 current_fs_time(file->f_path.dentry->d_inode->i_sb);
2782
2783         if (PAGE_CACHE_SIZE > rc)
2784                 memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
2785
2786         flush_dcache_page(page);
2787         SetPageUptodate(page);
2788
2789         /* send this page to the cache */
2790         cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);
2791
2792         rc = 0;
2793
2794 io_error:
2795         kunmap(page);
2796         page_cache_release(page);
2797
2798 read_complete:
2799         return rc;
2800 }
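/*
 * Illustration of the short-read handling in cifs_readpage_worker()
 * above: when cifs_read() returns fewer bytes than a page (for example
 * at EOF), the tail of the page is zero-filled before SetPageUptodate()
 * so no stale data is exposed.  Standalone sketch assuming a 4096-byte
 * page; EXAMPLE_PAGE_SIZE and zero_page_tail() are illustrative names.
 */
#include <string.h>

#define EXAMPLE_PAGE_SIZE 4096

static void zero_page_tail(char *page_data, int bytes_read)
{
        /* e.g. bytes_read == 1000 clears bytes 1000..4095 */
        if (bytes_read < EXAMPLE_PAGE_SIZE)
                memset(page_data + bytes_read, 0,
                       EXAMPLE_PAGE_SIZE - bytes_read);
}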
2801
2802 static int cifs_readpage(struct file *file, struct page *page)
2803 {
2804         loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
2805         int rc = -EACCES;
2806         int xid;
2807
2808         xid = GetXid();
2809
2810         if (file->private_data == NULL) {
2811                 rc = -EBADF;
2812                 FreeXid(xid);
2813                 return rc;
2814         }
2815
2816         cFYI(1, "readpage %p at offset %d 0x%x",
2817                  page, (int)offset, (int)offset);
2818
2819         rc = cifs_readpage_worker(file, page, &offset);
2820
2821         unlock_page(page);
2822
2823         FreeXid(xid);
2824         return rc;
2825 }
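/*
 * The offset computation in cifs_readpage() above is the standard
 * page-cache identity: byte offset = page index << PAGE_CACHE_SHIFT.
 * The cast to a 64-bit type before the shift matters; shifting a
 * 32-bit index first could overflow on large files.  Standalone sketch
 * assuming 4096-byte pages (a shift of 12); the names are illustrative.
 */
#define EXAMPLE_PAGE_SHIFT 12

static long long page_index_to_offset(unsigned long index)
{
        /* index 3 -> 3 << 12 == 12288 */
        return (long long)index << EXAMPLE_PAGE_SHIFT;
}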
2826
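/*
 * Walk the inode's list of open file handles under cifs_file_list_lock
 * and report whether any of them was opened with write access.
 */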
2827 static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
2828 {
2829         struct cifsFileInfo *open_file;
2830
2831         spin_lock(&cifs_file_list_lock);
2832         list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2833                 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
2834                         spin_unlock(&cifs_file_list_lock);
2835                         return 1;
2836                 }
2837         }
2838         spin_unlock(&cifs_file_list_lock);
2839         return 0;
2840 }
2841
2842 /* We do not want to update the file size from the server for inodes
2843    open for write, to avoid races with writepage extending the file.
2844    In the future we could consider refreshing the inode only when
2845    the file size increases, but that is tricky to do without racing
2846    with writebehind page caching in the current Linux kernel
2847    design. */
2848 bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
2849 {
2850         if (!cifsInode)
2851                 return true;
2852
2853         if (is_inode_writable(cifsInode)) {
2854                 /* This inode is open for write at least once */
2855                 struct cifs_sb_info *cifs_sb;
2856
2857                 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
2858                 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
2859                         /* since there is no page cache to corrupt on
2860                            direct I/O, we can change the size safely */
2861                         return true;
2862                 }
2863
2864                 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
2865                         return true;
2866
2867                 return false;
2868         } else
2869                 return true;
2870 }
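/*
 * Summary of is_size_safe_to_change(), derived from the code above:
 *   inode not tracked (NULL)              -> true  (nothing to protect)
 *   no handle open for write              -> true  (no writeback race)
 *   writable, direct I/O mount            -> true  (no page cache to corrupt)
 *   writable, new EOF beyond cached size  -> true  (growth only)
 *   writable, new EOF within cached size  -> false (could race with writeback)
 */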
2871
2872 static int cifs_write_begin(struct file *file, struct address_space *mapping,
2873                         loff_t pos, unsigned len, unsigned flags,
2874                         struct page **pagep, void **fsdata)
2875 {
2876         pgoff_t index = pos >> PAGE_CACHE_SHIFT;
2877         loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
2878         loff_t page_start = pos & PAGE_MASK;
2879         loff_t i_size;
2880         struct page *page;
2881         int rc = 0;
2882
2883         cFYI(1, "write_begin from %lld len %d", (long long)pos, len);
2884
2885         page = grab_cache_page_write_begin(mapping, index, flags);
2886         if (!page) {
2887                 rc = -ENOMEM;
2888                 goto out;
2889         }
2890
2891         if (PageUptodate(page))
2892                 goto out;
2893
2894         /*
2895          * If we write a full page it will be up to date, no need to read from
2896          * the server. If the write is short, we'll end up doing a sync write
2897          * instead.
2898          */
2899         if (len == PAGE_CACHE_SIZE)
2900                 goto out;
2901
2902         /*
2903          * optimize away the read when we have an oplock, and we're not
2904          * expecting to use any of the data we'd be reading in. That
2905          * is, when the page lies beyond the EOF, or straddles the EOF
2906          * and the write will cover all of the existing data.
2907          */
2908         if (CIFS_I(mapping->host)->clientCanCacheRead) {
2909                 i_size = i_size_read(mapping->host);
2910                 if (page_start >= i_size ||
2911                     (offset == 0 && (pos + len) >= i_size)) {
2912                         zero_user_segments(page, 0, offset,
2913                                            offset + len,
2914                                            PAGE_CACHE_SIZE);
2915                         /*
2916                          * PageChecked means that the parts of the page
2917                          * to which we're not writing are considered up
2918                          * to date. Once the data is copied to the
2919                          * page, it can be set uptodate.
2920                          */
2921                         SetPageChecked(page);
2922                         goto out;
2923                 }
2924         }
2925
2926         if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
2927                 /*
2928                  * Might as well read a page; it is fast enough. If we get
2929                  * an error, we don't need to return it. cifs_write_end will
2930                  * do a sync write instead since PG_uptodate isn't set.
2931                  */
2932                 cifs_readpage_worker(file, page, &page_start);
2933         } else {
2934                 /* We could try using another file handle if there is one,
2935                    but how would we lock it to prevent a close of that
2936                    handle racing with this read? In any case this will be
2937                    written out by write_end, so it is fine. */
2938         }
2939 out:
2940         *pagep = page;
2941         return rc;
2942 }
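/*
 * Illustration of the zero_user_segments() call in cifs_write_begin()
 * above: it clears the two regions of the page the caller is not about
 * to overwrite, [0, offset) and [offset + len, end of page).
 * Standalone sketch assuming a 4096-byte page and offset + len <= 4096;
 * the names are illustrative.
 */
#include <string.h>

#define EXAMPLE_PAGE_SIZE 4096

static void zero_outside_write(char *page_data, unsigned int offset,
                               unsigned int len)
{
        memset(page_data, 0, offset);              /* head: [0, offset) */
        memset(page_data + offset + len, 0,
               EXAMPLE_PAGE_SIZE - offset - len);  /* tail to page end  */
}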
2943
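/*
 * releasepage: a page with private data attached cannot be released;
 * otherwise let FS-Cache decide whether to let the page go.
 */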
2944 static int cifs_release_page(struct page *page, gfp_t gfp)
2945 {
2946         if (PagePrivate(page))
2947                 return 0;
2948
2949         return cifs_fscache_release_page(page, gfp);
2950 }
2951
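/*
 * invalidatepage: an offset of zero means the whole page is being
 * invalidated, so the FS-Cache copy must be dropped as well.
 */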
2952 static void cifs_invalidate_page(struct page *page, unsigned long offset)
2953 {
2954         struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
2955
2956         if (offset == 0)
2957                 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
2958 }
2959
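/*
 * launder_page: called before a dirty page is released.  Write the
 * page back synchronously (WB_SYNC_ALL over exactly this page's byte
 * range) and invalidate any FS-Cache copy.
 */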
2960 static int cifs_launder_page(struct page *page)
2961 {
2962         int rc = 0;
2963         loff_t range_start = page_offset(page);
2964         loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
2965         struct writeback_control wbc = {
2966                 .sync_mode = WB_SYNC_ALL,
2967                 .nr_to_write = 0,
2968                 .range_start = range_start,
2969                 .range_end = range_end,
2970         };
2971
2972         cFYI(1, "Launder page: %p", page);
2973
2974         if (clear_page_dirty_for_io(page))
2975                 rc = cifs_writepage_locked(page, &wbc);
2976
2977         cifs_fscache_invalidate_page(page, page->mapping->host);
2978         return rc;
2979 }
2980
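/*
 * Work item run when the server recalls an oplock: break the matching
 * lease, write back dirty pages and, if read caching was lost, wait
 * for the flush and invalidate cached data; then re-push byte-range
 * locks and acknowledge the break unless it was already cancelled.
 */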
2981 void cifs_oplock_break(struct work_struct *work)
2982 {
2983         struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
2984                                                   oplock_break);
2985         struct inode *inode = cfile->dentry->d_inode;
2986         struct cifsInodeInfo *cinode = CIFS_I(inode);
2987         int rc = 0;
2988
2989         if (inode && S_ISREG(inode->i_mode)) {
2990                 if (cinode->clientCanCacheRead)
2991                         break_lease(inode, O_RDONLY);
2992                 else
2993                         break_lease(inode, O_WRONLY);
2994                 rc = filemap_fdatawrite(inode->i_mapping);
2995                 if (cinode->clientCanCacheRead == 0) {
2996                         rc = filemap_fdatawait(inode->i_mapping);
2997                         mapping_set_error(inode->i_mapping, rc);
2998                         invalidate_remote_inode(inode);
2999                 }
3000                 cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
3001         }
3002
3003         rc = cifs_push_locks(cfile);
3004         if (rc)
3005                 cERROR(1, "Push locks rc = %d", rc);
3006
3007         /*
3008          * Releasing a stale oplock after a recent reconnect of the SMB
3009          * session, using a now-invalid file handle, is not a data integrity
3010          * issue. But do not bother sending an oplock release if the session
3011          * is still disconnected, since the server already released the oplock.
3012          */
3013         if (!cfile->oplock_break_cancelled) {
3014                 rc = CIFSSMBLock(0, tlink_tcon(cfile->tlink), cfile->netfid,
3015                                  current->tgid, 0, 0, 0, 0,
3016                                  LOCKING_ANDX_OPLOCK_RELEASE, false,
3017                                  cinode->clientCanCacheRead ? 1 : 0);
3018                 cFYI(1, "Oplock release rc = %d", rc);
3019         }
3020 }
3021
3022 const struct address_space_operations cifs_addr_ops = {
3023         .readpage = cifs_readpage,
3024         .readpages = cifs_readpages,
3025         .writepage = cifs_writepage,
3026         .writepages = cifs_writepages,
3027         .write_begin = cifs_write_begin,
3028         .write_end = cifs_write_end,
3029         .set_page_dirty = __set_page_dirty_nobuffers,
3030         .releasepage = cifs_release_page,
3031         .invalidatepage = cifs_invalidate_page,
3032         .launder_page = cifs_launder_page,
3033 };
3034
3035 /*
3036  * cifs_readpages requires the server to support a buffer large enough to
3037  * contain the header plus one complete page of data.  Otherwise, we need
3038  * to leave cifs_readpages out of the address space operations.
3039  */
3040 const struct address_space_operations cifs_addr_ops_smallbuf = {
3041         .readpage = cifs_readpage,
3042         .writepage = cifs_writepage,
3043         .writepages = cifs_writepages,
3044         .write_begin = cifs_write_begin,
3045         .write_end = cifs_write_end,
3046         .set_page_dirty = __set_page_dirty_nobuffers,
3047         .releasepage = cifs_release_page,
3048         .invalidatepage = cifs_invalidate_page,
3049         .launder_page = cifs_launder_page,
3050 };
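/*
 * Which table an inode gets is decided at inode setup time, based on
 * whether the server's negotiated buffer can hold a CIFS header plus a
 * full page.  A sketch of that selection: cifs_pick_aops() and the
 * server_big_enough flag are illustrative stand-ins; the real check
 * lives in the inode initialization code, not in this file.
 */
static const struct address_space_operations *
cifs_pick_aops(int server_big_enough)
{
        return server_big_enough ? &cifs_addr_ops : &cifs_addr_ops_smallbuf;
}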