]> nv-tegra.nvidia Code Review - linux-2.6.git/blob - fs/cifs/file.c
header cleaning: don't include smp_lock.h when not used
[linux-2.6.git] / fs / cifs / file.c
1 /*
2  *   fs/cifs/file.c
3  *
4  *   vfs operations that deal with files
5  * 
6  *   Copyright (C) International Business Machines  Corp., 2002,2003
7  *   Author(s): Steve French (sfrench@us.ibm.com)
8  *              Jeremy Allison (jra@samba.org)
9  *
10  *   This library is free software; you can redistribute it and/or modify
11  *   it under the terms of the GNU Lesser General Public License as published
12  *   by the Free Software Foundation; either version 2.1 of the License, or
13  *   (at your option) any later version.
14  *
15  *   This library is distributed in the hope that it will be useful,
16  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
17  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
18  *   the GNU Lesser General Public License for more details.
19  *
20  *   You should have received a copy of the GNU Lesser General Public License
21  *   along with this library; if not, write to the Free Software
22  *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23  */
24 #include <linux/fs.h>
25 #include <linux/backing-dev.h>
26 #include <linux/stat.h>
27 #include <linux/fcntl.h>
28 #include <linux/pagemap.h>
29 #include <linux/pagevec.h>
30 #include <linux/writeback.h>
31 #include <linux/task_io_accounting_ops.h>
32 #include <linux/delay.h>
33 #include <asm/div64.h>
34 #include "cifsfs.h"
35 #include "cifspdu.h"
36 #include "cifsglob.h"
37 #include "cifsproto.h"
38 #include "cifs_unicode.h"
39 #include "cifs_debug.h"
40 #include "cifs_fs_sb.h"
41
42 static inline struct cifsFileInfo *cifs_init_private(
43         struct cifsFileInfo *private_data, struct inode *inode,
44         struct file *file, __u16 netfid)
45 {
46         memset(private_data, 0, sizeof(struct cifsFileInfo));
47         private_data->netfid = netfid;
48         private_data->pid = current->tgid;      
49         init_MUTEX(&private_data->fh_sem);
50         mutex_init(&private_data->lock_mutex);
51         INIT_LIST_HEAD(&private_data->llist);
52         private_data->pfile = file; /* needed for writepage */
53         private_data->pInode = inode;
54         private_data->invalidHandle = FALSE;
55         private_data->closePend = FALSE;
56         /* we have to track num writers to the inode, since writepages
57         does not tell us which handle the write is for so there can
58         be a close (overlapping with write) of the filehandle that
59         cifs_writepages chose to use */
60         atomic_set(&private_data->wrtPending,0); 
61
62         return private_data;
63 }
64
65 static inline int cifs_convert_flags(unsigned int flags)
66 {
67         if ((flags & O_ACCMODE) == O_RDONLY)
68                 return GENERIC_READ;
69         else if ((flags & O_ACCMODE) == O_WRONLY)
70                 return GENERIC_WRITE;
71         else if ((flags & O_ACCMODE) == O_RDWR) {
72                 /* GENERIC_ALL is too much permission to request
73                    can cause unnecessary access denied on create */
74                 /* return GENERIC_ALL; */
75                 return (GENERIC_READ | GENERIC_WRITE);
76         }
77
78         return 0x20197;
79 }
80
81 static inline int cifs_get_disposition(unsigned int flags)
82 {
83         if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
84                 return FILE_CREATE;
85         else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
86                 return FILE_OVERWRITE_IF;
87         else if ((flags & O_CREAT) == O_CREAT)
88                 return FILE_OPEN_IF;
89         else if ((flags & O_TRUNC) == O_TRUNC)
90                 return FILE_OVERWRITE;
91         else
92                 return FILE_OPEN;
93 }
94
95 /* all arguments to this function must be checked for validity in caller */
96 static inline int cifs_open_inode_helper(struct inode *inode, struct file *file,
97         struct cifsInodeInfo *pCifsInode, struct cifsFileInfo *pCifsFile,
98         struct cifsTconInfo *pTcon, int *oplock, FILE_ALL_INFO *buf,
99         char *full_path, int xid)
100 {
101         struct timespec temp;
102         int rc;
103
104         /* want handles we can use to read with first
105            in the list so we do not have to walk the
106            list to search for one in prepare_write */
107         if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
108                 list_add_tail(&pCifsFile->flist, 
109                               &pCifsInode->openFileList);
110         } else {
111                 list_add(&pCifsFile->flist,
112                          &pCifsInode->openFileList);
113         }
114         write_unlock(&GlobalSMBSeslock);
115         if (pCifsInode->clientCanCacheRead) {
116                 /* we have the inode open somewhere else
117                    no need to discard cache data */
118                 goto client_can_cache;
119         }
120
121         /* BB need same check in cifs_create too? */
122         /* if not oplocked, invalidate inode pages if mtime or file
123            size changed */
124         temp = cifs_NTtimeToUnix(le64_to_cpu(buf->LastWriteTime));
125         if (timespec_equal(&file->f_path.dentry->d_inode->i_mtime, &temp) &&
126                            (file->f_path.dentry->d_inode->i_size ==
127                             (loff_t)le64_to_cpu(buf->EndOfFile))) {
128                 cFYI(1, ("inode unchanged on server"));
129         } else {
130                 if (file->f_path.dentry->d_inode->i_mapping) {
131                 /* BB no need to lock inode until after invalidate
132                    since namei code should already have it locked? */
133                         filemap_write_and_wait(file->f_path.dentry->d_inode->i_mapping);
134                 }
135                 cFYI(1, ("invalidating remote inode since open detected it "
136                          "changed"));
137                 invalidate_remote_inode(file->f_path.dentry->d_inode);
138         }
139
140 client_can_cache:
141         if (pTcon->ses->capabilities & CAP_UNIX)
142                 rc = cifs_get_inode_info_unix(&file->f_path.dentry->d_inode,
143                         full_path, inode->i_sb, xid);
144         else
145                 rc = cifs_get_inode_info(&file->f_path.dentry->d_inode,
146                         full_path, buf, inode->i_sb, xid);
147
148         if ((*oplock & 0xF) == OPLOCK_EXCLUSIVE) {
149                 pCifsInode->clientCanCacheAll = TRUE;
150                 pCifsInode->clientCanCacheRead = TRUE;
151                 cFYI(1, ("Exclusive Oplock granted on inode %p",
152                          file->f_path.dentry->d_inode));
153         } else if ((*oplock & 0xF) == OPLOCK_READ)
154                 pCifsInode->clientCanCacheRead = TRUE;
155
156         return rc;
157 }
158
159 int cifs_open(struct inode *inode, struct file *file)
160 {
161         int rc = -EACCES;
162         int xid, oplock;
163         struct cifs_sb_info *cifs_sb;
164         struct cifsTconInfo *pTcon;
165         struct cifsFileInfo *pCifsFile;
166         struct cifsInodeInfo *pCifsInode;
167         struct list_head *tmp;
168         char *full_path = NULL;
169         int desiredAccess;
170         int disposition;
171         __u16 netfid;
172         FILE_ALL_INFO *buf = NULL;
173
174         xid = GetXid();
175
176         cifs_sb = CIFS_SB(inode->i_sb);
177         pTcon = cifs_sb->tcon;
178
179         if (file->f_flags & O_CREAT) {
180                 /* search inode for this file and fill in file->private_data */
181                 pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
182                 read_lock(&GlobalSMBSeslock);
183                 list_for_each(tmp, &pCifsInode->openFileList) {
184                         pCifsFile = list_entry(tmp, struct cifsFileInfo,
185                                                flist);
186                         if ((pCifsFile->pfile == NULL) &&
187                             (pCifsFile->pid == current->tgid)) {
188                                 /* mode set in cifs_create */
189
190                                 /* needed for writepage */
191                                 pCifsFile->pfile = file;
192                                 
193                                 file->private_data = pCifsFile;
194                                 break;
195                         }
196                 }
197                 read_unlock(&GlobalSMBSeslock);
198                 if (file->private_data != NULL) {
199                         rc = 0;
200                         FreeXid(xid);
201                         return rc;
202                 } else {
203                         if (file->f_flags & O_EXCL)
204                                 cERROR(1, ("could not find file instance for "
205                                            "new file %p", file));
206                 }
207         }
208
209         full_path = build_path_from_dentry(file->f_path.dentry);
210         if (full_path == NULL) {
211                 FreeXid(xid);
212                 return -ENOMEM;
213         }
214
215         cFYI(1, (" inode = 0x%p file flags are 0x%x for %s",
216                  inode, file->f_flags, full_path));
217         desiredAccess = cifs_convert_flags(file->f_flags);
218
219 /*********************************************************************
220  *  open flag mapping table:
221  *  
222  *      POSIX Flag            CIFS Disposition
223  *      ----------            ---------------- 
224  *      O_CREAT               FILE_OPEN_IF
225  *      O_CREAT | O_EXCL      FILE_CREATE
226  *      O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
227  *      O_TRUNC               FILE_OVERWRITE
228  *      none of the above     FILE_OPEN
229  *
230  *      Note that there is not a direct match between disposition
231  *      FILE_SUPERSEDE (ie create whether or not file exists although 
232  *      O_CREAT | O_TRUNC is similar but truncates the existing
233  *      file rather than creating a new file as FILE_SUPERSEDE does
234  *      (which uses the attributes / metadata passed in on open call)
235  *?
236  *?  O_SYNC is a reasonable match to CIFS writethrough flag  
237  *?  and the read write flags match reasonably.  O_LARGEFILE
238  *?  is irrelevant because largefile support is always used
239  *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
240  *       O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
241  *********************************************************************/
242
243         disposition = cifs_get_disposition(file->f_flags);
244
245         if (oplockEnabled)
246                 oplock = REQ_OPLOCK;
247         else
248                 oplock = FALSE;
249
250         /* BB pass O_SYNC flag through on file attributes .. BB */
251
252         /* Also refresh inode by passing in file_info buf returned by SMBOpen
253            and calling get_inode_info with returned buf (at least helps
254            non-Unix server case) */
255
256         /* BB we can not do this if this is the second open of a file 
257            and the first handle has writebehind data, we might be 
258            able to simply do a filemap_fdatawrite/filemap_fdatawait first */
259         buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
260         if (!buf) {
261                 rc = -ENOMEM;
262                 goto out;
263         }
264
265         if (cifs_sb->tcon->ses->capabilities & CAP_NT_SMBS)
266                 rc = CIFSSMBOpen(xid, pTcon, full_path, disposition, 
267                          desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
268                          cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
269                                  & CIFS_MOUNT_MAP_SPECIAL_CHR);
270         else
271                 rc = -EIO; /* no NT SMB support fall into legacy open below */
272
273         if (rc == -EIO) {
274                 /* Old server, try legacy style OpenX */
275                 rc = SMBLegacyOpen(xid, pTcon, full_path, disposition,
276                         desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
277                         cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
278                                 & CIFS_MOUNT_MAP_SPECIAL_CHR);
279         }
280         if (rc) {
281                 cFYI(1, ("cifs_open returned 0x%x", rc));
282                 goto out;
283         }
284         file->private_data =
285                 kmalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
286         if (file->private_data == NULL) {
287                 rc = -ENOMEM;
288                 goto out;
289         }
290         pCifsFile = cifs_init_private(file->private_data, inode, file, netfid);
291         write_lock(&GlobalSMBSeslock);
292         list_add(&pCifsFile->tlist, &pTcon->openFileList);
293
294         pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
295         if (pCifsInode) {
296                 rc = cifs_open_inode_helper(inode, file, pCifsInode,
297                                             pCifsFile, pTcon,
298                                             &oplock, buf, full_path, xid);
299         } else {
300                 write_unlock(&GlobalSMBSeslock);
301         }
302
303         if (oplock & CIFS_CREATE_ACTION) {           
304                 /* time to set mode which we can not set earlier due to
305                    problems creating new read-only files */
306                 if (cifs_sb->tcon->ses->capabilities & CAP_UNIX) {
307                         CIFSSMBUnixSetPerms(xid, pTcon, full_path,
308                                             inode->i_mode,
309                                             (__u64)-1, (__u64)-1, 0 /* dev */,
310                                             cifs_sb->local_nls,
311                                             cifs_sb->mnt_cifs_flags & 
312                                                 CIFS_MOUNT_MAP_SPECIAL_CHR);
313                 } else {
314                         /* BB implement via Windows security descriptors eg
315                            CIFSSMBWinSetPerms(xid, pTcon, full_path, mode,
316                                               -1, -1, local_nls);
317                            in the meantime could set r/o dos attribute when
318                            perms are eg: mode & 0222 == 0 */
319                 }
320         }
321
322 out:
323         kfree(buf);
324         kfree(full_path);
325         FreeXid(xid);
326         return rc;
327 }
328
329 /* Try to reacquire byte range locks that were released when session */
330 /* to server was lost */
331 static int cifs_relock_file(struct cifsFileInfo *cifsFile)
332 {
333         int rc = 0;
334
335 /* BB list all locks open on this file and relock */
336
337         return rc;
338 }
339
340 static int cifs_reopen_file(struct file *file, int can_flush)
341 {
342         int rc = -EACCES;
343         int xid, oplock;
344         struct cifs_sb_info *cifs_sb;
345         struct cifsTconInfo *pTcon;
346         struct cifsFileInfo *pCifsFile;
347         struct cifsInodeInfo *pCifsInode;
348         struct inode * inode;
349         char *full_path = NULL;
350         int desiredAccess;
351         int disposition = FILE_OPEN;
352         __u16 netfid;
353
354         if (file->private_data) {
355                 pCifsFile = (struct cifsFileInfo *)file->private_data;
356         } else
357                 return -EBADF;
358
359         xid = GetXid();
360         down(&pCifsFile->fh_sem);
361         if (pCifsFile->invalidHandle == FALSE) {
362                 up(&pCifsFile->fh_sem);
363                 FreeXid(xid);
364                 return 0;
365         }
366
367         if (file->f_path.dentry == NULL) {
368                 cERROR(1, ("no valid name if dentry freed"));
369                 dump_stack();
370                 rc = -EBADF;
371                 goto reopen_error_exit;
372         }
373
374         inode = file->f_path.dentry->d_inode;
375         if(inode == NULL) {
376                 cERROR(1, ("inode not valid"));
377                 dump_stack();
378                 rc = -EBADF;
379                 goto reopen_error_exit;
380         }
381                 
382         cifs_sb = CIFS_SB(inode->i_sb);
383         pTcon = cifs_sb->tcon;
384
385 /* can not grab rename sem here because various ops, including
386    those that already have the rename sem can end up causing writepage
387    to get called and if the server was down that means we end up here,
388    and we can never tell if the caller already has the rename_sem */
389         full_path = build_path_from_dentry(file->f_path.dentry);
390         if (full_path == NULL) {
391                 rc = -ENOMEM;
392 reopen_error_exit:
393                 up(&pCifsFile->fh_sem);
394                 FreeXid(xid);
395                 return rc;
396         }
397
398         cFYI(1, ("inode = 0x%p file flags 0x%x for %s",
399                  inode, file->f_flags,full_path));
400         desiredAccess = cifs_convert_flags(file->f_flags);
401
402         if (oplockEnabled)
403                 oplock = REQ_OPLOCK;
404         else
405                 oplock = FALSE;
406
407         /* Can not refresh inode by passing in file_info buf to be returned
408            by SMBOpen and then calling get_inode_info with returned buf 
409            since file might have write behind data that needs to be flushed 
410            and server version of file size can be stale. If we knew for sure
411            that inode was not dirty locally we could do this */
412
413         rc = CIFSSMBOpen(xid, pTcon, full_path, disposition, desiredAccess,
414                          CREATE_NOT_DIR, &netfid, &oplock, NULL,
415                          cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & 
416                                 CIFS_MOUNT_MAP_SPECIAL_CHR);
417         if (rc) {
418                 up(&pCifsFile->fh_sem);
419                 cFYI(1, ("cifs_open returned 0x%x", rc));
420                 cFYI(1, ("oplock: %d", oplock));
421         } else {
422                 pCifsFile->netfid = netfid;
423                 pCifsFile->invalidHandle = FALSE;
424                 up(&pCifsFile->fh_sem);
425                 pCifsInode = CIFS_I(inode);
426                 if (pCifsInode) {
427                         if (can_flush) {
428                                 filemap_write_and_wait(inode->i_mapping);
429                         /* temporarily disable caching while we
430                            go to server to get inode info */
431                                 pCifsInode->clientCanCacheAll = FALSE;
432                                 pCifsInode->clientCanCacheRead = FALSE;
433                                 if (pTcon->ses->capabilities & CAP_UNIX)
434                                         rc = cifs_get_inode_info_unix(&inode,
435                                                 full_path, inode->i_sb, xid);
436                                 else
437                                         rc = cifs_get_inode_info(&inode,
438                                                 full_path, NULL, inode->i_sb,
439                                                 xid);
440                         } /* else we are writing out data to server already
441                              and could deadlock if we tried to flush data, and
442                              since we do not know if we have data that would
443                              invalidate the current end of file on the server
444                              we can not go to the server to get the new inod
445                              info */
446                         if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
447                                 pCifsInode->clientCanCacheAll = TRUE;
448                                 pCifsInode->clientCanCacheRead = TRUE;
449                                 cFYI(1, ("Exclusive Oplock granted on inode %p",
450                                          file->f_path.dentry->d_inode));
451                         } else if ((oplock & 0xF) == OPLOCK_READ) {
452                                 pCifsInode->clientCanCacheRead = TRUE;
453                                 pCifsInode->clientCanCacheAll = FALSE;
454                         } else {
455                                 pCifsInode->clientCanCacheRead = FALSE;
456                                 pCifsInode->clientCanCacheAll = FALSE;
457                         }
458                         cifs_relock_file(pCifsFile);
459                 }
460         }
461
462         kfree(full_path);
463         FreeXid(xid);
464         return rc;
465 }
466
467 int cifs_close(struct inode *inode, struct file *file)
468 {
469         int rc = 0;
470         int xid;
471         struct cifs_sb_info *cifs_sb;
472         struct cifsTconInfo *pTcon;
473         struct cifsFileInfo *pSMBFile =
474                 (struct cifsFileInfo *)file->private_data;
475
476         xid = GetXid();
477
478         cifs_sb = CIFS_SB(inode->i_sb);
479         pTcon = cifs_sb->tcon;
480         if (pSMBFile) {
481                 struct cifsLockInfo *li, *tmp;
482
483                 pSMBFile->closePend = TRUE;
484                 if (pTcon) {
485                         /* no sense reconnecting to close a file that is
486                            already closed */
487                         if (pTcon->tidStatus != CifsNeedReconnect) {
488                                 int timeout = 2;
489                                 while((atomic_read(&pSMBFile->wrtPending) != 0)
490                                          && (timeout < 1000) ) {
491                                         /* Give write a better chance to get to
492                                         server ahead of the close.  We do not
493                                         want to add a wait_q here as it would
494                                         increase the memory utilization as
495                                         the struct would be in each open file,
496                                         but this should give enough time to 
497                                         clear the socket */
498 #ifdef CONFIG_CIFS_DEBUG2
499                                         cFYI(1,("close delay, write pending"));
500 #endif /* DEBUG2 */
501                                         msleep(timeout);
502                                         timeout *= 4;
503                                 }
504                                 if(atomic_read(&pSMBFile->wrtPending))
505                                         cERROR(1,("close with pending writes"));
506                                 rc = CIFSSMBClose(xid, pTcon,
507                                                   pSMBFile->netfid);
508                         }
509                 }
510
511                 /* Delete any outstanding lock records.
512                    We'll lose them when the file is closed anyway. */
513                 mutex_lock(&pSMBFile->lock_mutex);
514                 list_for_each_entry_safe(li, tmp, &pSMBFile->llist, llist) {
515                         list_del(&li->llist);
516                         kfree(li);
517                 }
518                 mutex_unlock(&pSMBFile->lock_mutex);
519
520                 write_lock(&GlobalSMBSeslock);
521                 list_del(&pSMBFile->flist);
522                 list_del(&pSMBFile->tlist);
523                 write_unlock(&GlobalSMBSeslock);
524                 kfree(pSMBFile->search_resume_name);
525                 kfree(file->private_data);
526                 file->private_data = NULL;
527         } else
528                 rc = -EBADF;
529
530         if (list_empty(&(CIFS_I(inode)->openFileList))) {
531                 cFYI(1, ("closing last open instance for inode %p", inode));
532                 /* if the file is not open we do not know if we can cache info
533                    on this inode, much less write behind and read ahead */
534                 CIFS_I(inode)->clientCanCacheRead = FALSE;
535                 CIFS_I(inode)->clientCanCacheAll  = FALSE;
536         }
537         if ((rc ==0) && CIFS_I(inode)->write_behind_rc)
538                 rc = CIFS_I(inode)->write_behind_rc;
539         FreeXid(xid);
540         return rc;
541 }
542
543 int cifs_closedir(struct inode *inode, struct file *file)
544 {
545         int rc = 0;
546         int xid;
547         struct cifsFileInfo *pCFileStruct =
548             (struct cifsFileInfo *)file->private_data;
549         char *ptmp;
550
551         cFYI(1, ("Closedir inode = 0x%p", inode));
552
553         xid = GetXid();
554
555         if (pCFileStruct) {
556                 struct cifsTconInfo *pTcon;
557                 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
558
559                 pTcon = cifs_sb->tcon;
560
561                 cFYI(1, ("Freeing private data in close dir"));
562                 if ((pCFileStruct->srch_inf.endOfSearch == FALSE) &&
563                    (pCFileStruct->invalidHandle == FALSE)) {
564                         pCFileStruct->invalidHandle = TRUE;
565                         rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
566                         cFYI(1, ("Closing uncompleted readdir with rc %d",
567                                  rc));
568                         /* not much we can do if it fails anyway, ignore rc */
569                         rc = 0;
570                 }
571                 ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
572                 if (ptmp) {
573                         cFYI(1, ("closedir free smb buf in srch struct"));
574                         pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
575                         if(pCFileStruct->srch_inf.smallBuf)
576                                 cifs_small_buf_release(ptmp);
577                         else
578                                 cifs_buf_release(ptmp);
579                 }
580                 ptmp = pCFileStruct->search_resume_name;
581                 if (ptmp) {
582                         cFYI(1, ("closedir free resume name"));
583                         pCFileStruct->search_resume_name = NULL;
584                         kfree(ptmp);
585                 }
586                 kfree(file->private_data);
587                 file->private_data = NULL;
588         }
589         /* BB can we lock the filestruct while this is going on? */
590         FreeXid(xid);
591         return rc;
592 }
593
594 static int store_file_lock(struct cifsFileInfo *fid, __u64 len,
595                                 __u64 offset, __u8 lockType)
596 {
597         struct cifsLockInfo *li = kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
598         if (li == NULL)
599                 return -ENOMEM;
600         li->offset = offset;
601         li->length = len;
602         li->type = lockType;
603         mutex_lock(&fid->lock_mutex);
604         list_add(&li->llist, &fid->llist);
605         mutex_unlock(&fid->lock_mutex);
606         return 0;
607 }
608
609 int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
610 {
611         int rc, xid;
612         __u32 numLock = 0;
613         __u32 numUnlock = 0;
614         __u64 length;
615         int wait_flag = FALSE;
616         struct cifs_sb_info *cifs_sb;
617         struct cifsTconInfo *pTcon;
618         __u16 netfid;
619         __u8 lockType = LOCKING_ANDX_LARGE_FILES;
620         int posix_locking;
621
622         length = 1 + pfLock->fl_end - pfLock->fl_start;
623         rc = -EACCES;
624         xid = GetXid();
625
626         cFYI(1, ("Lock parm: 0x%x flockflags: "
627                  "0x%x flocktype: 0x%x start: %lld end: %lld",
628                 cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
629                 pfLock->fl_end));
630
631         if (pfLock->fl_flags & FL_POSIX)
632                 cFYI(1, ("Posix"));
633         if (pfLock->fl_flags & FL_FLOCK)
634                 cFYI(1, ("Flock"));
635         if (pfLock->fl_flags & FL_SLEEP) {
636                 cFYI(1, ("Blocking lock"));
637                 wait_flag = TRUE;
638         }
639         if (pfLock->fl_flags & FL_ACCESS)
640                 cFYI(1, ("Process suspended by mandatory locking - "
641                          "not implemented yet"));
642         if (pfLock->fl_flags & FL_LEASE)
643                 cFYI(1, ("Lease on file - not implemented yet"));
644         if (pfLock->fl_flags & 
645             (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
646                 cFYI(1, ("Unknown lock flags 0x%x", pfLock->fl_flags));
647
648         if (pfLock->fl_type == F_WRLCK) {
649                 cFYI(1, ("F_WRLCK "));
650                 numLock = 1;
651         } else if (pfLock->fl_type == F_UNLCK) {
652                 cFYI(1, ("F_UNLCK"));
653                 numUnlock = 1;
654                 /* Check if unlock includes more than
655                 one lock range */
656         } else if (pfLock->fl_type == F_RDLCK) {
657                 cFYI(1, ("F_RDLCK"));
658                 lockType |= LOCKING_ANDX_SHARED_LOCK;
659                 numLock = 1;
660         } else if (pfLock->fl_type == F_EXLCK) {
661                 cFYI(1, ("F_EXLCK"));
662                 numLock = 1;
663         } else if (pfLock->fl_type == F_SHLCK) {
664                 cFYI(1, ("F_SHLCK"));
665                 lockType |= LOCKING_ANDX_SHARED_LOCK;
666                 numLock = 1;
667         } else
668                 cFYI(1, ("Unknown type of lock"));
669
670         cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
671         pTcon = cifs_sb->tcon;
672
673         if (file->private_data == NULL) {
674                 FreeXid(xid);
675                 return -EBADF;
676         }
677         netfid = ((struct cifsFileInfo *)file->private_data)->netfid;
678
679         posix_locking = (cifs_sb->tcon->ses->capabilities & CAP_UNIX) &&
680                         (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(cifs_sb->tcon->fsUnixInfo.Capability));
681
682         /* BB add code here to normalize offset and length to
683         account for negative length which we can not accept over the
684         wire */
685         if (IS_GETLK(cmd)) {
686                 if(posix_locking) {
687                         int posix_lock_type;
688                         if(lockType & LOCKING_ANDX_SHARED_LOCK)
689                                 posix_lock_type = CIFS_RDLCK;
690                         else
691                                 posix_lock_type = CIFS_WRLCK;
692                         rc = CIFSSMBPosixLock(xid, pTcon, netfid, 1 /* get */,
693                                         length, pfLock,
694                                         posix_lock_type, wait_flag);
695                         FreeXid(xid);
696                         return rc;
697                 }
698
699                 /* BB we could chain these into one lock request BB */
700                 rc = CIFSSMBLock(xid, pTcon, netfid, length, pfLock->fl_start,
701                                  0, 1, lockType, 0 /* wait flag */ );
702                 if (rc == 0) {
703                         rc = CIFSSMBLock(xid, pTcon, netfid, length, 
704                                          pfLock->fl_start, 1 /* numUnlock */ ,
705                                          0 /* numLock */ , lockType,
706                                          0 /* wait flag */ );
707                         pfLock->fl_type = F_UNLCK;
708                         if (rc != 0)
709                                 cERROR(1, ("Error unlocking previously locked "
710                                            "range %d during test of lock", rc));
711                         rc = 0;
712
713                 } else {
714                         /* if rc == ERR_SHARING_VIOLATION ? */
715                         rc = 0; /* do not change lock type to unlock
716                                    since range in use */
717                 }
718
719                 FreeXid(xid);
720                 return rc;
721         }
722
723         if (!numLock && !numUnlock) {
724                 /* if no lock or unlock then nothing
725                 to do since we do not know what it is */
726                 FreeXid(xid);
727                 return -EOPNOTSUPP;
728         }
729
730         if (posix_locking) {
731                 int posix_lock_type;
732                 if(lockType & LOCKING_ANDX_SHARED_LOCK)
733                         posix_lock_type = CIFS_RDLCK;
734                 else
735                         posix_lock_type = CIFS_WRLCK;
736                 
737                 if(numUnlock == 1)
738                         posix_lock_type = CIFS_UNLCK;
739
740                 rc = CIFSSMBPosixLock(xid, pTcon, netfid, 0 /* set */,
741                                       length, pfLock,
742                                       posix_lock_type, wait_flag);
743         } else {
744                 struct cifsFileInfo *fid = (struct cifsFileInfo *)file->private_data;
745
746                 if (numLock) {
747                         rc = CIFSSMBLock(xid, pTcon, netfid, length, pfLock->fl_start,
748                                         0, numLock, lockType, wait_flag);
749
750                         if (rc == 0) {
751                                 /* For Windows locks we must store them. */
752                                 rc = store_file_lock(fid, length,
753                                                 pfLock->fl_start, lockType);
754                         }
755                 } else if (numUnlock) {
756                         /* For each stored lock that this unlock overlaps
757                            completely, unlock it. */
758                         int stored_rc = 0;
759                         struct cifsLockInfo *li, *tmp;
760
761                         rc = 0;
762                         mutex_lock(&fid->lock_mutex);
763                         list_for_each_entry_safe(li, tmp, &fid->llist, llist) {
764                                 if (pfLock->fl_start <= li->offset &&
765                                                 length >= li->length) {
766                                         stored_rc = CIFSSMBLock(xid, pTcon, netfid,
767                                                         li->length, li->offset,
768                                                         1, 0, li->type, FALSE);
769                                         if (stored_rc)
770                                                 rc = stored_rc;
771
772                                         list_del(&li->llist);
773                                         kfree(li);
774                                 }
775                         }
776                         mutex_unlock(&fid->lock_mutex);
777                 }
778         }
779
780         if (pfLock->fl_flags & FL_POSIX)
781                 posix_lock_file_wait(file, pfLock);
782         FreeXid(xid);
783         return rc;
784 }
785
786 ssize_t cifs_user_write(struct file *file, const char __user *write_data,
787         size_t write_size, loff_t *poffset)
788 {
789         int rc = 0;
790         unsigned int bytes_written = 0;
791         unsigned int total_written;
792         struct cifs_sb_info *cifs_sb;
793         struct cifsTconInfo *pTcon;
794         int xid, long_op;
795         struct cifsFileInfo *open_file;
796
797         cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
798
799         pTcon = cifs_sb->tcon;
800
801         /* cFYI(1,
802            (" write %d bytes to offset %lld of %s", write_size,
803            *poffset, file->f_path.dentry->d_name.name)); */
804
805         if (file->private_data == NULL)
806                 return -EBADF;
807         open_file = (struct cifsFileInfo *) file->private_data;
808         
809         xid = GetXid();
810
811         if (*poffset > file->f_path.dentry->d_inode->i_size)
812                 long_op = 2; /* writes past end of file can take a long time */
813         else
814                 long_op = 1;
815
816         for (total_written = 0; write_size > total_written;
817              total_written += bytes_written) {
818                 rc = -EAGAIN;
819                 while (rc == -EAGAIN) {
820                         if (file->private_data == NULL) {
821                                 /* file has been closed on us */
822                                 FreeXid(xid);
823                         /* if we have gotten here we have written some data
824                            and blocked, and the file has been freed on us while
825                            we blocked so return what we managed to write */
826                                 return total_written;
827                         } 
828                         if (open_file->closePend) {
829                                 FreeXid(xid);
830                                 if (total_written)
831                                         return total_written;
832                                 else
833                                         return -EBADF;
834                         }
835                         if (open_file->invalidHandle) {
836                                 /* we could deadlock if we called
837                                    filemap_fdatawait from here so tell
838                                    reopen_file not to flush data to server
839                                    now */
840                                 rc = cifs_reopen_file(file, FALSE);
841                                 if (rc != 0)
842                                         break;
843                         }
844
845                         rc = CIFSSMBWrite(xid, pTcon,
846                                 open_file->netfid,
847                                 min_t(const int, cifs_sb->wsize,
848                                       write_size - total_written),
849                                 *poffset, &bytes_written,
850                                 NULL, write_data + total_written, long_op);
851                 }
852                 if (rc || (bytes_written == 0)) {
853                         if (total_written)
854                                 break;
855                         else {
856                                 FreeXid(xid);
857                                 return rc;
858                         }
859                 } else
860                         *poffset += bytes_written;
861                 long_op = FALSE; /* subsequent writes fast -
862                                     15 seconds is plenty */
863         }
864
865         cifs_stats_bytes_written(pTcon, total_written);
866
867         /* since the write may have blocked check these pointers again */
868         if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
869                 struct inode *inode = file->f_path.dentry->d_inode;
870 /* Do not update local mtime - server will set its actual value on write                
871  *              inode->i_ctime = inode->i_mtime = 
872  *                      current_fs_time(inode->i_sb);*/
873                 if (total_written > 0) {
874                         spin_lock(&inode->i_lock);
875                         if (*poffset > file->f_path.dentry->d_inode->i_size)
876                                 i_size_write(file->f_path.dentry->d_inode,
877                                         *poffset);
878                         spin_unlock(&inode->i_lock);
879                 }
880                 mark_inode_dirty_sync(file->f_path.dentry->d_inode);    
881         }
882         FreeXid(xid);
883         return total_written;
884 }
885
886 static ssize_t cifs_write(struct file *file, const char *write_data,
887         size_t write_size, loff_t *poffset)
888 {
889         int rc = 0;
890         unsigned int bytes_written = 0;
891         unsigned int total_written;
892         struct cifs_sb_info *cifs_sb;
893         struct cifsTconInfo *pTcon;
894         int xid, long_op;
895         struct cifsFileInfo *open_file;
896
897         cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
898
899         pTcon = cifs_sb->tcon;
900
901         cFYI(1,("write %zd bytes to offset %lld of %s", write_size,
902            *poffset, file->f_path.dentry->d_name.name));
903
904         if (file->private_data == NULL)
905                 return -EBADF;
906         open_file = (struct cifsFileInfo *)file->private_data;
907         
908         xid = GetXid();
909
910         if (*poffset > file->f_path.dentry->d_inode->i_size)
911                 long_op = 2; /* writes past end of file can take a long time */
912         else
913                 long_op = 1;
914
915         for (total_written = 0; write_size > total_written;
916              total_written += bytes_written) {
917                 rc = -EAGAIN;
918                 while (rc == -EAGAIN) {
919                         if (file->private_data == NULL) {
920                                 /* file has been closed on us */
921                                 FreeXid(xid);
922                         /* if we have gotten here we have written some data
923                            and blocked, and the file has been freed on us
924                            while we blocked so return what we managed to 
925                            write */
926                                 return total_written;
927                         } 
928                         if (open_file->closePend) {
929                                 FreeXid(xid);
930                                 if (total_written)
931                                         return total_written;
932                                 else
933                                         return -EBADF;
934                         }
935                         if (open_file->invalidHandle) {
936                                 /* we could deadlock if we called
937                                    filemap_fdatawait from here so tell
938                                    reopen_file not to flush data to 
939                                    server now */
940                                 rc = cifs_reopen_file(file, FALSE);
941                                 if (rc != 0)
942                                         break;
943                         }
944                         if(experimEnabled || (pTcon->ses->server &&
945                                 ((pTcon->ses->server->secMode & 
946                                 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
947                                 == 0))) {
948                                 struct kvec iov[2];
949                                 unsigned int len;
950
951                                 len = min((size_t)cifs_sb->wsize,
952                                           write_size - total_written);
953                                 /* iov[0] is reserved for smb header */
954                                 iov[1].iov_base = (char *)write_data +
955                                                   total_written;
956                                 iov[1].iov_len = len;
957                                 rc = CIFSSMBWrite2(xid, pTcon,
958                                                 open_file->netfid, len,
959                                                 *poffset, &bytes_written,
960                                                 iov, 1, long_op);
961                         } else
962                                 rc = CIFSSMBWrite(xid, pTcon,
963                                          open_file->netfid,
964                                          min_t(const int, cifs_sb->wsize,
965                                                write_size - total_written),
966                                          *poffset, &bytes_written,
967                                          write_data + total_written,
968                                          NULL, long_op);
969                 }
970                 if (rc || (bytes_written == 0)) {
971                         if (total_written)
972                                 break;
973                         else {
974                                 FreeXid(xid);
975                                 return rc;
976                         }
977                 } else
978                         *poffset += bytes_written;
979                 long_op = FALSE; /* subsequent writes fast - 
980                                     15 seconds is plenty */
981         }
982
983         cifs_stats_bytes_written(pTcon, total_written);
984
985         /* since the write may have blocked check these pointers again */
986         if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
987 /*BB We could make this contingent on superblock ATIME flag too */
988 /*              file->f_path.dentry->d_inode->i_ctime =
989                 file->f_path.dentry->d_inode->i_mtime = CURRENT_TIME;*/
990                 if (total_written > 0) {
991                         spin_lock(&file->f_path.dentry->d_inode->i_lock);
992                         if (*poffset > file->f_path.dentry->d_inode->i_size)
993                                 i_size_write(file->f_path.dentry->d_inode,
994                                              *poffset);
995                         spin_unlock(&file->f_path.dentry->d_inode->i_lock);
996                 }
997                 mark_inode_dirty_sync(file->f_path.dentry->d_inode);
998         }
999         FreeXid(xid);
1000         return total_written;
1001 }
1002
1003 struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode)
1004 {
1005         struct cifsFileInfo *open_file;
1006         int rc;
1007
1008         /* Having a null inode here (because mapping->host was set to zero by
1009         the VFS or MM) should not happen but we had reports of on oops (due to
1010         it being zero) during stress testcases so we need to check for it */
1011
1012         if(cifs_inode == NULL) {
1013                 cERROR(1,("Null inode passed to cifs_writeable_file"));
1014                 dump_stack();
1015                 return NULL;
1016         }
1017
1018         read_lock(&GlobalSMBSeslock);
1019         list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
1020                 if (open_file->closePend)
1021                         continue;
1022                 if (open_file->pfile &&
1023                     ((open_file->pfile->f_flags & O_RDWR) ||
1024                      (open_file->pfile->f_flags & O_WRONLY))) {
1025                         atomic_inc(&open_file->wrtPending);
1026                         read_unlock(&GlobalSMBSeslock);
1027                         if((open_file->invalidHandle) && 
1028                            (!open_file->closePend) /* BB fixme -since the second clause can not be true remove it BB */) {
1029                                 rc = cifs_reopen_file(open_file->pfile, FALSE);
1030                                 /* if it fails, try another handle - might be */
1031                                 /* dangerous to hold up writepages with retry */
1032                                 if(rc) {
1033                                         cFYI(1,("failed on reopen file in wp"));
1034                                         read_lock(&GlobalSMBSeslock);
1035                                         /* can not use this handle, no write
1036                                         pending on this one after all */
1037                                         atomic_dec
1038                                              (&open_file->wrtPending);
1039                                         continue;
1040                                 }
1041                         }
1042                         return open_file;
1043                 }
1044         }
1045         read_unlock(&GlobalSMBSeslock);
1046         return NULL;
1047 }
1048
1049 static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1050 {
1051         struct address_space *mapping = page->mapping;
1052         loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1053         char *write_data;
1054         int rc = -EFAULT;
1055         int bytes_written = 0;
1056         struct cifs_sb_info *cifs_sb;
1057         struct cifsTconInfo *pTcon;
1058         struct inode *inode;
1059         struct cifsFileInfo *open_file;
1060
1061         if (!mapping || !mapping->host)
1062                 return -EFAULT;
1063
1064         inode = page->mapping->host;
1065         cifs_sb = CIFS_SB(inode->i_sb);
1066         pTcon = cifs_sb->tcon;
1067
1068         offset += (loff_t)from;
1069         write_data = kmap(page);
1070         write_data += from;
1071
1072         if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1073                 kunmap(page);
1074                 return -EIO;
1075         }
1076
1077         /* racing with truncate? */
1078         if (offset > mapping->host->i_size) {
1079                 kunmap(page);
1080                 return 0; /* don't care */
1081         }
1082
1083         /* check to make sure that we are not extending the file */
1084         if (mapping->host->i_size - offset < (loff_t)to)
1085                 to = (unsigned)(mapping->host->i_size - offset); 
1086
1087         open_file = find_writable_file(CIFS_I(mapping->host));
1088         if (open_file) {
1089                 bytes_written = cifs_write(open_file->pfile, write_data,
1090                                            to-from, &offset);
1091                 atomic_dec(&open_file->wrtPending);
1092                 /* Does mm or vfs already set times? */
1093                 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
1094                 if ((bytes_written > 0) && (offset)) {
1095                         rc = 0;
1096                 } else if (bytes_written < 0) {
1097                         if (rc != -EBADF)
1098                                 rc = bytes_written;
1099                 }
1100         } else {
1101                 cFYI(1, ("No writeable filehandles for inode"));
1102                 rc = -EIO;
1103         }
1104
1105         kunmap(page);
1106         return rc;
1107 }
1108
1109 static int cifs_writepages(struct address_space *mapping,
1110                            struct writeback_control *wbc)
1111 {
1112         struct backing_dev_info *bdi = mapping->backing_dev_info;
1113         unsigned int bytes_to_write;
1114         unsigned int bytes_written;
1115         struct cifs_sb_info *cifs_sb;
1116         int done = 0;
1117         pgoff_t end;
1118         pgoff_t index;
1119         int range_whole = 0;
1120         struct kvec * iov;
1121         int len;
1122         int n_iov = 0;
1123         pgoff_t next;
1124         int nr_pages;
1125         __u64 offset = 0;
1126         struct cifsFileInfo *open_file;
1127         struct page *page;
1128         struct pagevec pvec;
1129         int rc = 0;
1130         int scanned = 0;
1131         int xid;
1132
1133         cifs_sb = CIFS_SB(mapping->host->i_sb);
1134         
1135         /*
1136          * If wsize is smaller that the page cache size, default to writing
1137          * one page at a time via cifs_writepage
1138          */
1139         if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1140                 return generic_writepages(mapping, wbc);
1141
1142         if((cifs_sb->tcon->ses) && (cifs_sb->tcon->ses->server))
1143                 if(cifs_sb->tcon->ses->server->secMode &
1144                           (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
1145                         if(!experimEnabled) 
1146                                 return generic_writepages(mapping, wbc);
1147
1148         iov = kmalloc(32 * sizeof(struct kvec), GFP_KERNEL);
1149         if(iov == NULL)
1150                 return generic_writepages(mapping, wbc);
1151
1152
1153         /*
1154          * BB: Is this meaningful for a non-block-device file system?
1155          * If it is, we should test it again after we do I/O
1156          */
1157         if (wbc->nonblocking && bdi_write_congested(bdi)) {
1158                 wbc->encountered_congestion = 1;
1159                 kfree(iov);
1160                 return 0;
1161         }
1162
1163         xid = GetXid();
1164
1165         pagevec_init(&pvec, 0);
1166         if (wbc->range_cyclic) {
1167                 index = mapping->writeback_index; /* Start from prev offset */
1168                 end = -1;
1169         } else {
1170                 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1171                 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1172                 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
1173                         range_whole = 1;
1174                 scanned = 1;
1175         }
1176 retry:
1177         while (!done && (index <= end) &&
1178                (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
1179                         PAGECACHE_TAG_DIRTY,
1180                         min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1))) {
1181                 int first;
1182                 unsigned int i;
1183
1184                 first = -1;
1185                 next = 0;
1186                 n_iov = 0;
1187                 bytes_to_write = 0;
1188
1189                 for (i = 0; i < nr_pages; i++) {
1190                         page = pvec.pages[i];
1191                         /*
1192                          * At this point we hold neither mapping->tree_lock nor
1193                          * lock on the page itself: the page may be truncated or
1194                          * invalidated (changing page->mapping to NULL), or even
1195                          * swizzled back from swapper_space to tmpfs file
1196                          * mapping
1197                          */
1198
1199                         if (first < 0)
1200                                 lock_page(page);
1201                         else if (TestSetPageLocked(page))
1202                                 break;
1203
1204                         if (unlikely(page->mapping != mapping)) {
1205                                 unlock_page(page);
1206                                 break;
1207                         }
1208
1209                         if (!wbc->range_cyclic && page->index > end) {
1210                                 done = 1;
1211                                 unlock_page(page);
1212                                 break;
1213                         }
1214
1215                         if (next && (page->index != next)) {
1216                                 /* Not next consecutive page */
1217                                 unlock_page(page);
1218                                 break;
1219                         }
1220
1221                         if (wbc->sync_mode != WB_SYNC_NONE)
1222                                 wait_on_page_writeback(page);
1223
1224                         if (PageWriteback(page) ||
1225                                         !clear_page_dirty_for_io(page)) {
1226                                 unlock_page(page);
1227                                 break;
1228                         }
1229
1230                         /*
1231                          * This actually clears the dirty bit in the radix tree.
1232                          * See cifs_writepage() for more commentary.
1233                          */
1234                         set_page_writeback(page);
1235
1236                         if (page_offset(page) >= mapping->host->i_size) {
1237                                 done = 1;
1238                                 unlock_page(page);
1239                                 end_page_writeback(page);
1240                                 break;
1241                         }
1242
1243                         /*
1244                          * BB can we get rid of this?  pages are held by pvec
1245                          */
1246                         page_cache_get(page);
1247
1248                         len = min(mapping->host->i_size - page_offset(page),
1249                                   (loff_t)PAGE_CACHE_SIZE);
1250
1251                         /* reserve iov[0] for the smb header */
1252                         n_iov++;
1253                         iov[n_iov].iov_base = kmap(page);
1254                         iov[n_iov].iov_len = len;
1255                         bytes_to_write += len;
1256
1257                         if (first < 0) {
1258                                 first = i;
1259                                 offset = page_offset(page);
1260                         }
1261                         next = page->index + 1;
1262                         if (bytes_to_write + PAGE_CACHE_SIZE > cifs_sb->wsize)
1263                                 break;
1264                 }
1265                 if (n_iov) {
1266                         /* Search for a writable handle every time we call
1267                          * CIFSSMBWrite2.  We can't rely on the last handle
1268                          * we used to still be valid
1269                          */
1270                         open_file = find_writable_file(CIFS_I(mapping->host));
1271                         if (!open_file) {
1272                                 cERROR(1, ("No writable handles for inode"));
1273                                 rc = -EBADF;
1274                         } else {
1275                                 rc = CIFSSMBWrite2(xid, cifs_sb->tcon,
1276                                                    open_file->netfid,
1277                                                    bytes_to_write, offset,
1278                                                    &bytes_written, iov, n_iov,
1279                                                    1);
1280                                 atomic_dec(&open_file->wrtPending);
1281                                 if (rc || bytes_written < bytes_to_write) {
1282                                         cERROR(1, ("Write2 ret %d, written = %d",
1283                                                   rc, bytes_written));
1284                                         /* BB what if continued retry is
1285                                            requested via mount flags? */
1286                                         set_bit(AS_EIO, &mapping->flags);
1287                                 } else {
1288                                         cifs_stats_bytes_written(cifs_sb->tcon,
1289                                                                  bytes_written);
1290                                 }
1291                         }
1292                         for (i = 0; i < n_iov; i++) {
1293                                 page = pvec.pages[first + i];
1294                         /* Should we also set the page error flag
1295                         when rc is success but too little data was written? */
1296                         /* BB investigate retry logic on temporary
1297                         server crash cases and how recovery works
1298                         when the page is marked as error */
1299                         if (rc)
1300                                 SetPageError(page);
1301                                 kunmap(page);
1302                                 unlock_page(page);
1303                                 end_page_writeback(page);
1304                                 page_cache_release(page);
1305                         }
1306                         if ((wbc->nr_to_write -= n_iov) <= 0)
1307                                 done = 1;
1308                         index = next;
1309                 }
1310                 pagevec_release(&pvec);
1311         }
1312         if (!scanned && !done) {
1313                 /*
1314                  * We hit the last page and there is more work to be done: wrap
1315                  * back to the start of the file
1316                  */
1317                 scanned = 1;
1318                 index = 0;
1319                 goto retry;
1320         }
1321         if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
1322                 mapping->writeback_index = index;
1323
1324         FreeXid(xid);
1325         kfree(iov);
1326         return rc;
1327 }
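/*
 * For reference, a minimal sketch of how the VFS normally reaches
 * cifs_writepages above: filemap_fdatawrite() builds a writeback_control
 * and calls ->writepages via do_writepages().  The field values below are
 * assumptions based on the generic writeback code, shown only to
 * illustrate what wbc typically contains here:
 *
 *      struct writeback_control wbc = {
 *              .sync_mode   = WB_SYNC_ALL,
 *              .nr_to_write = mapping->nrpages * 2,
 *              .range_start = 0,
 *              .range_end   = LLONG_MAX,
 *      };
 *      rc = mapping->a_ops->writepages(mapping, &wbc);
 */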
1328
1329 static int cifs_writepage(struct page* page, struct writeback_control *wbc)
1330 {
1331         int rc = -EFAULT;
1332         int xid;
1333
1334         xid = GetXid();
1335 /* BB add check for wbc flags */
1336         page_cache_get(page);
1337         if (!PageUptodate(page)) {
1338                 cFYI(1, ("ppw - page not up to date"));
1339         }
1340
1341         /*
1342          * Set the "writeback" flag, and clear "dirty" in the radix tree.
1343          *
1344          * A writepage() implementation always needs to do either this,
1345          * or re-dirty the page with "redirty_page_for_writepage()" in
1346          * the case of a failure.
1347          *
1348          * Just unlocking the page will cause the radix tree tag-bits
1349          * to fail to update with the state of the page correctly.
1350          */
1351         set_page_writeback(page);
1352         rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
1353         SetPageUptodate(page); /* BB add check for error and ClearPageUptodate? */
1354         unlock_page(page);
1355         end_page_writeback(page);
1356         page_cache_release(page);
1357         FreeXid(xid);
1358         return rc;
1359 }
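/*
 * A sketch of the alternative error path that the comment block above
 * refers to, not something this implementation currently does: on a
 * transient failure a writepage implementation would normally re-dirty
 * the page rather than marking it writeback, e.g.
 *
 *      if (transient_error) {
 *              redirty_page_for_writepage(wbc, page);
 *              unlock_page(page);
 *              return 0;
 *      }
 *
 * where "transient_error" is only an illustrative placeholder.
 */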
1360
1361 static int cifs_commit_write(struct file *file, struct page *page,
1362         unsigned offset, unsigned to)
1363 {
1364         int xid;
1365         int rc = 0;
1366         struct inode *inode = page->mapping->host;
1367         loff_t position = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
1368         char *page_data;
1369
1370         xid = GetXid();
1371         cFYI(1, ("commit write for page %p up to position %lld for %d", 
1372                  page, position, to));
1373         spin_lock(&inode->i_lock);
1374         if (position > inode->i_size) {
1375                 i_size_write(inode, position);
1376         }
1377         spin_unlock(&inode->i_lock);
1378         if (!PageUptodate(page)) {
1379                 position =  ((loff_t)page->index << PAGE_CACHE_SHIFT) + offset;
1380                 /* can not rely on (or let) writepage write this data */
1381                 if (to < offset) {
1382                         cFYI(1, ("Illegal offsets, can not copy from %d to %d",
1383                                 offset, to));
1384                         FreeXid(xid);
1385                         return rc;
1386                 }
1387                 /* this is probably better than calling
1388                    cifs_partialpagewrite directly, since here the file
1389                    handle is known and we might as well leverage it */
1390                 /* BB check if anything else is missing from ppw,
1391                    such as updating the last write time */
1392                 page_data = kmap(page);
1393                 rc = cifs_write(file, page_data + offset, to-offset,
1394                                 &position);
1395                 if (rc > 0)
1396                         rc = 0;
1397                 /* else if (rc < 0) should we set writebehind rc? */
1398                 kunmap(page);
1399         } else {        
1400                 set_page_dirty(page);
1401         }
1402
1403         FreeXid(xid);
1404         return rc;
1405 }
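/*
 * For context, the buffered write path in this kernel pairs the two
 * address_space operations roughly as follows (simplified sketch of the
 * generic code, with the copy step paraphrased):
 *
 *      a_ops->prepare_write(file, page, offset, offset + bytes);
 *      ... generic code copies the user data into the page at offset ...
 *      a_ops->commit_write(file, page, offset, offset + bytes);
 *
 * cifs_prepare_write may leave a page not uptodate (see below), which is
 * why the !PageUptodate branch above pushes the data synchronously
 * through cifs_write instead of just dirtying the page.
 */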
1406
1407 int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
1408 {
1409         int xid;
1410         int rc = 0;
1411         struct inode *inode = file->f_path.dentry->d_inode;
1412
1413         xid = GetXid();
1414
1415         cFYI(1, ("Sync file - name: %s datasync: 0x%x", 
1416                 dentry->d_name.name, datasync));
1417         
1418         rc = filemap_fdatawrite(inode->i_mapping);
1419         if (rc == 0)
1420                 CIFS_I(inode)->write_behind_rc = 0;
1421         FreeXid(xid);
1422         return rc;
1423 }
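/*
 * Note that filemap_fdatawrite starts writeback of all dirty pages for
 * this inode through cifs_writepages; waiting for that writeback to
 * finish is assumed to be handled by the generic fsync path, which
 * issues filemap_fdatawait after calling ->fsync.
 */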
1424
1425 /* static void cifs_sync_page(struct page *page)
1426 {
1427         struct address_space *mapping;
1428         struct inode *inode;
1429         unsigned long index = page->index;
1430         unsigned int rpages = 0;
1431         int rc = 0;
1432
1433         cFYI(1, ("sync page %p",page));
1434         mapping = page->mapping;
1435         if (!mapping)
1436                 return 0;
1437         inode = mapping->host;
1438         if (!inode)
1439                 return; */
1440
1441 /*      fill in rpages then 
1442         result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
1443
1444 /*      cFYI(1, ("rpages is %d for sync page of Index %ld", rpages, index));
1445
1446 #if 0
1447         if (rc < 0)
1448                 return rc;
1449         return 0;
1450 #endif
1451 } */
1452
1453 /*
1454  * As file closes, flush all cached write data for this inode checking
1455  * for write behind errors.
1456  */
1457 int cifs_flush(struct file *file, fl_owner_t id)
1458 {
1459         struct inode *inode = file->f_path.dentry->d_inode;
1460         int rc = 0;
1461
1462         /* Rather than do the steps manually:
1463            lock the inode for writing
1464            loop through pages looking for write behind data (dirty pages)
1465            coalesce into contiguous 16K (or smaller) chunks to write to server
1466            send to server (prefer in parallel)
1467            deal with writebehind errors
1468            unlock inode for writing
1469            filemap_fdatawrite appears easier for the time being */
1470
1471         rc = filemap_fdatawrite(inode->i_mapping);
1472         if (!rc) /* reset wb rc if we were able to write out dirty pages */
1473                 CIFS_I(inode)->write_behind_rc = 0;
1474                 
1475         cFYI(1, ("Flush inode %p file %p rc %d", inode, file, rc));
1476
1477         return rc;
1478 }
1479
1480 ssize_t cifs_user_read(struct file *file, char __user *read_data,
1481         size_t read_size, loff_t *poffset)
1482 {
1483         int rc = -EACCES;
1484         unsigned int bytes_read = 0;
1485         unsigned int total_read = 0;
1486         unsigned int current_read_size;
1487         struct cifs_sb_info *cifs_sb;
1488         struct cifsTconInfo *pTcon;
1489         int xid;
1490         struct cifsFileInfo *open_file;
1491         char *smb_read_data;
1492         char __user *current_offset;
1493         struct smb_com_read_rsp *pSMBr;
1494
1495         xid = GetXid();
1496         cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1497         pTcon = cifs_sb->tcon;
1498
1499         if (file->private_data == NULL) {
1500                 FreeXid(xid);
1501                 return -EBADF;
1502         }
1503         open_file = (struct cifsFileInfo *)file->private_data;
1504
1505         if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
1506                 cFYI(1, ("attempting read on write only file instance"));
1507         }
1508         for (total_read = 0, current_offset = read_data;
1509              read_size > total_read;
1510              total_read += bytes_read, current_offset += bytes_read) {
1511                 current_read_size = min_t(const int, read_size - total_read, 
1512                                           cifs_sb->rsize);
1513                 rc = -EAGAIN;
1514                 smb_read_data = NULL;
1515                 while (rc == -EAGAIN) {
1516                         int buf_type = CIFS_NO_BUFFER;
1517                         if ((open_file->invalidHandle) && 
1518                             (!open_file->closePend)) {
1519                                 rc = cifs_reopen_file(file, TRUE);
1520                                 if (rc != 0)
1521                                         break;
1522                         }
1523                         rc = CIFSSMBRead(xid, pTcon,
1524                                          open_file->netfid,
1525                                          current_read_size, *poffset,
1526                                          &bytes_read, &smb_read_data,
1527                                          &buf_type);
1528                         pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1529                         if (smb_read_data) {
1530                                 if (copy_to_user(current_offset,
1531                                                 smb_read_data +
1532                                                 4 /* RFC1001 length field */ +
1533                                                 le16_to_cpu(pSMBr->DataOffset),
1534                                                 bytes_read)) {
1535                                         rc = -EFAULT;
1536                                 }
1537
1538                                 if (buf_type == CIFS_SMALL_BUFFER)
1539                                         cifs_small_buf_release(smb_read_data);
1540                                 else if (buf_type == CIFS_LARGE_BUFFER)
1541                                         cifs_buf_release(smb_read_data);
1542                                 smb_read_data = NULL;
1543                         }
1544                 }
1545                 if (rc || (bytes_read == 0)) {
1546                         if (total_read) {
1547                                 break;
1548                         } else {
1549                                 FreeXid(xid);
1550                                 return rc;
1551                         }
1552                 } else {
1553                         cifs_stats_bytes_read(pTcon, bytes_read);
1554                         *poffset += bytes_read;
1555                 }
1556         }
1557         FreeXid(xid);
1558         return total_read;
1559 }
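/*
 * Worked example of the loop above (numbers are only illustrative):
 * with cifs_sb->rsize of 16384, a 50000 byte request is issued as four
 * CIFSSMBRead calls of 16384, 16384, 16384 and 848 bytes; *poffset and
 * current_offset advance by bytes_read on each pass and total_read
 * (50000) is returned to the caller.
 */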
1560
1561
1562 static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
1563         loff_t *poffset)
1564 {
1565         int rc = -EACCES;
1566         unsigned int bytes_read = 0;
1567         unsigned int total_read;
1568         unsigned int current_read_size;
1569         struct cifs_sb_info *cifs_sb;
1570         struct cifsTconInfo *pTcon;
1571         int xid;
1572         char *current_offset;
1573         struct cifsFileInfo *open_file;
1574         int buf_type = CIFS_NO_BUFFER;
1575
1576         xid = GetXid();
1577         cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1578         pTcon = cifs_sb->tcon;
1579
1580         if (file->private_data == NULL) {
1581                 FreeXid(xid);
1582                 return -EBADF;
1583         }
1584         open_file = (struct cifsFileInfo *)file->private_data;
1585
1586         if ((file->f_flags & O_ACCMODE) == O_WRONLY)
1587                 cFYI(1, ("attempting read on write only file instance"));
1588
1589         for (total_read = 0, current_offset = read_data; 
1590              read_size > total_read;
1591              total_read += bytes_read, current_offset += bytes_read) {
1592                 current_read_size = min_t(const int, read_size - total_read,
1593                                           cifs_sb->rsize);
1594                 /* For Windows ME and 9x we do not want to request more
1595                 than the server negotiated, since it would refuse the read */
1596                 if ((pTcon->ses) &&
1597                         !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
1598                         current_read_size = min_t(const int, current_read_size,
1599                                         pTcon->ses->server->maxBuf - 128);
1600                 }
1601                 rc = -EAGAIN;
1602                 while (rc == -EAGAIN) {
1603                         if ((open_file->invalidHandle) && 
1604                             (!open_file->closePend)) {
1605                                 rc = cifs_reopen_file(file, TRUE);
1606                                 if (rc != 0)
1607                                         break;
1608                         }
1609                         rc = CIFSSMBRead(xid, pTcon,
1610                                          open_file->netfid,
1611                                          current_read_size, *poffset,
1612                                          &bytes_read, &current_offset,
1613                                          &buf_type);
1614                 }
1615                 if (rc || (bytes_read == 0)) {
1616                         if (total_read) {
1617                                 break;
1618                         } else {
1619                                 FreeXid(xid);
1620                                 return rc;
1621                         }
1622                 } else {
1623                         cifs_stats_bytes_read(pTcon, bytes_read);
1624                         *poffset += bytes_read;
1625                 }
1626         }
1627         FreeXid(xid);
1628         return total_read;
1629 }
1630
1631 int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
1632 {
1633         struct dentry *dentry = file->f_path.dentry;
1634         int rc, xid;
1635
1636         xid = GetXid();
1637         rc = cifs_revalidate(dentry);
1638         if (rc) {
1639                 cFYI(1, ("Validation prior to mmap failed, error=%d", rc));
1640                 FreeXid(xid);
1641                 return rc;
1642         }
1643         rc = generic_file_mmap(file, vma);
1644         FreeXid(xid);
1645         return rc;
1646 }
1647
1648
1649 static void cifs_copy_cache_pages(struct address_space *mapping, 
1650         struct list_head *pages, int bytes_read, char *data,
1651         struct pagevec *plru_pvec)
1652 {
1653         struct page *page;
1654         char *target;
1655
1656         while (bytes_read > 0) {
1657                 if (list_empty(pages))
1658                         break;
1659
1660                 page = list_entry(pages->prev, struct page, lru);
1661                 list_del(&page->lru);
1662
1663                 if (add_to_page_cache(page, mapping, page->index,
1664                                       GFP_KERNEL)) {
1665                         page_cache_release(page);
1666                         cFYI(1, ("Add page cache failed"));
1667                         data += PAGE_CACHE_SIZE;
1668                         bytes_read -= PAGE_CACHE_SIZE;
1669                         continue;
1670                 }
1671
1672                 target = kmap_atomic(page, KM_USER0);
1673
1674                 if (PAGE_CACHE_SIZE > bytes_read) {
1675                         memcpy(target, data, bytes_read);
1676                         /* zero the tail end of this partial page */
1677                         memset(target + bytes_read, 0, 
1678                                PAGE_CACHE_SIZE - bytes_read);
1679                         bytes_read = 0;
1680                 } else {
1681                         memcpy(target, data, PAGE_CACHE_SIZE);
1682                         bytes_read -= PAGE_CACHE_SIZE;
1683                 }
1684                 kunmap_atomic(target, KM_USER0);
1685
1686                 flush_dcache_page(page);
1687                 SetPageUptodate(page);
1688                 unlock_page(page);
1689                 if (!pagevec_add(plru_pvec, page))
1690                         __pagevec_lru_add(plru_pvec);
1691                 data += PAGE_CACHE_SIZE;
1692         }
1693         return;
1694 }
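/*
 * Worked example (illustrative): with 4096 byte pages and bytes_read of
 * 10000, the first two pages receive full 4096 byte copies and the third
 * receives 1808 bytes with the remaining 2288 bytes zeroed, so a short
 * read from the server never leaves stale data in the tail of a page.
 */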
1695
1696 static int cifs_readpages(struct file *file, struct address_space *mapping,
1697         struct list_head *page_list, unsigned num_pages)
1698 {
1699         int rc = -EACCES;
1700         int xid;
1701         loff_t offset;
1702         struct page *page;
1703         struct cifs_sb_info *cifs_sb;
1704         struct cifsTconInfo *pTcon;
1705         int bytes_read = 0;
1706         unsigned int read_size, i;
1707         char *smb_read_data = NULL;
1708         struct smb_com_read_rsp *pSMBr;
1709         struct pagevec lru_pvec;
1710         struct cifsFileInfo *open_file;
1711         int buf_type = CIFS_NO_BUFFER;
1712
1713         xid = GetXid();
1714         if (file->private_data == NULL) {
1715                 FreeXid(xid);
1716                 return -EBADF;
1717         }
1718         open_file = (struct cifsFileInfo *)file->private_data;
1719         cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1720         pTcon = cifs_sb->tcon;
1721
1722         pagevec_init(&lru_pvec, 0);
1723
1724         for (i = 0; i < num_pages; ) {
1725                 unsigned contig_pages;
1726                 struct page *tmp_page;
1727                 unsigned long expected_index;
1728
1729                 if (list_empty(page_list))
1730                         break;
1731
1732                 page = list_entry(page_list->prev, struct page, lru);
1733                 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1734
1735                 /* count adjacent pages that we will read into */
1736                 contig_pages = 0;
1737                 expected_index = 
1738                         list_entry(page_list->prev, struct page, lru)->index;
1739                 list_for_each_entry_reverse(tmp_page, page_list, lru) {
1740                         if (tmp_page->index == expected_index) {
1741                                 contig_pages++;
1742                                 expected_index++;
1743                         } else
1744                                 break; 
1745                 }
1746                 if (contig_pages + i >  num_pages)
1747                         contig_pages = num_pages - i;
1748
1749                 /* for reads over a certain size could initiate async
1750                    read ahead */
1751
1752                 read_size = contig_pages * PAGE_CACHE_SIZE;
1753                 /* Read size needs to be in multiples of one page */
1754                 read_size = min_t(const unsigned int, read_size,
1755                                   cifs_sb->rsize & PAGE_CACHE_MASK);
1756
1757                 rc = -EAGAIN;
1758                 while (rc == -EAGAIN) {
1759                         if ((open_file->invalidHandle) && 
1760                             (!open_file->closePend)) {
1761                                 rc = cifs_reopen_file(file, TRUE);
1762                                 if (rc != 0)
1763                                         break;
1764                         }
1765
1766                         rc = CIFSSMBRead(xid, pTcon,
1767                                          open_file->netfid,
1768                                          read_size, offset,
1769                                          &bytes_read, &smb_read_data,
1770                                          &buf_type);
1771                         /* BB more RC checks ? */
1772                         if (rc == -EAGAIN) {
1773                                 if (smb_read_data) {
1774                                         if (buf_type == CIFS_SMALL_BUFFER)
1775                                                 cifs_small_buf_release(smb_read_data);
1776                                         else if (buf_type == CIFS_LARGE_BUFFER)
1777                                                 cifs_buf_release(smb_read_data);
1778                                         smb_read_data = NULL;
1779                                 }
1780                         }
1781                 }
1782                 if ((rc < 0) || (smb_read_data == NULL)) {
1783                         cFYI(1, ("Read error in readpages: %d", rc));
1784                         break;
1785                 } else if (bytes_read > 0) {
1786                         task_io_account_read(bytes_read);
1787                         pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1788                         cifs_copy_cache_pages(mapping, page_list, bytes_read,
1789                                 smb_read_data + 4 /* RFC1001 hdr */ +
1790                                 le16_to_cpu(pSMBr->DataOffset), &lru_pvec);
1791
1792                         i +=  bytes_read >> PAGE_CACHE_SHIFT;
1793                         cifs_stats_bytes_read(pTcon, bytes_read);
1794                         if ((int)(bytes_read & PAGE_CACHE_MASK) != bytes_read) {
1795                                 i++; /* account for partial page */
1796
1797                                 /* server copy of file can have smaller size 
1798                                    than client */
1799                                 /* BB do we need to verify this common case ? 
1800                                    this case is ok - if we are at server EOF 
1801                                    we will hit it on next read */
1802
1803                                 /* break; */
1804                         }
1805                 } else {
1806                         cFYI(1, ("No bytes read (%d) at offset %lld. "
1807                                  "Cleaning remaining pages from readahead list",
1808                                  bytes_read, offset));
1809                         /* BB turn off caching and do new lookup on 
1810                            file size at server? */
1811                         break;
1812                 }
1813                 if (smb_read_data) {
1814                         if (buf_type == CIFS_SMALL_BUFFER)
1815                                 cifs_small_buf_release(smb_read_data);
1816                         else if (buf_type == CIFS_LARGE_BUFFER)
1817                                 cifs_buf_release(smb_read_data);
1818                         smb_read_data = NULL;
1819                 }
1820                 bytes_read = 0;
1821         }
1822
1823         pagevec_lru_add(&lru_pvec);
1824
1825 /* need to free smb_read_data buf before exit */
1826         if (smb_read_data) {
1827                 if (buf_type == CIFS_SMALL_BUFFER)
1828                         cifs_small_buf_release(smb_read_data);
1829                 else if (buf_type == CIFS_LARGE_BUFFER)
1830                         cifs_buf_release(smb_read_data);
1831                 smb_read_data = NULL;
1832         } 
1833
1834         FreeXid(xid);
1835         return rc;
1836 }
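/*
 * Worked example (illustrative): with 4096 byte pages and an rsize of
 * 16384, at most four contiguous pages are pulled from the readahead
 * list per CIFSSMBRead call, since read_size is capped at
 * rsize & PAGE_CACHE_MASK (16384 here); a run of seven contiguous pages
 * would therefore be filled by one 16384 byte read followed by one
 * 12288 byte read.
 */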
1837
1838 static int cifs_readpage_worker(struct file *file, struct page *page,
1839         loff_t *poffset)
1840 {
1841         char *read_data;
1842         int rc;
1843
1844         page_cache_get(page);
1845         read_data = kmap(page);
1846         /* for reads over a certain size could initiate async read ahead */
1847
1848         rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
1849
1850         if (rc < 0)
1851                 goto io_error;
1852         else
1853                 cFYI(1, ("Bytes read %d", rc));
1854
1855         file->f_path.dentry->d_inode->i_atime =
1856                 current_fs_time(file->f_path.dentry->d_inode->i_sb);
1857
1858         if (PAGE_CACHE_SIZE > rc)
1859                 memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
1860
1861         flush_dcache_page(page);
1862         SetPageUptodate(page);
1863         rc = 0;
1864
1865 io_error:
1866         kunmap(page);
1867         page_cache_release(page);
1868         return rc;
1869 }
1870
1871 static int cifs_readpage(struct file *file, struct page *page)
1872 {
1873         loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1874         int rc = -EACCES;
1875         int xid;
1876
1877         xid = GetXid();
1878
1879         if (file->private_data == NULL) {
1880                 FreeXid(xid);
1881                 return -EBADF;
1882         }
1883
1884         cFYI(1, ("readpage %p at offset %d 0x%x\n", 
1885                  page, (int)offset, (int)offset));
1886
1887         rc = cifs_readpage_worker(file, page, &offset);
1888
1889         unlock_page(page);
1890
1891         FreeXid(xid);
1892         return rc;
1893 }
1894
1895 /* We do not want to update the file size from server for inodes
1896    open for write - to avoid races with writepage extending
1897    the file - in the future we could consider allowing
1898    refreshing the inode only on increases in the file size 
1899    but this is tricky to do without racing with writebehind
1900    page caching in the current Linux kernel design */
1901 int is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
1902 {
1903         struct cifsFileInfo *open_file = NULL;
1904
1905         if (cifsInode)
1906                 open_file = find_writable_file(cifsInode);
1907
1908         if (open_file) {
1909                 struct cifs_sb_info *cifs_sb;
1910
1911                 /* there is not actually a write pending, so let
1912                 this handle go free and allow it to be
1913                 closed if needed */
1914                 atomic_dec(&open_file->wrtPending);
1915
1916                 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
1917                 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
1918                         /* since there is no page cache to corrupt on
1919                         directio, we can change the size safely */
1920                         return 1;
1921                 }
1922
1923                 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
1924                         return 1;
1925
1926                 return 0;
1927         } else
1928                 return 1;
1929 }
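/*
 * Sketch of the intended use of is_size_safe_to_change by the inode
 * revalidation code elsewhere in cifs (illustrative, not copied from
 * that code): when fresh attributes arrive from the server, the cached
 * size is only updated when no writer could be racing with us, e.g.
 *
 *      if (is_size_safe_to_change(cifsInfo, end_of_file))
 *              i_size_write(inode, end_of_file);
 */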
1930
1931 static int cifs_prepare_write(struct file *file, struct page *page,
1932         unsigned from, unsigned to)
1933 {
1934         int rc = 0;
1935         loff_t i_size;
1936         loff_t offset;
1937
1938         cFYI(1, ("prepare write for page %p from %d to %d", page, from, to));
1939         if (PageUptodate(page))
1940                 return 0;
1941
1942         /* If we are writing a full page it will be up to date,
1943            no need to read from the server */
1944         if ((to == PAGE_CACHE_SIZE) && (from == 0)) {
1945                 SetPageUptodate(page);
1946                 return 0;
1947         }
1948
1949         offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1950         i_size = i_size_read(page->mapping->host);
1951
1952         if ((offset >= i_size) ||
1953             ((from == 0) && (offset + to) >= i_size)) {
1954                 /*
1955                  * We don't need to read data beyond the end of the file.
1956                  * zero it, and set the page uptodate
1957                  */
1958                 void *kaddr = kmap_atomic(page, KM_USER0);
1959
1960                 if (from)
1961                         memset(kaddr, 0, from);
1962                 if (to < PAGE_CACHE_SIZE)
1963                         memset(kaddr + to, 0, PAGE_CACHE_SIZE - to);
1964                 flush_dcache_page(page);
1965                 kunmap_atomic(kaddr, KM_USER0);
1966                 SetPageUptodate(page);
1967         } else if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
1968                 /* might as well read a page, it is fast enough */
1969                 rc = cifs_readpage_worker(file, page, &offset);
1970         } else {
1971                 /* we could try using another file handle if there is one,
1972                    but how would we lock it to prevent a close of that handle
1973                    racing with this read? In any case
1974                    this will be written out by commit_write, so it is fine */
1975         }
1976
1977         /* we do not need to pass errors back,
1978            e.g. if we do not have read access to the file,
1979            because cifs_commit_write will do the right thing. -- shaggy */
1980
1981         return 0;
1982 }
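/*
 * Worked example (illustrative): writing 100 bytes at the start of a
 * page that lies beyond EOF gives from == 0 and to == 100 with
 * offset >= i_size, so the page is zeroed from byte 100 onward and
 * marked uptodate without any read from the server; the caller's copy
 * into bytes 0-99 plus the zeroed tail fully describe the page.
 */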
1983
1984 const struct address_space_operations cifs_addr_ops = {
1985         .readpage = cifs_readpage,
1986         .readpages = cifs_readpages,
1987         .writepage = cifs_writepage,
1988         .writepages = cifs_writepages,
1989         .prepare_write = cifs_prepare_write,
1990         .commit_write = cifs_commit_write,
1991         .set_page_dirty = __set_page_dirty_nobuffers,
1992         /* .sync_page = cifs_sync_page, */
1993         /* .direct_IO = */
1994 };
1995
1996 /*
1997  * cifs_readpages requires the server to support a buffer large enough to
1998  * contain the header plus one complete page of data.  Otherwise, we need
1999  * to leave cifs_readpages out of the address space operations.
2000  */
2001 const struct address_space_operations cifs_addr_ops_smallbuf = {
2002         .readpage = cifs_readpage,
2003         .writepage = cifs_writepage,
2004         .writepages = cifs_writepages,
2005         .prepare_write = cifs_prepare_write,
2006         .commit_write = cifs_commit_write,
2007         .set_page_dirty = __set_page_dirty_nobuffers,
2008         /* .sync_page = cifs_sync_page, */
2009         /* .direct_IO = */
2010 };
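/*
 * Sketch of how the two operation tables above are expected to be
 * selected when an inode is initialized; the exact expression is an
 * assumption about the inode setup code, shown only for orientation:
 *
 *      if (cifs_sb->tcon->ses->server->maxBuf <
 *                      PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE)
 *              inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
 *      else
 *              inode->i_data.a_ops = &cifs_addr_ops;
 */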