/*
 * SUNRPC: Replace svc_addr_u by sockaddr_storage
 * (linux-3.10.git / fs / nfsd / nfs4state.c)
 */
1 /*
2 *  Copyright (c) 2001 The Regents of the University of Michigan.
3 *  All rights reserved.
4 *
5 *  Kendrick Smith <kmsmith@umich.edu>
6 *  Andy Adamson <kandros@umich.edu>
7 *
8 *  Redistribution and use in source and binary forms, with or without
9 *  modification, are permitted provided that the following conditions
10 *  are met:
11 *
12 *  1. Redistributions of source code must retain the above copyright
13 *     notice, this list of conditions and the following disclaimer.
14 *  2. Redistributions in binary form must reproduce the above copyright
15 *     notice, this list of conditions and the following disclaimer in the
16 *     documentation and/or other materials provided with the distribution.
17 *  3. Neither the name of the University nor the names of its
18 *     contributors may be used to endorse or promote products derived
19 *     from this software without specific prior written permission.
20 *
21 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
22 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
23 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
28 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
29 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
30 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
31 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35 #include <linux/file.h>
36 #include <linux/fs.h>
37 #include <linux/slab.h>
38 #include <linux/namei.h>
39 #include <linux/swap.h>
40 #include <linux/pagemap.h>
41 #include <linux/sunrpc/svcauth_gss.h>
42 #include <linux/sunrpc/clnt.h>
43 #include "xdr4.h"
44 #include "vfs.h"
45
#define NFSDDBG_FACILITY                NFSDDBG_PROC

/* Globals */
time_t nfsd4_lease = 90;     /* default lease time */
time_t nfsd4_grace = 90;
static time_t boot_time;
/* Monotonic counters used to mint unique owner/file/delegation ids: */
static u32 current_ownerid = 1;
static u32 current_fileid = 1;
static u32 current_delegid = 1;
static stateid_t zerostateid;             /* bits all 0 */
static stateid_t onestateid;              /* bits all 1 */
static u64 current_sessionid = 1;

/* Recognize the two special stateids (all-zeros and all-ones): */
#define ZERO_STATEID(stateid) (!memcmp((stateid), &zerostateid, sizeof(stateid_t)))
#define ONE_STATEID(stateid)  (!memcmp((stateid), &onestateid, sizeof(stateid_t)))

/* forward declarations */
static int check_for_locks(struct nfs4_file *filp, struct nfs4_lockowner *lowner);

/* Locking: */

/* Currently used for almost all code touching nfsv4 state: */
static DEFINE_MUTEX(client_mutex);

/*
 * Currently used for the del_recall_lru and file hash table.  In an
 * effort to decrease the scope of the client_mutex, this spinlock may
 * eventually cover more:
 */
static DEFINE_SPINLOCK(recall_lock);
76
/*
 * Slab caches for the nfsv4 state objects.  Statics are implicitly
 * zero-initialized, so no explicit "= NULL" (per kernel coding style /
 * checkpatch: "do not initialise statics to NULL").
 */
static struct kmem_cache *openowner_slab;
static struct kmem_cache *lockowner_slab;
static struct kmem_cache *file_slab;
static struct kmem_cache *stateid_slab;
static struct kmem_cache *deleg_slab;
82
/* Take the global nfsv4 state mutex (see client_mutex above). */
void
nfs4_lock_state(void)
{
        mutex_lock(&client_mutex);
}

/* Release the mutex taken by nfs4_lock_state(). */
void
nfs4_unlock_state(void)
{
        mutex_unlock(&client_mutex);
}
94
95 static inline u32
96 opaque_hashval(const void *ptr, int nbytes)
97 {
98         unsigned char *cptr = (unsigned char *) ptr;
99
100         u32 x = 0;
101         while (nbytes--) {
102                 x *= 37;
103                 x += *cptr++;
104         }
105         return x;
106 }
107
/* Delegations queued for recall (linked via dl_recall_lru); entries are
 * added/removed under recall_lock (see unhash_delegation). */
static struct list_head del_recall_lru;

/*
 * Drop a reference to an nfs4_file.  On the final put the file is
 * unhashed under recall_lock, its inode reference is released, and the
 * structure goes back to file_slab.
 */
static inline void
put_nfs4_file(struct nfs4_file *fi)
{
        /* atomic_dec_and_lock() takes recall_lock only when the count
         * reaches zero: */
        if (atomic_dec_and_lock(&fi->fi_ref, &recall_lock)) {
                list_del(&fi->fi_hash);
                spin_unlock(&recall_lock);
                iput(fi->fi_inode);
                kmem_cache_free(file_slab, fi);
        }
}

/* Take a reference to an nfs4_file. */
static inline void
get_nfs4_file(struct nfs4_file *fi)
{
        atomic_inc(&fi->fi_ref);
}

/* Current number of outstanding delegations, and the admin-set cap: */
static int num_delegations;
unsigned int max_delegations;

/*
 * Open owner state (share locks)
 */

/* hash tables for open owners */
#define OPEN_OWNER_HASH_BITS              8
#define OPEN_OWNER_HASH_SIZE             (1 << OPEN_OWNER_HASH_BITS)
#define OPEN_OWNER_HASH_MASK             (OPEN_OWNER_HASH_SIZE - 1)

/* Hash an open owner by its numeric id. */
static unsigned int open_ownerid_hashval(const u32 id)
{
        return id & OPEN_OWNER_HASH_MASK;
}
143
144 static unsigned int open_ownerstr_hashval(u32 clientid, struct xdr_netobj *ownername)
145 {
146         unsigned int ret;
147
148         ret = opaque_hashval(ownername->data, ownername->len);
149         ret += clientid;
150         return ret & OPEN_OWNER_HASH_MASK;
151 }
152
/* Open owners, hashed by id and by (clientid, owner-string): */
static struct list_head open_ownerid_hashtbl[OPEN_OWNER_HASH_SIZE];
static struct list_head open_ownerstr_hashtbl[OPEN_OWNER_HASH_SIZE];

/* hash table for nfs4_file */
#define FILE_HASH_BITS                   8
#define FILE_HASH_SIZE                  (1 << FILE_HASH_BITS)

/* hash table for (open)nfs4_ol_stateid */
#define STATEID_HASH_BITS              10
#define STATEID_HASH_SIZE              (1 << STATEID_HASH_BITS)
#define STATEID_HASH_MASK              (STATEID_HASH_SIZE - 1)

/* Hash an nfs4_file by the address of its inode. */
static unsigned int file_hashval(struct inode *ino)
{
        /* XXX: why are we hashing on inode pointer, anyway? */
        return hash_ptr(ino, FILE_HASH_BITS);
}

/* Hash a stateid by its opaque part (generation excluded). */
static unsigned int stateid_hashval(stateid_t *s)
{
        return opaque_hashval(&s->si_opaque, sizeof(stateid_opaque_t)) & STATEID_HASH_MASK;
}

static struct list_head file_hashtbl[FILE_HASH_SIZE];
static struct list_head stateid_hashtbl[STATEID_HASH_SIZE];

/*
 * Bump the access count for open mode @oflag.  The caller must hold an
 * open struct file for that mode, or an O_RDWR one -- hence the BUG_ON.
 */
static void __nfs4_file_get_access(struct nfs4_file *fp, int oflag)
{
        BUG_ON(!(fp->fi_fds[oflag] || fp->fi_fds[O_RDWR]));
        atomic_inc(&fp->fi_access[oflag]);
}
184
185 static void nfs4_file_get_access(struct nfs4_file *fp, int oflag)
186 {
187         if (oflag == O_RDWR) {
188                 __nfs4_file_get_access(fp, O_RDONLY);
189                 __nfs4_file_get_access(fp, O_WRONLY);
190         } else
191                 __nfs4_file_get_access(fp, oflag);
192 }
193
194 static void nfs4_file_put_fd(struct nfs4_file *fp, int oflag)
195 {
196         if (fp->fi_fds[oflag]) {
197                 fput(fp->fi_fds[oflag]);
198                 fp->fi_fds[oflag] = NULL;
199         }
200 }
201
/*
 * Drop one access reference for mode @oflag; when that mode's count
 * hits zero, release both the O_RDWR file and the mode-specific file.
 */
static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
{
        if (atomic_dec_and_test(&fp->fi_access[oflag])) {
                nfs4_file_put_fd(fp, O_RDWR);
                nfs4_file_put_fd(fp, oflag);
        }
}
209
210 static void nfs4_file_put_access(struct nfs4_file *fp, int oflag)
211 {
212         if (oflag == O_RDWR) {
213                 __nfs4_file_put_access(fp, O_RDONLY);
214                 __nfs4_file_put_access(fp, O_WRONLY);
215         } else
216                 __nfs4_file_put_access(fp, oflag);
217 }
218
219 static inline void hash_stid(struct nfs4_stid *stid)
220 {
221         stateid_t *s = &stid->sc_stateid;
222         unsigned int hashval;
223
224         hashval = stateid_hashval(s);
225         list_add(&stid->sc_hash, &stateid_hashtbl[hashval]);
226 }
227
/*
 * Allocate and initialize a read delegation for client @clp on the file
 * behind open stateid @stp.  Returns NULL if the delegation type is not
 * NFS4_OPEN_DELEGATE_READ, the file has already seen a conflict, the
 * max_delegations cap is exceeded, or allocation fails.  The returned
 * delegation holds one reference (dl_count), a reference on the file,
 * and is hashed into stateid_hashtbl.
 */
static struct nfs4_delegation *
alloc_init_deleg(struct nfs4_client *clp, struct nfs4_ol_stateid *stp, struct svc_fh *current_fh, u32 type)
{
        struct nfs4_delegation *dp;
        struct nfs4_file *fp = stp->st_file;

        dprintk("NFSD alloc_init_deleg\n");
        /*
         * Major work on the lease subsystem (for example, to support
         * callbacks on stat) will be required before we can support
         * write delegations properly.
         */
        if (type != NFS4_OPEN_DELEGATE_READ)
                return NULL;
        if (fp->fi_had_conflict)
                return NULL;
        if (num_delegations > max_delegations)
                return NULL;
        dp = kmem_cache_alloc(deleg_slab, GFP_KERNEL);
        if (dp == NULL)
                return dp;
        num_delegations++;
        INIT_LIST_HEAD(&dp->dl_perfile);
        INIT_LIST_HEAD(&dp->dl_perclnt);
        INIT_LIST_HEAD(&dp->dl_recall_lru);
        dp->dl_client = clp;
        get_nfs4_file(fp);
        dp->dl_file = fp;
        dp->dl_type = type;
        /* Build the delegation stateid: boot time + fresh delegation id. */
        dp->dl_stid.sc_type = NFS4_DELEG_STID;
        dp->dl_stid.sc_stateid.si_boot = boot_time;
        dp->dl_stid.sc_stateid.si_stateownerid = current_delegid++;
        dp->dl_stid.sc_stateid.si_fileid = 0;
        dp->dl_stid.sc_stateid.si_generation = 1;
        hash_stid(&dp->dl_stid);
        fh_copy_shallow(&dp->dl_fh, &current_fh->fh_handle);
        dp->dl_time = 0;
        atomic_set(&dp->dl_count, 1);
        INIT_WORK(&dp->dl_recall.cb_work, nfsd4_do_callback_rpc);
        return dp;
}

/*
 * Drop a reference on @dp; on the final put, release the file reference
 * and free the delegation.
 */
void
nfs4_put_delegation(struct nfs4_delegation *dp)
{
        if (atomic_dec_and_test(&dp->dl_count)) {
                dprintk("NFSD: freeing dp %p\n",dp);
                put_nfs4_file(dp->dl_file);
                kmem_cache_free(deleg_slab, dp);
                num_delegations--;
        }
}

/*
 * Drop one delegation lease reference on @fp; when the last delegatee
 * is gone, remove the lease and close the file used for delegations.
 */
static void nfs4_put_deleg_lease(struct nfs4_file *fp)
{
        if (atomic_dec_and_test(&fp->fi_delegees)) {
                vfs_setlease(fp->fi_deleg_file, F_UNLCK, &fp->fi_lease);
                fp->fi_lease = NULL;
                fput(fp->fi_deleg_file);
                fp->fi_deleg_file = NULL;
        }
}

/* Called under the state lock. */
static void
unhash_delegation(struct nfs4_delegation *dp)
{
        list_del_init(&dp->dl_stid.sc_hash);
        list_del_init(&dp->dl_perclnt);
        /* The per-file and recall lists are protected by recall_lock: */
        spin_lock(&recall_lock);
        list_del_init(&dp->dl_perfile);
        list_del_init(&dp->dl_recall_lru);
        spin_unlock(&recall_lock);
        nfs4_put_deleg_lease(dp->dl_file);
        nfs4_put_delegation(dp);
}

/* 
 * SETCLIENTID state 
 */

/* client_lock protects the client lru list and session hash table */
static DEFINE_SPINLOCK(client_lock);

/* Hash tables for nfs4_clientid state */
#define CLIENT_HASH_BITS                 4
#define CLIENT_HASH_SIZE                (1 << CLIENT_HASH_BITS)
#define CLIENT_HASH_MASK                (CLIENT_HASH_SIZE - 1)

/* Hash a client by its numeric clientid. */
static unsigned int clientid_hashval(u32 id)
{
        return id & CLIENT_HASH_MASK;
}

/* Hash a client by its string name; only the first 8 bytes are used
 * (callers presumably guarantee at least that much -- TODO confirm). */
static unsigned int clientstr_hashval(const char *name)
{
        return opaque_hashval(name, 8) & CLIENT_HASH_MASK;
}
326
327 /*
328  * reclaim_str_hashtbl[] holds known client info from previous reset/reboot
329  * used in reboot/reset lease grace period processing
330  *
331  * conf_id_hashtbl[], and conf_str_hashtbl[] hold confirmed
332  * setclientid_confirmed info. 
333  *
 * unconf_str_hashtbl[] and unconf_id_hashtbl[] hold unconfirmed 
335  * setclientid info.
336  *
337  * client_lru holds client queue ordered by nfs4_client.cl_time
338  * for lease renewal.
339  *
340  * close_lru holds (open) stateowner queue ordered by nfs4_stateowner.so_time
341  * for last close replay.
342  */
343 static struct list_head reclaim_str_hashtbl[CLIENT_HASH_SIZE];
344 static int reclaim_str_hashtbl_size = 0;
345 static struct list_head conf_id_hashtbl[CLIENT_HASH_SIZE];
346 static struct list_head conf_str_hashtbl[CLIENT_HASH_SIZE];
347 static struct list_head unconf_str_hashtbl[CLIENT_HASH_SIZE];
348 static struct list_head unconf_id_hashtbl[CLIENT_HASH_SIZE];
349 static struct list_head client_lru;
350 static struct list_head close_lru;
351
352 /*
353  * We store the NONE, READ, WRITE, and BOTH bits separately in the
354  * st_{access,deny}_bmap field of the stateid, in order to track not
355  * only what share bits are currently in force, but also what
356  * combinations of share bits previous opens have used.  This allows us
357  * to enforce the recommendation of rfc 3530 14.2.19 that the server
358  * return an error if the client attempt to downgrade to a combination
359  * of share bits not explicable by closing some of its previous opens.
360  *
361  * XXX: This enforcement is actually incomplete, since we don't keep
362  * track of access/deny bit combinations; so, e.g., we allow:
363  *
364  *      OPEN allow read, deny write
365  *      OPEN allow both, deny none
366  *      DOWNGRADE allow read, deny none
367  *
368  * which we should reject.
369  */
/* Collapse the st_access_bmap bit positions back into a share-access mask. */
static void
set_access(unsigned int *access, unsigned long bmap) {
        unsigned int result = 0;
        int bit;

        for (bit = 1; bit < 4; bit++) {
                if (test_bit(bit, &bmap))
                        result |= bit;
        }
        *access = result;
}
380
/*
 * Collapse the st_deny_bmap bit positions back into a share-deny mask.
 * Start at bit 1 like set_access(): bit 0 would only OR in the value 0,
 * a no-op, so the old "i = 0" start was dead work and inconsistent with
 * its sibling.  Behavior is unchanged.
 */
static void
set_deny(unsigned int *deny, unsigned long bmap) {
        int i;

        *deny = 0;
        for (i = 1; i < 4; i++) {
                if (test_bit(i, &bmap))
                        *deny |= i;
        }
}
391
392 static int
393 test_share(struct nfs4_ol_stateid *stp, struct nfsd4_open *open) {
394         unsigned int access, deny;
395
396         set_access(&access, stp->st_access_bmap);
397         set_deny(&deny, stp->st_deny_bmap);
398         if ((access & open->op_share_deny) || (deny & open->op_share_access))
399                 return 0;
400         return 1;
401 }
402
/*
 * Map an NFSv4 share-access value to the matching open(2) mode.
 * BUG()s on a value outside NFS4_SHARE_ACCESS_BOTH (callers must have
 * validated the access bits already).
 */
static int nfs4_access_to_omode(u32 access)
{
        switch (access & NFS4_SHARE_ACCESS_BOTH) {
        case NFS4_SHARE_ACCESS_READ:
                return O_RDONLY;
        case NFS4_SHARE_ACCESS_WRITE:
                return O_WRONLY;
        case NFS4_SHARE_ACCESS_BOTH:
                return O_RDWR;
        }
        BUG();
}

/* Remove @stp from the stateid hash and its per-file/per-owner lists. */
static void unhash_generic_stateid(struct nfs4_ol_stateid *stp)
{
        list_del(&stp->st_stid.sc_hash);
        list_del(&stp->st_perfile);
        list_del(&stp->st_perstateowner);
}

/*
 * Release every file-access reference recorded in st_access_bmap, then
 * drop the stateid's reference on its nfs4_file.
 */
static void close_generic_stateid(struct nfs4_ol_stateid *stp)
{
        int i;

        if (stp->st_access_bmap) {
                for (i = 1; i < 4; i++) {
                        if (test_bit(i, &stp->st_access_bmap))
                                nfs4_file_put_access(stp->st_file,
                                                nfs4_access_to_omode(i));
                        __clear_bit(i, &stp->st_access_bmap);
                }
        }
        put_nfs4_file(stp->st_file);
        stp->st_file = NULL;
}

/* Close out a stateid's file state and free it. */
static void free_generic_stateid(struct nfs4_ol_stateid *stp)
{
        close_generic_stateid(stp);
        kmem_cache_free(stateid_slab, stp);
}

/*
 * Tear down a lock stateid: unhash it, remove any posix locks held by
 * its lockowner on the file, and free it.
 */
static void release_lock_stateid(struct nfs4_ol_stateid *stp)
{
        struct file *file;

        unhash_generic_stateid(stp);
        file = find_any_file(stp->st_file);
        if (file)
                locks_remove_posix(file, (fl_owner_t)lockowner(stp->st_stateowner));
        free_generic_stateid(stp);
}

/* Unhash @lo and release all of its lock stateids. */
static void unhash_lockowner(struct nfs4_lockowner *lo)
{
        struct nfs4_ol_stateid *stp;

        list_del(&lo->lo_owner.so_idhash);
        list_del(&lo->lo_owner.so_strhash);
        list_del(&lo->lo_perstateid);
        while (!list_empty(&lo->lo_owner.so_stateids)) {
                stp = list_first_entry(&lo->lo_owner.so_stateids,
                                struct nfs4_ol_stateid, st_perstateowner);
                release_lock_stateid(stp);
        }
}

/* Unhash and free a lockowner. */
static void release_lockowner(struct nfs4_lockowner *lo)
{
        unhash_lockowner(lo);
        nfs4_free_lockowner(lo);
}
475
476 static void
477 release_stateid_lockowners(struct nfs4_ol_stateid *open_stp)
478 {
479         struct nfs4_lockowner *lo;
480
481         while (!list_empty(&open_stp->st_lockowners)) {
482                 lo = list_entry(open_stp->st_lockowners.next,
483                                 struct nfs4_lockowner, lo_perstateid);
484                 release_lockowner(lo);
485         }
486 }
487
/* Tear down an open stateid and all lock state derived from it. */
static void release_open_stateid(struct nfs4_ol_stateid *stp)
{
        unhash_generic_stateid(stp);
        release_stateid_lockowners(stp);
        free_generic_stateid(stp);
}

/* Unhash @oo and release all of its open stateids. */
static void unhash_openowner(struct nfs4_openowner *oo)
{
        struct nfs4_ol_stateid *stp;

        list_del(&oo->oo_owner.so_idhash);
        list_del(&oo->oo_owner.so_strhash);
        list_del(&oo->oo_perclient);
        while (!list_empty(&oo->oo_owner.so_stateids)) {
                stp = list_first_entry(&oo->oo_owner.so_stateids,
                                struct nfs4_ol_stateid, st_perstateowner);
                release_open_stateid(stp);
        }
}

/* Unhash an openowner, drop it from the close lru, and free it. */
static void release_openowner(struct nfs4_openowner *oo)
{
        unhash_openowner(oo);
        list_del(&oo->oo_close_lru);
        nfs4_free_openowner(oo);
}

#define SESSION_HASH_SIZE       512
static struct list_head sessionid_hashtbl[SESSION_HASH_SIZE];

/* Hash a sessionid by the sequence field embedded in it. */
static inline int
hash_sessionid(struct nfs4_sessionid *sessionid)
{
        struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;

        return sid->sequence % SESSION_HASH_SIZE;
}

/* Debug helper: print a sessionid as four 32-bit words. */
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
        u32 *ptr = (u32 *)(&sessionid->data[0]);
        dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
}

/* Fill in a new sessionid: clientid + fresh sequence number. */
static void
gen_sessionid(struct nfsd4_session *ses)
{
        struct nfs4_client *clp = ses->se_client;
        struct nfsd4_sessionid *sid;

        sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
        sid->clientid = clp->cl_clientid;
        sid->sequence = current_sessionid++;
        sid->reserved = 0;
}
545
546 /*
547  * The protocol defines ca_maxresponssize_cached to include the size of
548  * the rpc header, but all we need to cache is the data starting after
549  * the end of the initial SEQUENCE operation--the rest we regenerate
550  * each time.  Therefore we can advertise a ca_maxresponssize_cached
551  * value that is the number of bytes in our cache plus a few additional
552  * bytes.  In order to stay on the safe side, and not promise more than
553  * we can cache, those additional bytes must be the minimum possible: 24
554  * bytes of rpc header (xid through accept state, with AUTH_NULL
555  * verifier), 12 for the compound header (with zero-length tag), and 44
556  * for the SEQUENCE op response:
557  */
558 #define NFSD_MIN_HDR_SEQ_SZ  (24 + 12 + 44)
559
560 static void
561 free_session_slots(struct nfsd4_session *ses)
562 {
563         int i;
564
565         for (i = 0; i < ses->se_fchannel.maxreqs; i++)
566                 kfree(ses->se_slots[i]);
567 }
568
569 /*
570  * We don't actually need to cache the rpc and session headers, so we
571  * can allocate a little less for each slot:
572  */
573 static inline int slot_bytes(struct nfsd4_channel_attrs *ca)
574 {
575         return ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
576 }
577
578 static int nfsd4_sanitize_slot_size(u32 size)
579 {
580         size -= NFSD_MIN_HDR_SEQ_SZ; /* We don't cache the rpc header */
581         size = min_t(u32, size, NFSD_SLOT_CACHE_SIZE);
582
583         return size;
584 }
585
586 /*
587  * XXX: If we run out of reserved DRC memory we could (up to a point)
588  * re-negotiate active sessions and reduce their slot usage to make
589  * rooom for new connections. For now we just fail the create session.
590  */
591 static int nfsd4_get_drc_mem(int slotsize, u32 num)
592 {
593         int avail;
594
595         num = min_t(u32, num, NFSD_MAX_SLOTS_PER_SESSION);
596
597         spin_lock(&nfsd_drc_lock);
598         avail = min_t(int, NFSD_MAX_MEM_PER_SESSION,
599                         nfsd_drc_max_mem - nfsd_drc_mem_used);
600         num = min_t(int, num, avail / slotsize);
601         nfsd_drc_mem_used += num * slotsize;
602         spin_unlock(&nfsd_drc_lock);
603
604         return num;
605 }
606
607 static void nfsd4_put_drc_mem(int slotsize, int num)
608 {
609         spin_lock(&nfsd_drc_lock);
610         nfsd_drc_mem_used -= slotsize * num;
611         spin_unlock(&nfsd_drc_lock);
612 }
613
614 static struct nfsd4_session *alloc_session(int slotsize, int numslots)
615 {
616         struct nfsd4_session *new;
617         int mem, i;
618
619         BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
620                         + sizeof(struct nfsd4_session) > PAGE_SIZE);
621         mem = numslots * sizeof(struct nfsd4_slot *);
622
623         new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
624         if (!new)
625                 return NULL;
626         /* allocate each struct nfsd4_slot and data cache in one piece */
627         for (i = 0; i < numslots; i++) {
628                 mem = sizeof(struct nfsd4_slot) + slotsize;
629                 new->se_slots[i] = kzalloc(mem, GFP_KERNEL);
630                 if (!new->se_slots[i])
631                         goto out_free;
632         }
633         return new;
634 out_free:
635         while (i--)
636                 kfree(new->se_slots[i]);
637         kfree(new);
638         return NULL;
639 }
640
641 static void init_forechannel_attrs(struct nfsd4_channel_attrs *new, struct nfsd4_channel_attrs *req, int numslots, int slotsize)
642 {
643         u32 maxrpc = nfsd_serv->sv_max_mesg;
644
645         new->maxreqs = numslots;
646         new->maxresp_cached = min_t(u32, req->maxresp_cached,
647                                         slotsize + NFSD_MIN_HDR_SEQ_SZ);
648         new->maxreq_sz = min_t(u32, req->maxreq_sz, maxrpc);
649         new->maxresp_sz = min_t(u32, req->maxresp_sz, maxrpc);
650         new->maxops = min_t(u32, req->maxops, NFSD_MAX_OPS_PER_COMPOUND);
651 }
652
653 static void free_conn(struct nfsd4_conn *c)
654 {
655         svc_xprt_put(c->cn_xprt);
656         kfree(c);
657 }
658
659 static void nfsd4_conn_lost(struct svc_xpt_user *u)
660 {
661         struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
662         struct nfs4_client *clp = c->cn_session->se_client;
663
664         spin_lock(&clp->cl_lock);
665         if (!list_empty(&c->cn_persession)) {
666                 list_del(&c->cn_persession);
667                 free_conn(c);
668         }
669         spin_unlock(&clp->cl_lock);
670         nfsd4_probe_callback(clp);
671 }
672
673 static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
674 {
675         struct nfsd4_conn *conn;
676
677         conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
678         if (!conn)
679                 return NULL;
680         svc_xprt_get(rqstp->rq_xprt);
681         conn->cn_xprt = rqstp->rq_xprt;
682         conn->cn_flags = flags;
683         INIT_LIST_HEAD(&conn->cn_xpt_user.list);
684         return conn;
685 }
686
/* Link @conn into @ses; caller must hold the client's cl_lock. */
static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
        conn->cn_session = ses;
        list_add(&conn->cn_persession, &ses->se_conns);
}

/* Link @conn into @ses under the client's cl_lock. */
static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
        struct nfs4_client *clp = ses->se_client;

        spin_lock(&clp->cl_lock);
        __nfsd4_hash_conn(conn, ses);
        spin_unlock(&clp->cl_lock);
}

/* Register for transport-down notification; returns xprt-layer status. */
static int nfsd4_register_conn(struct nfsd4_conn *conn)
{
        conn->cn_xpt_user.callback = nfsd4_conn_lost;
        return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
}

/*
 * Create a connection on @rqstp's transport for @ses in direction @dir.
 * Returns nfserr_jukebox only on allocation failure; a transport that
 * is already down is handled by running the lost-connection path.
 */
static __be32 nfsd4_new_conn(struct svc_rqst *rqstp, struct nfsd4_session *ses, u32 dir)
{
        struct nfsd4_conn *conn;
        int ret;

        conn = alloc_conn(rqstp, dir);
        if (!conn)
                return nfserr_jukebox;
        nfsd4_hash_conn(conn, ses);
        ret = nfsd4_register_conn(conn);
        if (ret)
                /* oops; xprt is already down: */
                nfsd4_conn_lost(&conn->cn_xpt_user);
        return nfs_ok;
}
723
724 static __be32 nfsd4_new_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_session *ses)
725 {
726         u32 dir = NFS4_CDFC4_FORE;
727
728         if (ses->se_flags & SESSION4_BACK_CHAN)
729                 dir |= NFS4_CDFC4_BACK;
730
731         return nfsd4_new_conn(rqstp, ses, dir);
732 }
733
/* must be called under client_lock */
static void nfsd4_del_conns(struct nfsd4_session *s)
{
        struct nfs4_client *clp = s->se_client;
        struct nfsd4_conn *c;

        /* cl_lock is dropped around unregister/free, which may sleep or
         * take other locks; the list is re-checked after reacquiring. */
        spin_lock(&clp->cl_lock);
        while (!list_empty(&s->se_conns)) {
                c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
                list_del_init(&c->cn_persession);
                spin_unlock(&clp->cl_lock);

                unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
                free_conn(c);

                spin_lock(&clp->cl_lock);
        }
        spin_unlock(&clp->cl_lock);
}

/*
 * kref release for a session: tear down its connections, return its
 * DRC memory reservation, and free the slots and the session itself.
 */
void free_session(struct kref *kref)
{
        struct nfsd4_session *ses;
        int mem;

        ses = container_of(kref, struct nfsd4_session, se_ref);
        nfsd4_del_conns(ses);
        spin_lock(&nfsd_drc_lock);
        mem = ses->se_fchannel.maxreqs * slot_bytes(&ses->se_fchannel);
        nfsd_drc_mem_used -= mem;
        spin_unlock(&nfsd_drc_lock);
        free_session_slots(ses);
        kfree(ses);
}
768
/*
 * Allocate and initialize a session for @clp from the CREATE_SESSION
 * arguments in @cses: reserve DRC memory, build the slot table, hash
 * the session, and set up its initial connection.  Returns NULL on
 * failure (memory exhaustion or connection setup failure).
 */
static struct nfsd4_session *alloc_init_session(struct svc_rqst *rqstp, struct nfs4_client *clp, struct nfsd4_create_session *cses)
{
        struct nfsd4_session *new;
        struct nfsd4_channel_attrs *fchan = &cses->fore_channel;
        int numslots, slotsize;
        int status;
        int idx;

        /*
         * Note decreasing slot size below client's request may
         * make it difficult for client to function correctly, whereas
         * decreasing the number of slots will (just?) affect
         * performance.  When short on memory we therefore prefer to
         * decrease number of slots instead of their size.
         */
        slotsize = nfsd4_sanitize_slot_size(fchan->maxresp_cached);
        numslots = nfsd4_get_drc_mem(slotsize, fchan->maxreqs);
        if (numslots < 1)
                return NULL;

        new = alloc_session(slotsize, numslots);
        if (!new) {
                nfsd4_put_drc_mem(slotsize, fchan->maxreqs);
                return NULL;
        }
        init_forechannel_attrs(&new->se_fchannel, fchan, numslots, slotsize);

        new->se_client = clp;
        gen_sessionid(new);

        INIT_LIST_HEAD(&new->se_conns);

        new->se_cb_seq_nr = 1;
        new->se_flags = cses->flags;
        new->se_cb_prog = cses->callback_prog;
        kref_init(&new->se_ref);
        idx = hash_sessionid(&new->se_sessionid);
        /* client_lock guards the session hash; cl_lock the per-client list: */
        spin_lock(&client_lock);
        list_add(&new->se_hash, &sessionid_hashtbl[idx]);
        spin_lock(&clp->cl_lock);
        list_add(&new->se_perclnt, &clp->cl_sessions);
        spin_unlock(&clp->cl_lock);
        spin_unlock(&client_lock);

        status = nfsd4_new_conn_from_crses(rqstp, new);
        /* whoops: benny points out, status is ignored! (err, or bogus) */
        if (status) {
                free_session(&new->se_ref);
                return NULL;
        }
        if (cses->flags & SESSION4_BACK_CHAN) {
                struct sockaddr *sa = svc_addr(rqstp);
                /*
                 * This is a little silly; with sessions there's no real
                 * use for the callback address.  Use the peer address
                 * as a reasonable default for now, but consider fixing
                 * the rpc client not to require an address in the
                 * future:
                 */
                rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
                clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
        }
        nfsd4_probe_callback(clp);
        return new;
}
834
835 /* caller must hold client_lock */
836 static struct nfsd4_session *
837 find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid)
838 {
839         struct nfsd4_session *elem;
840         int idx;
841
842         dump_sessionid(__func__, sessionid);
843         idx = hash_sessionid(sessionid);
844         /* Search in the appropriate list */
845         list_for_each_entry(elem, &sessionid_hashtbl[idx], se_hash) {
846                 if (!memcmp(elem->se_sessionid.data, sessionid->data,
847                             NFS4_MAX_SESSIONID_LEN)) {
848                         return elem;
849                 }
850         }
851
852         dprintk("%s: session not found\n", __func__);
853         return NULL;
854 }
855
/* caller must hold client_lock */
static void
unhash_session(struct nfsd4_session *ses)
{
        list_del(&ses->se_hash);
        spin_lock(&ses->se_client->cl_lock);
        list_del(&ses->se_perclnt);
        spin_unlock(&ses->se_client->cl_lock);
}

/* must be called under the client_lock */
static inline void
renew_client_locked(struct nfs4_client *clp)
{
        if (is_client_expired(clp)) {
                dprintk("%s: client (clientid %08x/%08x) already expired\n",
                        __func__,
                        clp->cl_clientid.cl_boot,
                        clp->cl_clientid.cl_id);
                return;
        }

        /*
        * Move client to the end of the LRU list.
        */
        dprintk("renewing client (clientid %08x/%08x)\n", 
                        clp->cl_clientid.cl_boot, 
                        clp->cl_clientid.cl_id);
        list_move_tail(&clp->cl_lru, &client_lru);
        clp->cl_time = get_seconds();
}

/* Renew a client's lease, taking client_lock. */
static inline void
renew_client(struct nfs4_client *clp)
{
        spin_lock(&client_lock);
        renew_client_locked(clp);
        spin_unlock(&client_lock);
}
895
896 /* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
897 static int
898 STALE_CLIENTID(clientid_t *clid)
899 {
900         if (clid->cl_boot == boot_time)
901                 return 0;
902         dprintk("NFSD stale clientid (%08x/%08x) boot_time %08lx\n",
903                 clid->cl_boot, clid->cl_id, boot_time);
904         return 1;
905 }
906
907 /* 
908  * XXX Should we use a slab cache ?
909  * This type of memory management is somewhat inefficient, but we use it
910  * anyway since SETCLIENTID is not a common operation.
911  */
912 static struct nfs4_client *alloc_client(struct xdr_netobj name)
913 {
914         struct nfs4_client *clp;
915
916         clp = kzalloc(sizeof(struct nfs4_client), GFP_KERNEL);
917         if (clp == NULL)
918                 return NULL;
919         clp->cl_name.data = kmalloc(name.len, GFP_KERNEL);
920         if (clp->cl_name.data == NULL) {
921                 kfree(clp);
922                 return NULL;
923         }
924         memcpy(clp->cl_name.data, name.data, name.len);
925         clp->cl_name.len = name.len;
926         return clp;
927 }
928
929 static inline void
930 free_client(struct nfs4_client *clp)
931 {
932         while (!list_empty(&clp->cl_sessions)) {
933                 struct nfsd4_session *ses;
934                 ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
935                                 se_perclnt);
936                 list_del(&ses->se_perclnt);
937                 nfsd4_put_session(ses);
938         }
939         if (clp->cl_cred.cr_group_info)
940                 put_group_info(clp->cl_cred.cr_group_info);
941         kfree(clp->cl_principal);
942         kfree(clp->cl_name.data);
943         kfree(clp);
944 }
945
/* Drop a session's reference on its client.  Only the final put takes
 * client_lock; at that point an expired client is freed, otherwise its
 * lease is renewed. */
void
release_session_client(struct nfsd4_session *session)
{
	struct nfs4_client *clp = session->se_client;

	if (!atomic_dec_and_lock(&clp->cl_refcount, &client_lock))
		return;
	if (is_client_expired(clp)) {
		free_client(clp);
		/* Client is gone; clear the back-pointer so it cannot
		 * be dereferenced through this session. */
		session->se_client = NULL;
	} else
		renew_client_locked(clp);
	spin_unlock(&client_lock);
}
960
/* must be called under the client_lock */
static inline void
unhash_client_locked(struct nfs4_client *clp)
{
	struct nfsd4_session *ses;

	/* Mark expired first so concurrent renewals are refused, then
	 * drop the client from the LRU. */
	mark_client_expired(clp);
	list_del(&clp->cl_lru);
	spin_lock(&clp->cl_lock);
	/* Unhash every session from the sessionid table; they remain on
	 * cl_sessions until freed. */
	list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
		list_del_init(&ses->se_hash);
	spin_unlock(&clp->cl_lock);
}
974
/* Tear down all state held by a client: delegations, open owners,
 * callback channel, and hash-table membership.  Delegations are moved
 * to a private reaplist under recall_lock and destroyed only after the
 * lock is dropped, to respect lock ordering. */
static void
expire_client(struct nfs4_client *clp)
{
	struct nfs4_openowner *oo;
	struct nfs4_delegation *dp;
	struct list_head reaplist;

	INIT_LIST_HEAD(&reaplist);
	spin_lock(&recall_lock);
	while (!list_empty(&clp->cl_delegations)) {
		dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
		list_del_init(&dp->dl_perclnt);
		list_move(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&recall_lock);
	while (!list_empty(&reaplist)) {
		dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		unhash_delegation(dp);
	}
	/* Release every open owner (and with it all opens/locks). */
	while (!list_empty(&clp->cl_openowners)) {
		oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
		release_openowner(oo);
	}
	nfsd4_shutdown_callback(clp);
	if (clp->cl_cb_conn.cb_xprt)
		svc_xprt_put(clp->cl_cb_conn.cb_xprt);
	list_del(&clp->cl_idhash);
	list_del(&clp->cl_strhash);
	spin_lock(&client_lock);
	unhash_client_locked(clp);
	/* Free now only if no session still holds a reference; otherwise
	 * release_session_client() frees the client on the last put. */
	if (atomic_read(&clp->cl_refcount) == 0)
		free_client(clp);
	spin_unlock(&client_lock);
}
1010
1011 static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
1012 {
1013         memcpy(target->cl_verifier.data, source->data,
1014                         sizeof(target->cl_verifier.data));
1015 }
1016
1017 static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
1018 {
1019         target->cl_clientid.cl_boot = source->cl_clientid.cl_boot; 
1020         target->cl_clientid.cl_id = source->cl_clientid.cl_id; 
1021 }
1022
1023 static void copy_cred(struct svc_cred *target, struct svc_cred *source)
1024 {
1025         target->cr_uid = source->cr_uid;
1026         target->cr_gid = source->cr_gid;
1027         target->cr_group_info = source->cr_group_info;
1028         get_group_info(target->cr_group_info);
1029 }
1030
1031 static int same_name(const char *n1, const char *n2)
1032 {
1033         return 0 == memcmp(n1, n2, HEXDIR_LEN);
1034 }
1035
1036 static int
1037 same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
1038 {
1039         return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
1040 }
1041
1042 static int
1043 same_clid(clientid_t *cl1, clientid_t *cl2)
1044 {
1045         return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
1046 }
1047
/* XXX what about NGROUP */
static int
same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
{
	/* Only the uid is compared; gid and the group list are ignored
	 * (see the XXX above). */
	return cr1->cr_uid == cr2->cr_uid;
}
1054
1055 static void gen_clid(struct nfs4_client *clp)
1056 {
1057         static u32 current_clientid = 1;
1058
1059         clp->cl_clientid.cl_boot = boot_time;
1060         clp->cl_clientid.cl_id = current_clientid++; 
1061 }
1062
/* Generate a confirm verifier from the current time and a counter.
 * NOTE(review): the static counter is not protected by any lock visible
 * here -- presumably callers are serialized by the state lock; confirm. */
static void gen_confirm(struct nfs4_client *clp)
{
	static u32 i;
	u32 *p;

	/* cl_confirm.data receives two 32-bit words: seconds, counter. */
	p = (u32 *)clp->cl_confirm.data;
	*p++ = get_seconds();
	*p++ = i++;
}
1072
1073 static int
1074 same_stateid(stateid_t *id_one, stateid_t *id_two)
1075 {
1076         if (id_one->si_stateownerid != id_two->si_stateownerid)
1077                 return 0;
1078         return id_one->si_fileid == id_two->si_fileid;
1079 }
1080
1081 static struct nfs4_stid *find_stateid(stateid_t *t)
1082 {
1083         struct nfs4_stid *s;
1084         unsigned int hashval;
1085
1086         hashval = stateid_hashval(t);
1087         list_for_each_entry(s, &stateid_hashtbl[hashval], sc_hash)
1088                 if (same_stateid(&s->sc_stateid, t))
1089                         return s;
1090         return NULL;
1091 }
1092
1093 static struct nfs4_ol_stateid *find_ol_stateid(stateid_t *t)
1094 {
1095         struct nfs4_stid *s;
1096
1097         s = find_stateid(t);
1098         if (!s)
1099                 return NULL;
1100         return openlockstateid(s);
1101 }
1102
1103 static struct nfs4_stid *find_stateid_by_type(stateid_t *t, char typemask)
1104 {
1105         struct nfs4_stid *s;
1106
1107         s = find_stateid(t);
1108         if (!s)
1109                 return NULL;
1110         if (typemask & s->sc_type)
1111                 return s;
1112         return NULL;
1113 }
1114
1115 static struct nfs4_ol_stateid *find_ol_stateid_by_type(stateid_t *t, char typemask)
1116 {
1117         struct nfs4_stid *s;
1118
1119         s = find_stateid_by_type(t, typemask);
1120         if (!s)
1121                 return NULL;
1122         return openlockstateid(s);
1123 }
1124
/* Allocate and fully initialize a new nfs4_client from a SETCLIENTID /
 * EXCHANGE_ID request: name, recovery dir, credentials, address, and
 * callback bookkeeping.  Returns NULL on allocation failure. */
static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir,
		struct svc_rqst *rqstp, nfs4_verifier *verf)
{
	struct nfs4_client *clp;
	struct sockaddr *sa = svc_addr(rqstp);
	char *princ;

	clp = alloc_client(name);
	if (clp == NULL)
		return NULL;

	INIT_LIST_HEAD(&clp->cl_sessions);

	/* Record the GSS principal (if any) for later cred comparisons. */
	princ = svc_gss_principal(rqstp);
	if (princ) {
		clp->cl_principal = kstrdup(princ, GFP_KERNEL);
		if (clp->cl_principal == NULL) {
			free_client(clp);
			return NULL;
		}
	}

	memcpy(clp->cl_recdir, recdir, HEXDIR_LEN);
	atomic_set(&clp->cl_refcount, 0);
	clp->cl_cb_state = NFSD4_CB_UNKNOWN;
	INIT_LIST_HEAD(&clp->cl_idhash);
	INIT_LIST_HEAD(&clp->cl_strhash);
	INIT_LIST_HEAD(&clp->cl_openowners);
	INIT_LIST_HEAD(&clp->cl_delegations);
	INIT_LIST_HEAD(&clp->cl_lru);
	INIT_LIST_HEAD(&clp->cl_callbacks);
	spin_lock_init(&clp->cl_lock);
	INIT_WORK(&clp->cl_cb_null.cb_work, nfsd4_do_callback_rpc);
	/* Lease starts now. */
	clp->cl_time = get_seconds();
	clear_bit(0, &clp->cl_cb_slot_busy);
	rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
	copy_verf(clp, verf);
	/* Remember the client's address to detect clientid conflicts. */
	rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
	clp->cl_flavor = rqstp->rq_flavor;
	copy_cred(&clp->cl_cred, &rqstp->rq_cred);
	gen_confirm(clp);
	clp->cl_cb_session = NULL;
	return clp;
}
1169
1170 static int check_name(struct xdr_netobj name)
1171 {
1172         if (name.len == 0) 
1173                 return 0;
1174         if (name.len > NFS4_OPAQUE_LIMIT) {
1175                 dprintk("NFSD: check_name: name too long(%d)!\n", name.len);
1176                 return 0;
1177         }
1178         return 1;
1179 }
1180
1181 static void
1182 add_to_unconfirmed(struct nfs4_client *clp, unsigned int strhashval)
1183 {
1184         unsigned int idhashval;
1185
1186         list_add(&clp->cl_strhash, &unconf_str_hashtbl[strhashval]);
1187         idhashval = clientid_hashval(clp->cl_clientid.cl_id);
1188         list_add(&clp->cl_idhash, &unconf_id_hashtbl[idhashval]);
1189         renew_client(clp);
1190 }
1191
1192 static void
1193 move_to_confirmed(struct nfs4_client *clp)
1194 {
1195         unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
1196         unsigned int strhashval;
1197
1198         dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp);
1199         list_move(&clp->cl_idhash, &conf_id_hashtbl[idhashval]);
1200         strhashval = clientstr_hashval(clp->cl_recdir);
1201         list_move(&clp->cl_strhash, &conf_str_hashtbl[strhashval]);
1202         renew_client(clp);
1203 }
1204
1205 static struct nfs4_client *
1206 find_confirmed_client(clientid_t *clid)
1207 {
1208         struct nfs4_client *clp;
1209         unsigned int idhashval = clientid_hashval(clid->cl_id);
1210
1211         list_for_each_entry(clp, &conf_id_hashtbl[idhashval], cl_idhash) {
1212                 if (same_clid(&clp->cl_clientid, clid))
1213                         return clp;
1214         }
1215         return NULL;
1216 }
1217
1218 static struct nfs4_client *
1219 find_unconfirmed_client(clientid_t *clid)
1220 {
1221         struct nfs4_client *clp;
1222         unsigned int idhashval = clientid_hashval(clid->cl_id);
1223
1224         list_for_each_entry(clp, &unconf_id_hashtbl[idhashval], cl_idhash) {
1225                 if (same_clid(&clp->cl_clientid, clid))
1226                         return clp;
1227         }
1228         return NULL;
1229 }
1230
1231 static bool clp_used_exchangeid(struct nfs4_client *clp)
1232 {
1233         return clp->cl_exchange_flags != 0;
1234
1235
1236 static struct nfs4_client *
1237 find_confirmed_client_by_str(const char *dname, unsigned int hashval)
1238 {
1239         struct nfs4_client *clp;
1240
1241         list_for_each_entry(clp, &conf_str_hashtbl[hashval], cl_strhash) {
1242                 if (same_name(clp->cl_recdir, dname))
1243                         return clp;
1244         }
1245         return NULL;
1246 }
1247
1248 static struct nfs4_client *
1249 find_unconfirmed_client_by_str(const char *dname, unsigned int hashval)
1250 {
1251         struct nfs4_client *clp;
1252
1253         list_for_each_entry(clp, &unconf_str_hashtbl[hashval], cl_strhash) {
1254                 if (same_name(clp->cl_recdir, dname))
1255                         return clp;
1256         }
1257         return NULL;
1258 }
1259
1260 static void
1261 gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
1262 {
1263         struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
1264         struct sockaddr *sa = svc_addr(rqstp);
1265         u32 scopeid = rpc_get_scope_id(sa);
1266         unsigned short expected_family;
1267
1268         /* Currently, we only support tcp and tcp6 for the callback channel */
1269         if (se->se_callback_netid_len == 3 &&
1270             !memcmp(se->se_callback_netid_val, "tcp", 3))
1271                 expected_family = AF_INET;
1272         else if (se->se_callback_netid_len == 4 &&
1273                  !memcmp(se->se_callback_netid_val, "tcp6", 4))
1274                 expected_family = AF_INET6;
1275         else
1276                 goto out_err;
1277
1278         conn->cb_addrlen = rpc_uaddr2sockaddr(se->se_callback_addr_val,
1279                                             se->se_callback_addr_len,
1280                                             (struct sockaddr *)&conn->cb_addr,
1281                                             sizeof(conn->cb_addr));
1282
1283         if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
1284                 goto out_err;
1285
1286         if (conn->cb_addr.ss_family == AF_INET6)
1287                 ((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;
1288
1289         conn->cb_prog = se->se_callback_prog;
1290         conn->cb_ident = se->se_callback_ident;
1291         memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
1292         return;
1293 out_err:
1294         conn->cb_addr.ss_family = AF_UNSPEC;
1295         conn->cb_addrlen = 0;
1296         dprintk(KERN_INFO "NFSD: this client (clientid %08x/%08x) "
1297                 "will not receive delegations\n",
1298                 clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);
1299
1300         return;
1301 }
1302
1303 /*
1304  * Cache a reply. nfsd4_check_drc_limit() has bounded the cache size.
1305  */
1306 void
1307 nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
1308 {
1309         struct nfsd4_slot *slot = resp->cstate.slot;
1310         unsigned int base;
1311
1312         dprintk("--> %s slot %p\n", __func__, slot);
1313
1314         slot->sl_opcnt = resp->opcnt;
1315         slot->sl_status = resp->cstate.status;
1316
1317         if (nfsd4_not_cached(resp)) {
1318                 slot->sl_datalen = 0;
1319                 return;
1320         }
1321         slot->sl_datalen = (char *)resp->p - (char *)resp->cstate.datap;
1322         base = (char *)resp->cstate.datap -
1323                                         (char *)resp->xbuf->head[0].iov_base;
1324         if (read_bytes_from_xdr_buf(resp->xbuf, base, slot->sl_data,
1325                                     slot->sl_datalen))
1326                 WARN("%s: sessions DRC could not cache compound\n", __func__);
1327         return;
1328 }
1329
/*
 * Encode the replay sequence operation from the slot values.
 * If cachethis is FALSE encode the uncached rep error on the next
 * operation which sets resp->p and increments resp->opcnt for
 * nfs4svc_encode_compoundres.
 *
 */
static __be32
nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
			  struct nfsd4_compoundres *resp)
{
	struct nfsd4_op *op;
	struct nfsd4_slot *slot = resp->cstate.slot;

	dprintk("--> %s resp->opcnt %d cachethis %u \n", __func__,
		resp->opcnt, resp->cstate.slot->sl_cachethis);

	/* Encode the replayed sequence operation */
	op = &args->ops[resp->opcnt - 1];
	nfsd4_encode_operation(resp, op);

	/* Return nfserr_retry_uncached_rep in next operation. */
	if (args->opcnt > 1 && slot->sl_cachethis == 0) {
		op = &args->ops[resp->opcnt++];
		op->status = nfserr_retry_uncached_rep;
		nfsd4_encode_operation(resp, op);
	}
	/* op points at whichever operation was encoded last. */
	return op->status;
}
1359
/*
 * The sequence operation is not cached because we can use the slot and
 * session values.
 */
__be32
nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
			 struct nfsd4_sequence *seq)
{
	struct nfsd4_slot *slot = resp->cstate.slot;
	__be32 status;

	dprintk("--> %s slot %p\n", __func__, slot);

	/* Either returns 0 or nfserr_retry_uncached */
	status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
	if (status == nfserr_retry_uncached_rep)
		return status;

	/* The sequence operation has been encoded, cstate->datap set. */
	memcpy(resp->cstate.datap, slot->sl_data, slot->sl_datalen);

	/* Restore op count and advance the write pointer past the
	 * replayed data (sl_datalen bytes, in XDR quad units). */
	resp->opcnt = slot->sl_opcnt;
	resp->p = resp->cstate.datap + XDR_QUADLEN(slot->sl_datalen);
	status = slot->sl_status;

	return status;
}
1387
1388 /*
1389  * Set the exchange_id flags returned by the server.
1390  */
1391 static void
1392 nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
1393 {
1394         /* pNFS is not supported */
1395         new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;
1396
1397         /* Referrals are supported, Migration is not. */
1398         new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;
1399
1400         /* set the wire flags to return to client. */
1401         clid->flags = new->cl_exchange_flags;
1402 }
1403
/* Handle the EXCHANGE_ID operation (RFC 5661 section 18.35): match the
 * client name/verifier/creds against existing confirmed and unconfirmed
 * records and create, replace, or reuse a client accordingly. */
__be32
nfsd4_exchange_id(struct svc_rqst *rqstp,
		  struct nfsd4_compound_state *cstate,
		  struct nfsd4_exchange_id *exid)
{
	struct nfs4_client *unconf, *conf, *new;
	int status;
	unsigned int		strhashval;
	char			dname[HEXDIR_LEN];
	char			addr_str[INET6_ADDRSTRLEN];
	nfs4_verifier		verf = exid->verifier;
	struct sockaddr		*sa = svc_addr(rqstp);

	rpc_ntop(sa, addr_str, sizeof(addr_str));
	dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
		"ip_addr=%s flags %x, spa_how %d\n",
		__func__, rqstp, exid, exid->clname.len, exid->clname.data,
		addr_str, exid->flags, exid->spa_how);

	if (!check_name(exid->clname) || (exid->flags & ~EXCHGID4_FLAG_MASK_A))
		return nfserr_inval;

	/* Currently only support SP4_NONE */
	switch (exid->spa_how) {
	case SP4_NONE:
		break;
	case SP4_SSV:
		return nfserr_serverfault;
	default:
		/* NOTE(review): default sits between two cases; harmless
		 * here since BUG() and the returns never fall through. */
		BUG();				/* checked by xdr code */
	case SP4_MACH_CRED:
		return nfserr_serverfault;	/* no excuse :-/ */
	}

	/* Derive the recovery-directory name from the client name. */
	status = nfs4_make_rec_clidname(dname, &exid->clname);

	if (status)
		goto error;

	strhashval = clientstr_hashval(dname);

	nfs4_lock_state();
	status = nfs_ok;

	conf = find_confirmed_client_by_str(dname, strhashval);
	if (conf) {
		if (!clp_used_exchangeid(conf)) {
			status = nfserr_clid_inuse; /* XXX: ? */
			goto out;
		}
		if (!same_verf(&verf, &conf->cl_verifier)) {
			/* 18.35.4 case 8 */
			if (exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A) {
				status = nfserr_not_same;
				goto out;
			}
			/* Client reboot: destroy old state */
			expire_client(conf);
			goto out_new;
		}
		if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
			/* 18.35.4 case 9 */
			if (exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A) {
				status = nfserr_perm;
				goto out;
			}
			expire_client(conf);
			goto out_new;
		}
		/*
		 * Set bit when the owner id and verifier map to an already
		 * confirmed client id (18.35.3).
		 */
		exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;

		/*
		 * Falling into 18.35.4 case 2, possible router replay.
		 * Leave confirmed record intact and return same result.
		 */
		copy_verf(conf, &verf);
		new = conf;
		goto out_copy;
	}

	/* 18.35.4 case 7 */
	if (exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A) {
		status = nfserr_noent;
		goto out;
	}

	unconf  = find_unconfirmed_client_by_str(dname, strhashval);
	if (unconf) {
		/*
		 * Possible retry or client restart.  Per 18.35.4 case 4,
		 * a new unconfirmed record should be generated regardless
		 * of whether any properties have changed.
		 */
		expire_client(unconf);
	}

out_new:
	/* Normal case */
	new = create_client(exid->clname, dname, rqstp, &verf);
	if (new == NULL) {
		status = nfserr_jukebox;
		goto out;
	}

	gen_clid(new);
	add_to_unconfirmed(new, strhashval);
out_copy:
	/* Copy the (new or reused) clientid into the reply. */
	exid->clientid.cl_boot = new->cl_clientid.cl_boot;
	exid->clientid.cl_id = new->cl_clientid.cl_id;

	exid->seqid = 1;
	nfsd4_set_ex_flags(new, exid);

	dprintk("nfsd4_exchange_id seqid %d flags %x\n",
		new->cl_cs_slot.sl_seqid, new->cl_exchange_flags);
	status = nfs_ok;

out:
	nfs4_unlock_state();
error:
	dprintk("nfsd4_exchange_id returns %d\n", ntohl(status));
	return status;
}
1531
1532 static int
1533 check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
1534 {
1535         dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
1536                 slot_seqid);
1537
1538         /* The slot is in use, and no response has been sent. */
1539         if (slot_inuse) {
1540                 if (seqid == slot_seqid)
1541                         return nfserr_jukebox;
1542                 else
1543                         return nfserr_seq_misordered;
1544         }
1545         /* Normal */
1546         if (likely(seqid == slot_seqid + 1))
1547                 return nfs_ok;
1548         /* Replay */
1549         if (seqid == slot_seqid)
1550                 return nfserr_replay_cache;
1551         /* Wraparound */
1552         if (seqid == 1 && (slot_seqid + 1) == 0)
1553                 return nfs_ok;
1554         /* Misordered replay or misordered new request */
1555         return nfserr_seq_misordered;
1556 }
1557
1558 /*
1559  * Cache the create session result into the create session single DRC
1560  * slot cache by saving the xdr structure. sl_seqid has been set.
1561  * Do this for solo or embedded create session operations.
1562  */
1563 static void
1564 nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
1565                            struct nfsd4_clid_slot *slot, int nfserr)
1566 {
1567         slot->sl_status = nfserr;
1568         memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
1569 }
1570
1571 static __be32
1572 nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
1573                             struct nfsd4_clid_slot *slot)
1574 {
1575         memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
1576         return slot->sl_status;
1577 }
1578
/* Smallest legal request containing a SEQUENCE op, in bytes. */
#define NFSD_MIN_REQ_HDR_SEQ_SZ ((\
			2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
			1 +	/* MIN tag is length with zero, only length */ \
			3 +	/* version, opcount, opcode */ \
			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
				/* seqid, slotID, slotID, cache */ \
			4 ) * sizeof(__be32))

/* Smallest legal reply containing a SEQUENCE op, in bytes. */
#define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
			2 +	/* verifier: AUTH_NULL, length 0 */\
			1 +	/* status */ \
			1 +	/* MIN tag is length with zero, only length */ \
			3 +	/* opcount, opcode, opstatus*/ \
			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
				/* seqid, slotID, slotID, slotID, status */ \
			5 ) * sizeof(__be32))
1595
1596 static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs fchannel)
1597 {
1598         return fchannel.maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ
1599                 || fchannel.maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ;
1600 }
1601
/* Handle the CREATE_SESSION operation: match against confirmed and
 * unconfirmed clients, detect replays via the per-client slot, validate
 * channel attributes, and allocate the new session. */
__be32
nfsd4_create_session(struct svc_rqst *rqstp,
		     struct nfsd4_compound_state *cstate,
		     struct nfsd4_create_session *cr_ses)
{
	struct sockaddr *sa = svc_addr(rqstp);
	struct nfs4_client *conf, *unconf;
	struct nfsd4_session *new;
	struct nfsd4_clid_slot *cs_slot = NULL;
	bool confirm_me = false;
	int status = 0;

	if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
		return nfserr_inval;

	nfs4_lock_state();
	unconf = find_unconfirmed_client(&cr_ses->clientid);
	conf = find_confirmed_client(&cr_ses->clientid);

	if (conf) {
		/* Confirmed client: check the per-client slot for a replay
		 * or misordered seqid before creating another session. */
		cs_slot = &conf->cl_cs_slot;
		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
		if (status == nfserr_replay_cache) {
			dprintk("Got a create_session replay! seqid= %d\n",
				cs_slot->sl_seqid);
			/* Return the cached reply status */
			status = nfsd4_replay_create_session(cr_ses, cs_slot);
			goto out;
		} else if (cr_ses->seqid != cs_slot->sl_seqid + 1) {
			status = nfserr_seq_misordered;
			dprintk("Sequence misordered!\n");
			dprintk("Expected seqid= %d but got seqid= %d\n",
				cs_slot->sl_seqid, cr_ses->seqid);
			goto out;
		}
	} else if (unconf) {
		/* Unconfirmed client: creds and address must match the
		 * EXCHANGE_ID sender, and the seqid must be exact. */
		if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
		    !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
			status = nfserr_clid_inuse;
			goto out;
		}

		cs_slot = &unconf->cl_cs_slot;
		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
		if (status) {
			/* an unconfirmed replay returns misordered */
			status = nfserr_seq_misordered;
			goto out;
		}

		confirm_me = true;
		conf = unconf;
	} else {
		status = nfserr_stale_clientid;
		goto out;
	}

	/*
	 * XXX: we should probably set this at creation time, and check
	 * for consistent minorversion use throughout:
	 */
	conf->cl_minorversion = 1;
	/*
	 * We do not support RDMA or persistent sessions
	 */
	cr_ses->flags &= ~SESSION4_PERSIST;
	cr_ses->flags &= ~SESSION4_RDMA;

	status = nfserr_toosmall;
	if (check_forechannel_attrs(cr_ses->fore_channel))
		goto out;

	status = nfserr_jukebox;
	new = alloc_init_session(rqstp, conf, cr_ses);
	if (!new)
		goto out;
	status = nfs_ok;
	/* Copy the negotiated session id and channel attrs into the reply. */
	memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
	       NFS4_MAX_SESSIONID_LEN);
	memcpy(&cr_ses->fore_channel, &new->se_fchannel,
		sizeof(struct nfsd4_channel_attrs));
	cs_slot->sl_seqid++;
	cr_ses->seqid = cs_slot->sl_seqid;

	/* cache solo and embedded create sessions under the state lock */
	nfsd4_cache_create_session(cr_ses, cs_slot, status);
	if (confirm_me)
		move_to_confirmed(conf);
out:
	nfs4_unlock_state();
	dprintk("%s returns %d\n", __func__, ntohl(status));
	return status;
}
1695
1696 static bool nfsd4_last_compound_op(struct svc_rqst *rqstp)
1697 {
1698         struct nfsd4_compoundres *resp = rqstp->rq_resp;
1699         struct nfsd4_compoundargs *argp = rqstp->rq_argp;
1700
1701         return argp->opcnt == resp->opcnt;
1702 }
1703
1704 static __be32 nfsd4_map_bcts_dir(u32 *dir)
1705 {
1706         switch (*dir) {
1707         case NFS4_CDFC4_FORE:
1708         case NFS4_CDFC4_BACK:
1709                 return nfs_ok;
1710         case NFS4_CDFC4_FORE_OR_BOTH:
1711         case NFS4_CDFC4_BACK_OR_BOTH:
1712                 *dir = NFS4_CDFC4_BOTH;
1713                 return nfs_ok;
1714         };
1715         return nfserr_inval;
1716 }
1717
/* Handle BIND_CONN_TO_SESSION: look up the target session, pin it and
 * its client, then attach the request's connection to it. */
__be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
		     struct nfsd4_compound_state *cstate,
		     struct nfsd4_bind_conn_to_session *bcts)
{
	__be32 status;

	if (!nfsd4_last_compound_op(rqstp))
		return nfserr_not_only_op;
	spin_lock(&client_lock);
	cstate->session = find_in_sessionid_hashtbl(&bcts->sessionid);
	/* Sorta weird: we only need the refcnt'ing because new_conn acquires
	 * client_lock iself: */
	if (cstate->session) {
		nfsd4_get_session(cstate->session);
		atomic_inc(&cstate->session->se_client->cl_refcount);
	}
	spin_unlock(&client_lock);
	if (!cstate->session)
		return nfserr_badsession;

	/* Normalize the requested direction before creating the conn. */
	status = nfsd4_map_bcts_dir(&bcts->dir);
	if (!status)
		nfsd4_new_conn(rqstp, cstate->session, bcts->dir);
	return status;
}
1743
1744 static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid)
1745 {
1746         if (!session)
1747                 return 0;
1748         return !memcmp(sid, &session->se_sessionid, sizeof(*sid));
1749 }
1750
/* Handle DESTROY_SESSION: unhash the session under client_lock, quiesce
 * the callback channel, drop its connections, and release it. */
__be32
nfsd4_destroy_session(struct svc_rqst *r,
		      struct nfsd4_compound_state *cstate,
		      struct nfsd4_destroy_session *sessionid)
{
	struct nfsd4_session *ses;
	u32 status = nfserr_badsession;

	/* Notes:
	 * - The confirmed nfs4_client->cl_sessionid holds destroyed sessinid
	 * - Should we return nfserr_back_chan_busy if waiting for
	 *   callbacks on to-be-destroyed session?
	 * - Do we need to clear any callback info from previous session?
	 */

	/* Destroying the session we arrived on is only legal as the sole
	 * op of the compound. */
	if (nfsd4_compound_in_session(cstate->session, &sessionid->sessionid)) {
		if (!nfsd4_last_compound_op(r))
			return nfserr_not_only_op;
	}
	dump_sessionid(__func__, &sessionid->sessionid);
	spin_lock(&client_lock);
	ses = find_in_sessionid_hashtbl(&sessionid->sessionid);
	if (!ses) {
		spin_unlock(&client_lock);
		goto out;
	}

	unhash_session(ses);
	spin_unlock(&client_lock);

	nfs4_lock_state();
	/* Flush any callback traffic before tearing down connections. */
	nfsd4_probe_callback_sync(ses->se_client);
	nfs4_unlock_state();

	nfsd4_del_conns(ses);

	nfsd4_put_session(ses);
	status = nfs_ok;
out:
	dprintk("%s returns %d\n", __func__, ntohl(status));
	return status;
}
1793
1794 static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
1795 {
1796         struct nfsd4_conn *c;
1797
1798         list_for_each_entry(c, &s->se_conns, cn_persession) {
1799                 if (c->cn_xprt == xpt) {
1800                         return c;
1801                 }
1802         }
1803         return NULL;
1804 }
1805
/*
 * Take ownership of connection @new on behalf of session @ses: hash it
 * into the session's connection list unless the session already knows
 * this transport, in which case @new is freed.  Either way the caller
 * must not touch @new afterwards.
 */
static void nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd4_conn *c;
	int ret;

	spin_lock(&clp->cl_lock);
	c = __nfsd4_find_conn(new->cn_xprt, ses);
	if (c) {
		/* Duplicate: this transport is already part of the session. */
		spin_unlock(&clp->cl_lock);
		free_conn(new);
		return;
	}
	__nfsd4_hash_conn(new, ses);
	spin_unlock(&clp->cl_lock);
	/* Register for transport-down notification outside cl_lock. */
	ret = nfsd4_register_conn(new);
	if (ret)
		/* oops; xprt is already down: */
		nfsd4_conn_lost(&new->cn_xpt_user);
	return;
}
1827
1828 static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
1829 {
1830         struct nfsd4_compoundargs *args = rqstp->rq_argp;
1831
1832         return args->opcnt > session->se_fchannel.maxops;
1833 }
1834
1835 static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
1836                                   struct nfsd4_session *session)
1837 {
1838         struct xdr_buf *xb = &rqstp->rq_arg;
1839
1840         return xb->len > session->se_fchannel.maxreq_sz;
1841 }
1842
1843 __be32
1844 nfsd4_sequence(struct svc_rqst *rqstp,
1845                struct nfsd4_compound_state *cstate,
1846                struct nfsd4_sequence *seq)
1847 {
1848         struct nfsd4_compoundres *resp = rqstp->rq_resp;
1849         struct nfsd4_session *session;
1850         struct nfsd4_slot *slot;
1851         struct nfsd4_conn *conn;
1852         int status;
1853
1854         if (resp->opcnt != 1)
1855                 return nfserr_sequence_pos;
1856
1857         /*
1858          * Will be either used or freed by nfsd4_sequence_check_conn
1859          * below.
1860          */
1861         conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
1862         if (!conn)
1863                 return nfserr_jukebox;
1864
1865         spin_lock(&client_lock);
1866         status = nfserr_badsession;
1867         session = find_in_sessionid_hashtbl(&seq->sessionid);
1868         if (!session)
1869                 goto out;
1870
1871         status = nfserr_too_many_ops;
1872         if (nfsd4_session_too_many_ops(rqstp, session))
1873                 goto out;
1874
1875         status = nfserr_req_too_big;
1876         if (nfsd4_request_too_big(rqstp, session))
1877                 goto out;
1878
1879         status = nfserr_badslot;
1880         if (seq->slotid >= session->se_fchannel.maxreqs)
1881                 goto out;
1882
1883         slot = session->se_slots[seq->slotid];
1884         dprintk("%s: slotid %d\n", __func__, seq->slotid);
1885
1886         /* We do not negotiate the number of slots yet, so set the
1887          * maxslots to the session maxreqs which is used to encode
1888          * sr_highest_slotid and the sr_target_slot id to maxslots */
1889         seq->maxslots = session->se_fchannel.maxreqs;
1890
1891         status = check_slot_seqid(seq->seqid, slot->sl_seqid, slot->sl_inuse);
1892         if (status == nfserr_replay_cache) {
1893                 cstate->slot = slot;
1894                 cstate->session = session;
1895                 /* Return the cached reply status and set cstate->status
1896                  * for nfsd4_proc_compound processing */
1897                 status = nfsd4_replay_cache_entry(resp, seq);
1898                 cstate->status = nfserr_replay_cache;
1899                 goto out;
1900         }
1901         if (status)
1902                 goto out;
1903
1904         nfsd4_sequence_check_conn(conn, session);
1905         conn = NULL;
1906
1907         /* Success! bump slot seqid */
1908         slot->sl_inuse = true;
1909         slot->sl_seqid = seq->seqid;
1910         slot->sl_cachethis = seq->cachethis;
1911
1912         cstate->slot = slot;
1913         cstate->session = session;
1914
1915 out:
1916         /* Hold a session reference until done processing the compound. */
1917         if (cstate->session) {
1918                 struct nfs4_client *clp = session->se_client;
1919
1920                 nfsd4_get_session(cstate->session);
1921                 atomic_inc(&clp->cl_refcount);
1922                 if (clp->cl_cb_state == NFSD4_CB_DOWN)
1923                         seq->status_flags |= SEQ4_STATUS_CB_PATH_DOWN;
1924         }
1925         kfree(conn);
1926         spin_unlock(&client_lock);
1927         dprintk("%s: return %d\n", __func__, ntohl(status));
1928         return status;
1929 }
1930
1931 __be32
1932 nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_reclaim_complete *rc)
1933 {
1934         int status = 0;
1935
1936         if (rc->rca_one_fs) {
1937                 if (!cstate->current_fh.fh_dentry)
1938                         return nfserr_nofilehandle;
1939                 /*
1940                  * We don't take advantage of the rca_one_fs case.
1941                  * That's OK, it's optional, we can safely ignore it.
1942                  */
1943                  return nfs_ok;
1944         }
1945
1946         nfs4_lock_state();
1947         status = nfserr_complete_already;
1948         if (cstate->session->se_client->cl_firststate)
1949                 goto out;
1950
1951         status = nfserr_stale_clientid;
1952         if (is_client_expired(cstate->session->se_client))
1953                 /*
1954                  * The following error isn't really legal.
1955                  * But we only get here if the client just explicitly
1956                  * destroyed the client.  Surely it no longer cares what
1957                  * error it gets back on an operation for the dead
1958                  * client.
1959                  */
1960                 goto out;
1961
1962         status = nfs_ok;
1963         nfsd4_create_clid_dir(cstate->session->se_client);
1964 out:
1965         nfs4_unlock_state();
1966         return status;
1967 }
1968
/*
 * SETCLIENTID (RFC 3530 14.2.33): record the client's chosen identifier
 * and boot verifier as an *unconfirmed* client record.  The handshake
 * completes with SETCLIENTID_CONFIRM.  Replaces any stale unconfirmed
 * record with the same name.
 */
__be32
nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		  struct nfsd4_setclientid *setclid)
{
	struct xdr_netobj	clname = {
		.len = setclid->se_namelen,
		.data = setclid->se_name,
	};
	nfs4_verifier		clverifier = setclid->se_verf;
	unsigned int		strhashval;
	struct nfs4_client	*conf, *unconf, *new;
	__be32			status;
	char			dname[HEXDIR_LEN];

	if (!check_name(clname))
		return nfserr_inval;

	/* Derive the hex recovery-directory name from the client name. */
	status = nfs4_make_rec_clidname(dname, &clname);
	if (status)
		return status;

	/*
	 * XXX The Duplicate Request Cache (DRC) has been checked (??)
	 * We get here on a DRC miss.
	 */

	strhashval = clientstr_hashval(dname);

	nfs4_lock_state();
	conf = find_confirmed_client_by_str(dname, strhashval);
	if (conf) {
		/* RFC 3530 14.2.33 CASE 0: */
		status = nfserr_clid_inuse;
		/* A client that used EXCHANGE_ID must not mix in SETCLIENTID. */
		if (clp_used_exchangeid(conf))
			goto out;
		if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
			/* Same name, different principal: genuine conflict. */
			char addr_str[INET6_ADDRSTRLEN];
			rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str,
				 sizeof(addr_str));
			dprintk("NFSD: setclientid: string in use by client "
				"at %s\n", addr_str);
			goto out;
		}
	}
	/*
	 * section 14.2.33 of RFC 3530 (under the heading "IMPLEMENTATION")
	 * has a description of SETCLIENTID request processing consisting
	 * of 5 bullet points, labeled as CASE0 - CASE4 below.
	 */
	unconf = find_unconfirmed_client_by_str(dname, strhashval);
	status = nfserr_jukebox;
	if (!conf) {
		/*
		 * RFC 3530 14.2.33 CASE 4:
		 * placed first, because it is the normal case
		 */
		if (unconf)
			expire_client(unconf);
		new = create_client(clname, dname, rqstp, &clverifier);
		if (new == NULL)
			goto out;
		gen_clid(new);
	} else if (same_verf(&conf->cl_verifier, &clverifier)) {
		/*
		 * RFC 3530 14.2.33 CASE 1:
		 * probable callback update
		 */
		if (unconf) {
			/* Note this is removing unconfirmed {*x***},
			 * which is stronger than RFC recommended {vxc**}.
			 * This has the advantage that there is at most
			 * one {*x***} in either list at any time.
			 */
			expire_client(unconf);
		}
		new = create_client(clname, dname, rqstp, &clverifier);
		if (new == NULL)
			goto out;
		/* Keep the confirmed client's id: this is just an update. */
		copy_clid(new, conf);
	} else if (!unconf) {
		/*
		 * RFC 3530 14.2.33 CASE 2:
		 * probable client reboot; state will be removed if
		 * confirmed.
		 */
		new = create_client(clname, dname, rqstp, &clverifier);
		if (new == NULL)
			goto out;
		gen_clid(new);
	} else {
		/*
		 * RFC 3530 14.2.33 CASE 3:
		 * probable client reboot; state will be removed if
		 * confirmed.
		 */
		expire_client(unconf);
		new = create_client(clname, dname, rqstp, &clverifier);
		if (new == NULL)
			goto out;
		gen_clid(new);
	}
	/*
	 * XXX: we should probably set this at creation time, and check
	 * for consistent minorversion use throughout:
	 */
	new->cl_minorversion = 0;
	gen_callback(new, setclid, rqstp);
	add_to_unconfirmed(new, strhashval);
	/* Hand the new clientid and confirm verifier back to the client. */
	setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
	setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
	memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
	status = nfs_ok;
out:
	nfs4_unlock_state();
	return status;
}
2085
2086
2087 /*
2088  * Section 14.2.34 of RFC 3530 (under the heading "IMPLEMENTATION") has
2089  * a description of SETCLIENTID_CONFIRM request processing consisting of 4
2090  * bullets, labeled as CASE1 - CASE4 below.
2091  */
2092 __be32
2093 nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
2094                          struct nfsd4_compound_state *cstate,
2095                          struct nfsd4_setclientid_confirm *setclientid_confirm)
2096 {
2097         struct sockaddr *sa = svc_addr(rqstp);
2098         struct nfs4_client *conf, *unconf;
2099         nfs4_verifier confirm = setclientid_confirm->sc_confirm; 
2100         clientid_t * clid = &setclientid_confirm->sc_clientid;
2101         __be32 status;
2102
2103         if (STALE_CLIENTID(clid))
2104                 return nfserr_stale_clientid;
2105         /* 
2106          * XXX The Duplicate Request Cache (DRC) has been checked (??)
2107          * We get here on a DRC miss.
2108          */
2109
2110         nfs4_lock_state();
2111
2112         conf = find_confirmed_client(clid);
2113         unconf = find_unconfirmed_client(clid);
2114
2115         status = nfserr_clid_inuse;
2116         if (conf && !rpc_cmp_addr((struct sockaddr *) &conf->cl_addr, sa))
2117                 goto out;
2118         if (unconf && !rpc_cmp_addr((struct sockaddr *) &unconf->cl_addr, sa))
2119                 goto out;
2120
2121         /*
2122          * section 14.2.34 of RFC 3530 has a description of
2123          * SETCLIENTID_CONFIRM request processing consisting
2124          * of 4 bullet points, labeled as CASE1 - CASE4 below.
2125          */
2126         if (conf && unconf && same_verf(&confirm, &unconf->cl_confirm)) {
2127                 /*
2128                  * RFC 3530 14.2.34 CASE 1:
2129                  * callback update
2130                  */
2131                 if (!same_creds(&conf->cl_cred, &unconf->cl_cred))
2132                         status = nfserr_clid_inuse;
2133                 else {
2134                         nfsd4_change_callback(conf, &unconf->cl_cb_conn);
2135                         nfsd4_probe_callback(conf);
2136                         expire_client(unconf);
2137                         status = nfs_ok;
2138
2139                 }
2140         } else if (conf && !unconf) {
2141                 /*
2142                  * RFC 3530 14.2.34 CASE 2:
2143                  * probable retransmitted request; play it safe and
2144                  * do nothing.
2145                  */
2146                 if (!same_creds(&conf->cl_cred, &rqstp->rq_cred))
2147                         status = nfserr_clid_inuse;
2148                 else
2149                         status = nfs_ok;
2150         } else if (!conf && unconf
2151                         && same_verf(&unconf->cl_confirm, &confirm)) {
2152                 /*
2153                  * RFC 3530 14.2.34 CASE 3:
2154                  * Normal case; new or rebooted client:
2155                  */
2156                 if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred)) {
2157                         status = nfserr_clid_inuse;
2158                 } else {
2159                         unsigned int hash =
2160                                 clientstr_hashval(unconf->cl_recdir);
2161                         conf = find_confirmed_client_by_str(unconf->cl_recdir,
2162                                                             hash);
2163                         if (conf) {
2164                                 nfsd4_remove_clid_dir(conf);
2165                                 expire_client(conf);
2166                         }
2167                         move_to_confirmed(unconf);
2168                         conf = unconf;
2169                         nfsd4_probe_callback(conf);
2170                         status = nfs_ok;
2171                 }
2172         } else if ((!conf || (conf && !same_verf(&conf->cl_confirm, &confirm)))
2173             && (!unconf || (unconf && !same_verf(&unconf->cl_confirm,
2174                                                                 &confirm)))) {
2175                 /*
2176                  * RFC 3530 14.2.34 CASE 4:
2177                  * Client probably hasn't noticed that we rebooted yet.
2178                  */
2179                 status = nfserr_stale_clientid;
2180         } else {
2181                 /* check that we have hit one of the cases...*/
2182                 status = nfserr_clid_inuse;
2183         }
2184 out:
2185         nfs4_unlock_state();
2186         return status;
2187 }
2188
2189 /* OPEN Share state helper functions */
2190 static inline struct nfs4_file *
2191 alloc_init_file(struct inode *ino)
2192 {
2193         struct nfs4_file *fp;
2194         unsigned int hashval = file_hashval(ino);
2195
2196         fp = kmem_cache_alloc(file_slab, GFP_KERNEL);
2197         if (fp) {
2198                 atomic_set(&fp->fi_ref, 1);
2199                 INIT_LIST_HEAD(&fp->fi_hash);
2200                 INIT_LIST_HEAD(&fp->fi_stateids);
2201                 INIT_LIST_HEAD(&fp->fi_delegations);
2202                 fp->fi_inode = igrab(ino);
2203                 fp->fi_id = current_fileid++;
2204                 fp->fi_had_conflict = false;
2205                 fp->fi_lease = NULL;
2206                 memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
2207                 memset(fp->fi_access, 0, sizeof(fp->fi_access));
2208                 spin_lock(&recall_lock);
2209                 list_add(&fp->fi_hash, &file_hashtbl[hashval]);
2210                 spin_unlock(&recall_lock);
2211                 return fp;
2212         }
2213         return NULL;
2214 }
2215
2216 static void
2217 nfsd4_free_slab(struct kmem_cache **slab)
2218 {
2219         if (*slab == NULL)
2220                 return;
2221         kmem_cache_destroy(*slab);
2222         *slab = NULL;
2223 }
2224
/*
 * Tear down every nfsd4 state slab cache.  Safe to call on a partially
 * initialized set (nfsd4_free_slab skips NULL caches), so it doubles as
 * the error-unwind path for nfsd4_init_slabs().
 */
void
nfsd4_free_slabs(void)
{
	nfsd4_free_slab(&openowner_slab);
	nfsd4_free_slab(&lockowner_slab);
	nfsd4_free_slab(&file_slab);
	nfsd4_free_slab(&stateid_slab);
	nfsd4_free_slab(&deleg_slab);
}
2234
2235 static int
2236 nfsd4_init_slabs(void)
2237 {
2238         openowner_slab = kmem_cache_create("nfsd4_openowners",
2239                         sizeof(struct nfs4_openowner), 0, 0, NULL);
2240         if (openowner_slab == NULL)
2241                 goto out_nomem;
2242         lockowner_slab = kmem_cache_create("nfsd4_lockowners",
2243                         sizeof(struct nfs4_openowner), 0, 0, NULL);
2244         if (lockowner_slab == NULL)
2245                 goto out_nomem;
2246         file_slab = kmem_cache_create("nfsd4_files",
2247                         sizeof(struct nfs4_file), 0, 0, NULL);
2248         if (file_slab == NULL)
2249                 goto out_nomem;
2250         stateid_slab = kmem_cache_create("nfsd4_stateids",
2251                         sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
2252         if (stateid_slab == NULL)
2253                 goto out_nomem;
2254         deleg_slab = kmem_cache_create("nfsd4_delegations",
2255                         sizeof(struct nfs4_delegation), 0, 0, NULL);
2256         if (deleg_slab == NULL)
2257                 goto out_nomem;
2258         return 0;
2259 out_nomem:
2260         nfsd4_free_slabs();
2261         dprintk("nfsd4: out of memory while initializing nfsv4\n");
2262         return -ENOMEM;
2263 }
2264
/* Free an open owner and its copied owner-name buffer. */
void nfs4_free_openowner(struct nfs4_openowner *oo)
{
	kfree(oo->oo_owner.so_owner.data);
	kmem_cache_free(openowner_slab, oo);
}
2270
/* Free a lock owner and its copied owner-name buffer. */
void nfs4_free_lockowner(struct nfs4_lockowner *lo)
{
	kfree(lo->lo_owner.so_owner.data);
	kmem_cache_free(lockowner_slab, lo);
}
2276
/*
 * Initialize a state owner's seqid-replay cache: empty, pointing at the
 * inline buffer, with a fail-safe default status.
 */
static void init_nfs4_replay(struct nfs4_replay *rp)
{
	rp->rp_status = nfserr_serverfault;
	rp->rp_buflen = 0;
	rp->rp_buf = rp->rp_ibuf;
}
2283
2284 static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
2285 {
2286         struct nfs4_stateowner *sop;
2287
2288         sop = kmem_cache_alloc(slab, GFP_KERNEL);
2289         if (!sop)
2290                 return NULL;
2291
2292         sop->so_owner.data = kmemdup(owner->data, owner->len, GFP_KERNEL);
2293         if (!sop->so_owner.data) {
2294                 kmem_cache_free(slab, sop);
2295                 return NULL;
2296         }
2297         sop->so_owner.len = owner->len;
2298
2299         INIT_LIST_HEAD(&sop->so_stateids);
2300         sop->so_id = current_ownerid++;
2301         sop->so_client = clp;
2302         init_nfs4_replay(&sop->so_replay);
2303         return sop;
2304 }
2305
/*
 * Link a new open owner into the three lists it lives on: the global
 * id hash, the global owner-string hash (@strhashval precomputed by the
 * caller), and the client's per-client list.
 */
static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
{
	unsigned int idhashval;

	idhashval = open_ownerid_hashval(oo->oo_owner.so_id);
	list_add(&oo->oo_owner.so_idhash, &open_ownerid_hashtbl[idhashval]);
	list_add(&oo->oo_owner.so_strhash, &open_ownerstr_hashtbl[strhashval]);
	list_add(&oo->oo_perclient, &clp->cl_openowners);
}
2315
2316 static struct nfs4_openowner *
2317 alloc_init_open_stateowner(unsigned int strhashval, struct nfs4_client *clp, struct nfsd4_open *open) {
2318         struct nfs4_openowner *oo;
2319
2320         oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
2321         if (!oo)
2322                 return NULL;
2323         oo->oo_owner.so_is_open_owner = 1;
2324         oo->oo_owner.so_seqid = open->op_seqid;
2325         oo->oo_confirmed = 0;
2326         oo->oo_time = 0;
2327         INIT_LIST_HEAD(&oo->oo_close_lru);
2328         hash_openowner(oo, clp, strhashval);
2329         return oo;
2330 }
2331
/*
 * Initialize a freshly allocated open stateid for @open against file
 * @fp: link it to the open owner and the file, build its stateid, hash
 * it, and record the requested share access/deny bits in the bitmaps.
 */
static inline void
init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open) {
	struct nfs4_openowner *oo = open->op_openowner;

	INIT_LIST_HEAD(&stp->st_lockowners);
	list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
	list_add(&stp->st_perfile, &fp->fi_stateids);
	stp->st_stid.sc_type = NFS4_OPEN_STID;
	stp->st_stateowner = &oo->oo_owner;
	/* Takes a file reference, dropped when the stateid is released. */
	get_nfs4_file(fp);
	stp->st_file = fp;
	stp->st_stid.sc_stateid.si_boot = boot_time;
	stp->st_stid.sc_stateid.si_stateownerid = oo->oo_owner.so_id;
	stp->st_stid.sc_stateid.si_fileid = fp->fi_id;
	/* note will be incremented before first return to client: */
	stp->st_stid.sc_stateid.si_generation = 0;
	hash_stid(&stp->st_stid);
	stp->st_access_bmap = 0;
	stp->st_deny_bmap = 0;
	/* Record share bits; the WANT bits are not part of access. */
	__set_bit(open->op_share_access & ~NFS4_SHARE_WANT_MASK,
		  &stp->st_access_bmap);
	__set_bit(open->op_share_deny, &stp->st_deny_bmap);
	stp->st_openstp = NULL;
}
2356
/*
 * Park an open owner on the close LRU after its last stateid is closed,
 * timestamped so the laundromat can reap it once the replay window for
 * the final CLOSE has passed.
 */
static void
move_to_close_lru(struct nfs4_openowner *oo)
{
	dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);

	list_move_tail(&oo->oo_close_lru, &close_lru);
	oo->oo_time = get_seconds();
}
2365
2366 static int
2367 same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner,
2368                                                         clientid_t *clid)
2369 {
2370         return (sop->so_owner.len == owner->len) &&
2371                 0 == memcmp(sop->so_owner.data, owner->data, owner->len) &&
2372                 (sop->so_client->cl_clientid.cl_id == clid->cl_id);
2373 }
2374
2375 static struct nfs4_openowner *
2376 find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open)
2377 {
2378         struct nfs4_stateowner *so = NULL;
2379
2380         list_for_each_entry(so, &open_ownerstr_hashtbl[hashval], so_strhash) {
2381                 if (same_owner_str(so, &open->op_owner, &open->op_clientid))
2382                         return container_of(so, struct nfs4_openowner, oo_owner);
2383         }
2384         return NULL;
2385 }
2386
2387 /* search file_hashtbl[] for file */
2388 static struct nfs4_file *
2389 find_file(struct inode *ino)
2390 {
2391         unsigned int hashval = file_hashval(ino);
2392         struct nfs4_file *fp;
2393
2394         spin_lock(&recall_lock);
2395         list_for_each_entry(fp, &file_hashtbl[hashval], fi_hash) {
2396                 if (fp->fi_inode == ino) {
2397                         get_nfs4_file(fp);
2398                         spin_unlock(&recall_lock);
2399                         return fp;
2400                 }
2401         }
2402         spin_unlock(&recall_lock);
2403         return NULL;
2404 }
2405
/*
 * Validate a share_access value from OPEN.  The low bits must name a
 * legal access mode (READ, WRITE, or BOTH); for v4.1+ (@minorversion
 * nonzero) the WANT and WHEN delegation-hint bits are also allowed,
 * within their defined ranges.  Any other set bit is invalid.
 * Returns 1 if valid, 0 otherwise.
 */
static inline int access_valid(u32 x, u32 minorversion)
{
	if ((x & NFS4_SHARE_ACCESS_MASK) < NFS4_SHARE_ACCESS_READ)
		return 0;
	if ((x & NFS4_SHARE_ACCESS_MASK) > NFS4_SHARE_ACCESS_BOTH)
		return 0;
	x &= ~NFS4_SHARE_ACCESS_MASK;
	if (minorversion && x) {
		if ((x & NFS4_SHARE_WANT_MASK) > NFS4_SHARE_WANT_CANCEL)
			return 0;
		if ((x & NFS4_SHARE_WHEN_MASK) > NFS4_SHARE_PUSH_DELEG_WHEN_UNCONTENDED)
			return 0;
		x &= ~(NFS4_SHARE_WANT_MASK | NFS4_SHARE_WHEN_MASK);
	}
	/* Anything left over is a bit we don't recognize. */
	if (x)
		return 0;
	return 1;
}
2424
/* Validate a share_deny value: NONE through BOTH are all legal. */
static inline int deny_valid(u32 x)
{
	/* Note: unlike access bits, deny bits may be zero. */
	return x <= NFS4_SHARE_DENY_BOTH;
}
2430
2431 /*
2432  * Called to check deny when READ with all zero stateid or
2433  * WRITE with all zero or all one stateid
2434  */
2435 static __be32
2436 nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
2437 {
2438         struct inode *ino = current_fh->fh_dentry->d_inode;
2439         struct nfs4_file *fp;
2440         struct nfs4_ol_stateid *stp;
2441         __be32 ret;
2442
2443         dprintk("NFSD: nfs4_share_conflict\n");
2444
2445         fp = find_file(ino);
2446         if (!fp)
2447                 return nfs_ok;
2448         ret = nfserr_locked;
2449         /* Search for conflicting share reservations */
2450         list_for_each_entry(stp, &fp->fi_stateids, st_perfile) {
2451                 if (test_bit(deny_type, &stp->st_deny_bmap) ||
2452                     test_bit(NFS4_SHARE_DENY_BOTH, &stp->st_deny_bmap))
2453                         goto out;
2454         }
2455         ret = nfs_ok;
2456 out:
2457         put_nfs4_file(fp);
2458         return ret;
2459 }
2460
/*
 * Queue one delegation for recall.  Called from the lease-break path
 * with lock_flocks() held (see nfsd_break_deleg_cb).
 */
static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
{
	/* We're assuming the state code never drops its reference
	 * without first removing the lease.  Since we're in this lease
	 * callback (and since the lease code is serialized by the kernel
	 * lock) we know the server hasn't removed the lease yet, we know
	 * it's safe to take a reference: */
	atomic_inc(&dp->dl_count);

	list_add_tail(&dp->dl_recall_lru, &del_recall_lru);

	/* only place dl_time is set. protected by lock_flocks*/
	dp->dl_time = get_seconds();

	/* Kick off the CB_RECALL to the client. */
	nfsd4_cb_recall(dp);
}
2477
/* Called from break_lease() with lock_flocks() held. */
/*
 * Lease-break callback: a local operation conflicts with our
 * delegation lease on this file, so recall every delegation held on it.
 */
static void nfsd_break_deleg_cb(struct file_lock *fl)
{
	struct nfs4_file *fp = (struct nfs4_file *)fl->fl_owner;
	struct nfs4_delegation *dp;

	BUG_ON(!fp);
	/* We assume break_lease is only called once per lease: */
	BUG_ON(fp->fi_had_conflict);
	/*
	 * We don't want the locks code to timeout the lease for us;
	 * we'll remove it ourself if a delegation isn't returned
	 * in time:
	 */
	fl->fl_break_time = 0;

	spin_lock(&recall_lock);
	fp->fi_had_conflict = true;
	list_for_each_entry(dp, &fp->fi_delegations, dl_perfile)
		nfsd_break_one_deleg(dp);
	spin_unlock(&recall_lock);
}
2500
2501 static
2502 int nfsd_change_deleg_cb(struct file_lock **onlist, int arg)
2503 {
2504         if (arg & F_UNLCK)
2505                 return lease_modify(onlist, arg);
2506         else
2507                 return -EAGAIN;
2508 }
2509
/* Lease-manager hooks installed on delegation file locks. */
static const struct lock_manager_operations nfsd_lease_mng_ops = {
	.lm_break = nfsd_break_deleg_cb,
	.lm_change = nfsd_change_deleg_cb,
};
2514
2515 static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
2516 {
2517         if (nfsd4_has_session(cstate))
2518                 return nfs_ok;
2519         if (seqid == so->so_seqid - 1)
2520                 return nfserr_replay_me;
2521         if (seqid == so->so_seqid)
2522                 return nfs_ok;
2523         return nfserr_bad_seqid;
2524 }
2525
/*
 * First phase of OPEN processing: look up (or create) the open owner
 * named in the request and attach it to @open->op_openowner, renewing
 * the owning client's lease.  Unconfirmed owners are replaced without a
 * seqid check; confirmed owners get normal seqid/replay validation.
 */
__be32
nfsd4_process_open1(struct nfsd4_compound_state *cstate,
		    struct nfsd4_open *open)
{
	clientid_t *clientid = &open->op_clientid;
	struct nfs4_client *clp = NULL;
	unsigned int strhashval;
	struct nfs4_openowner *oo = NULL;
	__be32 status;

	if (!check_name(open->op_owner))
		return nfserr_inval;

	if (STALE_CLIENTID(&open->op_clientid))
		return nfserr_stale_clientid;

	strhashval = open_ownerstr_hashval(clientid->cl_id, &open->op_owner);
	oo = find_openstateowner_str(strhashval, open);
	open->op_openowner = oo;
	if (!oo) {
		/* Make sure the client's lease hasn't expired. */
		clp = find_confirmed_client(clientid);
		if (clp == NULL)
			return nfserr_expired;
		goto renew;
	}
	if (!oo->oo_confirmed) {
		/* Replace unconfirmed owners without checking for replay. */
		clp = oo->oo_owner.so_client;
		release_openowner(oo);
		open->op_openowner = NULL;
		goto renew;
	}
	status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
	if (status)
		return status;
renew:
	/* Allocate a new owner when none was found (or it was replaced). */
	if (open->op_openowner == NULL) {
		oo = alloc_init_open_stateowner(strhashval, clp, open);
		if (oo == NULL)
			return nfserr_jukebox;
		open->op_openowner = oo;
	}
	/* An owner being reused is clearly still live: off the close LRU. */
	list_del_init(&oo->oo_close_lru);
	renew_client(oo->oo_owner.so_client);
	return nfs_ok;
}
2573
2574 static inline __be32
2575 nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
2576 {
2577         if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
2578                 return nfserr_openmode;
2579         else
2580                 return nfs_ok;
2581 }
2582
2583 static int share_access_to_flags(u32 share_access)
2584 {
2585         share_access &= ~NFS4_SHARE_WANT_MASK;
2586
2587         return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
2588 }
2589
2590 static struct nfs4_delegation *find_deleg_stateid(stateid_t *s)
2591 {
2592         struct nfs4_stid *ret;
2593
2594         ret = find_stateid_by_type(s, NFS4_DELEG_STID);
2595         if (!ret)
2596                 return NULL;
2597         return delegstateid(ret);
2598 }
2599
/*
 * Look up the delegation stateid presented in an OPEN and check that its
 * type permits the requested share access.  On success *dp points to the
 * delegation; on failure *dp is left NULL.
 *
 * Only NFS4_OPEN_CLAIM_DELEGATE_CUR actually requires a valid delegation;
 * for every other claim type a failed lookup or mode check is ignored and
 * nfs_ok is returned.
 */
static __be32
nfs4_check_deleg(struct nfs4_file *fp, struct nfsd4_open *open,
                struct nfs4_delegation **dp)
{
        int flags;
        __be32 status = nfserr_bad_stateid;

        *dp = find_deleg_stateid(&open->op_delegate_stateid);
        if (*dp == NULL)
                goto out;
        flags = share_access_to_flags(open->op_share_access);
        status = nfs4_check_delegmode(*dp, flags);
        if (status)
                *dp = NULL;
out:
        if (open->op_claim_type != NFS4_OPEN_CLAIM_DELEGATE_CUR)
                return nfs_ok;
        if (status)
                return status;
        /* A successful CLAIM_DELEGATE_CUR implicitly confirms the openowner. */
        open->op_openowner->oo_confirmed = 1;
        return nfs_ok;
}
2622
/*
 * Scan all open stateids on the file: remember the one belonging to this
 * openowner (an OPEN upgrade), and fail with nfserr_share_denied if any
 * existing open's share reservation conflicts with the new request.
 */
static __be32
nfs4_check_open(struct nfs4_file *fp, struct nfsd4_open *open, struct nfs4_ol_stateid **stpp)
{
        struct nfs4_ol_stateid *local;
        struct nfs4_openowner *oo = open->op_openowner;

        list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
                /* ignore lock owners */
                if (local->st_stateowner->so_is_open_owner == 0)
                        continue;
                /* remember if we have seen this open owner */
                if (local->st_stateowner == &oo->oo_owner)
                        *stpp = local;
                /* check for conflicting share reservations */
                if (!test_share(local, open))
                        return nfserr_share_denied;
        }
        return nfs_ok;
}
2642
/* Allocate an uninitialized open/lock stateid from the slab cache. */
static inline struct nfs4_ol_stateid *
nfs4_alloc_stateid(void)
{
        return kmem_cache_alloc(stateid_slab, GFP_KERNEL);
}
2648
2649 static inline int nfs4_access_to_access(u32 nfs4_access)
2650 {
2651         int flags = 0;
2652
2653         if (nfs4_access & NFS4_SHARE_ACCESS_READ)
2654                 flags |= NFSD_MAY_READ;
2655         if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
2656                 flags |= NFSD_MAY_WRITE;
2657         return flags;
2658 }
2659
/*
 * Open (or reuse) the struct file backing an NFSv4 OPEN and take a
 * reference on the nfs4_file for the requested open mode.  The struct
 * file for each open mode is cached in fp->fi_fds[] and shared by all
 * opens of that mode.
 */
static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
                struct svc_fh *cur_fh, struct nfsd4_open *open)
{
        __be32 status;
        int oflag = nfs4_access_to_omode(open->op_share_access);
        int access = nfs4_access_to_access(open->op_share_access);

        /* CLAIM_DELEGATE_CUR is used in response to a broken lease;
         * allowing it to break the lease and return EAGAIN leaves the
         * client unable to make progress in returning the delegation */
        if (open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR)
                access |= NFSD_MAY_NOT_BREAK_LEASE;

        if (!fp->fi_fds[oflag]) {
                status = nfsd_open(rqstp, cur_fh, S_IFREG, access,
                        &fp->fi_fds[oflag]);
                if (status)
                        return status;
        }
        /* Bump the per-mode access count on the nfs4_file. */
        nfs4_file_get_access(fp, oflag);

        return nfs_ok;
}
2683
2684 static __be32
2685 nfs4_new_open(struct svc_rqst *rqstp, struct nfs4_ol_stateid **stpp,
2686                 struct nfs4_file *fp, struct svc_fh *cur_fh,
2687                 struct nfsd4_open *open)
2688 {
2689         struct nfs4_ol_stateid *stp;
2690         __be32 status;
2691
2692         stp = nfs4_alloc_stateid();
2693         if (stp == NULL)
2694                 return nfserr_jukebox;
2695
2696         status = nfs4_get_vfs_file(rqstp, fp, cur_fh, open);
2697         if (status) {
2698                 kmem_cache_free(stateid_slab, stp);
2699                 return status;
2700         }
2701         *stpp = stp;
2702         return 0;
2703 }
2704
2705 static inline __be32
2706 nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
2707                 struct nfsd4_open *open)
2708 {
2709         struct iattr iattr = {
2710                 .ia_valid = ATTR_SIZE,
2711                 .ia_size = 0,
2712         };
2713         if (!open->op_truncate)
2714                 return 0;
2715         if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
2716                 return nfserr_inval;
2717         return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0);
2718 }
2719
/*
 * Upgrade an existing open stateid with additional access/deny bits.
 * Acquires new file access only if this stateid did not already have the
 * requested mode, and backs that access out again if the implied truncate
 * fails, so a failed upgrade leaves the stateid unchanged.
 */
static __be32
nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open)
{
        u32 op_share_access = open->op_share_access & ~NFS4_SHARE_WANT_MASK;
        bool new_access;
        __be32 status;

        new_access = !test_bit(op_share_access, &stp->st_access_bmap);
        if (new_access) {
                status = nfs4_get_vfs_file(rqstp, fp, cur_fh, open);
                if (status)
                        return status;
        }
        status = nfsd4_truncate(rqstp, cur_fh, open);
        if (status) {
                /* Undo the access taken above; only ours, not preexisting. */
                if (new_access) {
                        int oflag = nfs4_access_to_omode(op_share_access);
                        nfs4_file_put_access(fp, oflag);
                }
                return status;
        }
        /* remember the open */
        __set_bit(op_share_access, &stp->st_access_bmap);
        __set_bit(open->op_share_deny, &stp->st_deny_bmap);

        return nfs_ok;
}
2747
2748
2749 static void
2750 nfs4_set_claim_prev(struct nfsd4_open *open)
2751 {
2752         open->op_openowner->oo_confirmed = 1;
2753         open->op_openowner->oo_owner.so_client->cl_firststate = 1;
2754 }
2755
2756 /* Should we give out recallable state?: */
2757 static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
2758 {
2759         if (clp->cl_cb_state == NFSD4_CB_UP)
2760                 return true;
2761         /*
2762          * In the sessions case, since we don't have to establish a
2763          * separate connection for callbacks, we assume it's OK
2764          * until we hear otherwise:
2765          */
2766         return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
2767 }
2768
2769 static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp, int flag)
2770 {
2771         struct file_lock *fl;
2772
2773         fl = locks_alloc_lock();
2774         if (!fl)
2775                 return NULL;
2776         locks_init_lock(fl);
2777         fl->fl_lmops = &nfsd_lease_mng_ops;
2778         fl->fl_flags = FL_LEASE;
2779         fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK;
2780         fl->fl_end = OFFSET_MAX;
2781         fl->fl_owner = (fl_owner_t)(dp->dl_file);
2782         fl->fl_pid = current->tgid;
2783         return fl;
2784 }
2785
/*
 * Install the first lease on a file for delegation purposes and link the
 * delegation onto the client and file lists.  Called only when the file
 * has no lease yet (see nfs4_set_delegation()).
 */
static int nfs4_setlease(struct nfs4_delegation *dp, int flag)
{
        struct nfs4_file *fp = dp->dl_file;
        struct file_lock *fl;
        int status;

        fl = nfs4_alloc_init_lease(dp, flag);
        if (!fl)
                return -ENOMEM;
        fl->fl_file = find_readable_file(fp);
        list_add(&dp->dl_perclnt, &dp->dl_client->cl_delegations);
        status = vfs_setlease(fl->fl_file, fl->fl_type, &fl);
        if (status) {
                list_del_init(&dp->dl_perclnt);
                locks_free_lock(fl);
                /*
                 * NOTE(review): any vfs_setlease() failure is collapsed to
                 * -ENOMEM here rather than propagating 'status' — confirm
                 * this is intentional.
                 */
                return -ENOMEM;
        }
        fp->fi_lease = fl;
        fp->fi_deleg_file = fl->fl_file;
        get_file(fp->fi_deleg_file);
        atomic_set(&fp->fi_delegees, 1);
        list_add(&dp->dl_perfile, &fp->fi_delegations);
        return 0;
}
2810
/*
 * Attach a delegation to its file.  The first delegation on a file sets
 * up the lease; later ones just share it and bump fi_delegees, checking
 * under recall_lock that no conflict has occurred in the meantime.
 */
static int nfs4_set_delegation(struct nfs4_delegation *dp, int flag)
{
        struct nfs4_file *fp = dp->dl_file;

        if (!fp->fi_lease)
                return nfs4_setlease(dp, flag);
        spin_lock(&recall_lock);
        if (fp->fi_had_conflict) {
                /* Lease was broken while we weren't looking; give up. */
                spin_unlock(&recall_lock);
                return -EAGAIN;
        }
        atomic_inc(&fp->fi_delegees);
        list_add(&dp->dl_perfile, &fp->fi_delegations);
        spin_unlock(&recall_lock);
        list_add(&dp->dl_perclnt, &dp->dl_client->cl_delegations);
        return 0;
}
2828
2829 /*
2830  * Attempt to hand out a delegation.
2831  */
2832 static void
2833 nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_ol_stateid *stp)
2834 {
2835         struct nfs4_delegation *dp;
2836         struct nfs4_openowner *oo = container_of(stp->st_stateowner, struct nfs4_openowner, oo_owner);
2837         int cb_up;
2838         int status, flag = 0;
2839
2840         cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
2841         flag = NFS4_OPEN_DELEGATE_NONE;
2842         open->op_recall = 0;
2843         switch (open->op_claim_type) {
2844                 case NFS4_OPEN_CLAIM_PREVIOUS:
2845                         if (!cb_up)
2846                                 open->op_recall = 1;
2847                         flag = open->op_delegate_type;
2848                         if (flag == NFS4_OPEN_DELEGATE_NONE)
2849                                 goto out;
2850                         break;
2851                 case NFS4_OPEN_CLAIM_NULL:
2852                         /* Let's not give out any delegations till everyone's
2853                          * had the chance to reclaim theirs.... */
2854                         if (locks_in_grace())
2855                                 goto out;
2856                         if (!cb_up || !oo->oo_confirmed)
2857                                 goto out;
2858                         if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
2859                                 flag = NFS4_OPEN_DELEGATE_WRITE;
2860                         else
2861                                 flag = NFS4_OPEN_DELEGATE_READ;
2862                         break;
2863                 default:
2864                         goto out;
2865         }
2866
2867         dp = alloc_init_deleg(oo->oo_owner.so_client, stp, fh, flag);
2868         if (dp == NULL)
2869                 goto out_no_deleg;
2870         status = nfs4_set_delegation(dp, flag);
2871         if (status)
2872                 goto out_free;
2873
2874         memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
2875
2876         dprintk("NFSD: delegation stateid=" STATEID_FMT "\n",
2877                 STATEID_VAL(&dp->dl_stid.sc_stateid));
2878 out:
2879         if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS
2880                         && flag == NFS4_OPEN_DELEGATE_NONE
2881                         && open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE)
2882                 dprintk("NFSD: WARNING: refusing delegation reclaim\n");
2883         open->op_delegate_type = flag;
2884         return;
2885 out_free:
2886         nfs4_put_delegation(dp);
2887 out_no_deleg:
2888         flag = NFS4_OPEN_DELEGATE_NONE;
2889         goto out;
2890 }
2891
2892 /*
2893  * called with nfs4_lock_state() held.
2894  */
2895 __be32
2896 nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
2897 {
2898         struct nfsd4_compoundres *resp = rqstp->rq_resp;
2899         struct nfs4_file *fp = NULL;
2900         struct inode *ino = current_fh->fh_dentry->d_inode;
2901         struct nfs4_ol_stateid *stp = NULL;
2902         struct nfs4_delegation *dp = NULL;
2903         __be32 status;
2904
2905         status = nfserr_inval;
2906         if (!access_valid(open->op_share_access, resp->cstate.minorversion)
2907                         || !deny_valid(open->op_share_deny))
2908                 goto out;
2909         /*
2910          * Lookup file; if found, lookup stateid and check open request,
2911          * and check for delegations in the process of being recalled.
2912          * If not found, create the nfs4_file struct
2913          */
2914         fp = find_file(ino);
2915         if (fp) {
2916                 if ((status = nfs4_check_open(fp, open, &stp)))
2917                         goto out;
2918                 status = nfs4_check_deleg(fp, open, &dp);
2919                 if (status)
2920                         goto out;
2921         } else {
2922                 status = nfserr_bad_stateid;
2923                 if (open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR)
2924                         goto out;
2925                 status = nfserr_jukebox;
2926                 fp = alloc_init_file(ino);
2927                 if (fp == NULL)
2928                         goto out;
2929         }
2930
2931         /*
2932          * OPEN the file, or upgrade an existing OPEN.
2933          * If truncate fails, the OPEN fails.
2934          */
2935         if (stp) {
2936                 /* Stateid was found, this is an OPEN upgrade */
2937                 status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
2938                 if (status)
2939                         goto out;
2940         } else {
2941                 status = nfs4_new_open(rqstp, &stp, fp, current_fh, open);
2942                 if (status)
2943                         goto out;
2944                 init_open_stateid(stp, fp, open);
2945                 status = nfsd4_truncate(rqstp, current_fh, open);
2946                 if (status) {
2947                         release_open_stateid(stp);
2948                         goto out;
2949                 }
2950         }
2951         update_stateid(&stp->st_stid.sc_stateid);
2952         memcpy(&open->op_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
2953
2954         if (nfsd4_has_session(&resp->cstate))
2955                 open->op_openowner->oo_confirmed = 1;
2956
2957         /*
2958         * Attempt to hand out a delegation. No error return, because the
2959         * OPEN succeeds even if we fail.
2960         */
2961         nfs4_open_delegation(current_fh, open, stp);
2962
2963         status = nfs_ok;
2964
2965         dprintk("%s: stateid=" STATEID_FMT "\n", __func__,
2966                 STATEID_VAL(&stp->st_stid.sc_stateid));
2967 out:
2968         if (fp)
2969                 put_nfs4_file(fp);
2970         if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
2971                 nfs4_set_claim_prev(open);
2972         /*
2973         * To finish the open response, we just need to set the rflags.
2974         */
2975         open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
2976         if (!open->op_openowner->oo_confirmed &&
2977             !nfsd4_has_session(&resp->cstate))
2978                 open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;
2979
2980         return status;
2981 }
2982
/*
 * RENEW: refresh the client's lease.  Reports nfserr_cb_path_down when
 * the client holds delegations but its callback channel is not up, so it
 * knows delegations may be recalled unsuccessfully.
 */
__be32
nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
            clientid_t *clid)
{
        struct nfs4_client *clp;
        __be32 status;

        nfs4_lock_state();
        dprintk("process_renew(%08x/%08x): starting\n", 
                        clid->cl_boot, clid->cl_id);
        status = nfserr_stale_clientid;
        if (STALE_CLIENTID(clid))
                goto out;
        clp = find_confirmed_client(clid);
        status = nfserr_expired;
        if (clp == NULL) {
                /* We assume the client took too long to RENEW. */
                dprintk("nfsd4_renew: clientid not found!\n");
                goto out;
        }
        renew_client(clp);
        status = nfserr_cb_path_down;
        if (!list_empty(&clp->cl_delegations)
                        && clp->cl_cb_state != NFSD4_CB_UP)
                goto out;
        status = nfs_ok;
out:
        nfs4_unlock_state();
        return status;
}
3013
/* Lock-manager token tracking nfsd's NFSv4 grace period. */
static struct lock_manager nfsd4_manager = {
};
3016
/* End the NFSv4 grace period: purge stale recovery records and stop
 * blocking non-reclaim lock/open requests. */
static void
nfsd4_end_grace(void)
{
        dprintk("NFSD: end of grace period\n");
        nfsd4_recdir_purge_old();
        locks_end_grace(&nfsd4_manager);
        /*
         * Now that every NFSv4 client has had the chance to recover and
         * to see the (possibly new, possibly shorter) lease time, we
         * can safely set the next grace time to the current lease time:
         */
        nfsd4_grace = nfsd4_lease;
}
3030
/*
 * Periodic state reaper.  Expires clients whose leases have run out,
 * reaps timed-out delegations on the recall list, and purges openowners
 * that lingered on the close LRU.  Returns the number of seconds until
 * the laundromat should run again.
 *
 * Each list is scanned under its lock and the victims moved to a private
 * reaplist so the expensive teardown happens without the spinlock held.
 */
static time_t
nfs4_laundromat(void)
{
        struct nfs4_client *clp;
        struct nfs4_openowner *oo;
        struct nfs4_delegation *dp;
        struct list_head *pos, *next, reaplist;
        time_t cutoff = get_seconds() - nfsd4_lease;
        time_t t, clientid_val = nfsd4_lease;
        time_t u, test_val = nfsd4_lease;

        nfs4_lock_state();

        dprintk("NFSD: laundromat service - starting\n");
        if (locks_in_grace())
                nfsd4_end_grace();
        INIT_LIST_HEAD(&reaplist);
        spin_lock(&client_lock);
        list_for_each_safe(pos, next, &client_lru) {
                clp = list_entry(pos, struct nfs4_client, cl_lru);
                if (time_after((unsigned long)clp->cl_time, (unsigned long)cutoff)) {
                        /* LRU order: everything after this is newer too. */
                        t = clp->cl_time - cutoff;
                        if (clientid_val > t)
                                clientid_val = t;
                        break;
                }
                if (atomic_read(&clp->cl_refcount)) {
                        dprintk("NFSD: client in use (clientid %08x)\n",
                                clp->cl_clientid.cl_id);
                        continue;
                }
                unhash_client_locked(clp);
                list_add(&clp->cl_lru, &reaplist);
        }
        spin_unlock(&client_lock);
        list_for_each_safe(pos, next, &reaplist) {
                clp = list_entry(pos, struct nfs4_client, cl_lru);
                dprintk("NFSD: purging unused client (clientid %08x)\n",
                        clp->cl_clientid.cl_id);
                nfsd4_remove_clid_dir(clp);
                expire_client(clp);
        }
        spin_lock(&recall_lock);
        list_for_each_safe(pos, next, &del_recall_lru) {
                dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
                if (time_after((unsigned long)dp->dl_time, (unsigned long)cutoff)) {
                        u = dp->dl_time - cutoff;
                        if (test_val > u)
                                test_val = u;
                        break;
                }
                list_move(&dp->dl_recall_lru, &reaplist);
        }
        spin_unlock(&recall_lock);
        list_for_each_safe(pos, next, &reaplist) {
                dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
                list_del_init(&dp->dl_recall_lru);
                unhash_delegation(dp);
        }
        test_val = nfsd4_lease;
        list_for_each_safe(pos, next, &close_lru) {
                oo = container_of(pos, struct nfs4_openowner, oo_close_lru);
                if (time_after((unsigned long)oo->oo_time, (unsigned long)cutoff)) {
                        u = oo->oo_time - cutoff;
                        if (test_val > u)
                                test_val = u;
                        break;
                }
                dprintk("NFSD: purging unused open stateowner (so_id %d)\n",
                        oo->oo_owner.so_id);
                release_openowner(oo);
        }
        /* Don't reschedule ourselves more often than the minimum. */
        if (clientid_val < NFSD_LAUNDROMAT_MINTIMEOUT)
                clientid_val = NFSD_LAUNDROMAT_MINTIMEOUT;
        nfs4_unlock_state();
        return clientid_val;
}
3108
/* Workqueue and delayed work item driving the periodic laundromat;
 * laundromat_main() requeues itself with the interval it computes. */
static struct workqueue_struct *laundry_wq;
static void laundromat_main(struct work_struct *);
static DECLARE_DELAYED_WORK(laundromat_work, laundromat_main);
3112
3113 static void
3114 laundromat_main(struct work_struct *not_used)
3115 {
3116         time_t t;
3117
3118         t = nfs4_laundromat();
3119         dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t);
3120         queue_delayed_work(laundry_wq, &laundromat_work, t*HZ);
3121 }
3122
3123 static struct nfs4_openowner * search_close_lru(u32 st_id)
3124 {
3125         struct nfs4_openowner *local;
3126
3127         list_for_each_entry(local, &close_lru, oo_close_lru) {
3128                 if (local->oo_owner.so_id == st_id)
3129                         return local;
3130         }
3131         return NULL;
3132 }
3133
/* Non-zero when the filehandle does not refer to the stateid's file. */
static inline int
nfs4_check_fh(struct svc_fh *fhp, struct nfs4_ol_stateid *stp)
{
        return fhp->fh_dentry->d_inode != stp->st_file->fi_inode;
}
3139
3140 static int
3141 STALE_STATEID(stateid_t *stateid)
3142 {
3143         if (stateid->si_boot == boot_time)
3144                 return 0;
3145         dprintk("NFSD: stale stateid " STATEID_FMT "!\n",
3146                 STATEID_VAL(stateid));
3147         return 1;
3148 }
3149
3150 static inline int
3151 access_permit_read(unsigned long access_bmap)
3152 {
3153         return test_bit(NFS4_SHARE_ACCESS_READ, &access_bmap) ||
3154                 test_bit(NFS4_SHARE_ACCESS_BOTH, &access_bmap) ||
3155                 test_bit(NFS4_SHARE_ACCESS_WRITE, &access_bmap);
3156 }
3157
3158 static inline int
3159 access_permit_write(unsigned long access_bmap)
3160 {
3161         return test_bit(NFS4_SHARE_ACCESS_WRITE, &access_bmap) ||
3162                 test_bit(NFS4_SHARE_ACCESS_BOTH, &access_bmap);
3163 }
3164
3165 static
3166 __be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
3167 {
3168         __be32 status = nfserr_openmode;
3169
3170         /* For lock stateid's, we test the parent open, not the lock: */
3171         if (stp->st_openstp)
3172                 stp = stp->st_openstp;
3173         if ((flags & WR_STATE) && (!access_permit_write(stp->st_access_bmap)))
3174                 goto out;
3175         if ((flags & RD_STATE) && (!access_permit_read(stp->st_access_bmap)))
3176                 goto out;
3177         status = nfs_ok;
3178 out:
3179         return status;
3180 }
3181
/*
 * Handle the reserved all-zeros and all-ones stateids, which bypass
 * normal stateid checking but must still respect the grace period and
 * any conflicting share reservations on the file.
 */
static inline __be32
check_special_stateids(svc_fh *current_fh, stateid_t *stateid, int flags)
{
        if (ONE_STATEID(stateid) && (flags & RD_STATE))
                return nfs_ok;
        else if (locks_in_grace()) {
                /* Answer in remaining cases depends on existence of
                 * conflicting state; so we must wait out the grace period. */
                return nfserr_grace;
        } else if (flags & WR_STATE)
                return nfs4_share_conflict(current_fh,
                                NFS4_SHARE_DENY_WRITE);
        else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
                return nfs4_share_conflict(current_fh,
                                NFS4_SHARE_DENY_READ);
}
3198
3199 /*
3200  * Allow READ/WRITE during grace period on recovered state only for files
3201  * that are not able to provide mandatory locking.
3202  */
3203 static inline int
3204 grace_disallows_io(struct inode *inode)
3205 {
3206         return locks_in_grace() && mandatory_lock(inode);
3207 }
3208
/* Returns true iff a is later than b: */
static bool stateid_generation_after(stateid_t *a, stateid_t *b)
{
        /* Signed difference so the comparison survives generation wraparound. */
        return (s32)a->si_generation - (s32)b->si_generation > 0;
}
3214
/*
 * Compare the generation of a client-supplied stateid against the
 * server's current one for that state.
 *
 * NOTE(review): declared as returning int but actually returns __be32
 * status codes (nfs_ok/nfserr_*) — confirm whether the return type
 * should be __be32.
 */
static int check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
{
        /*
         * When sessions are used the stateid generation number is ignored
         * when it is zero.
         */
        if (has_session && in->si_generation == 0)
                return nfs_ok;

        if (in->si_generation == ref->si_generation)
                return nfs_ok;

        /* If the client sends us a stateid from the future, it's buggy: */
        if (stateid_generation_after(in, ref))
                return nfserr_bad_stateid;
        /*
         * However, we could see a stateid from the past, even from a
         * non-buggy client.  For example, if the client sends a lock
         * while some IO is outstanding, the lock may bump si_generation
         * while the IO is still in flight.  The client could avoid that
         * situation by waiting for responses on all the IO requests,
         * but better performance may result in retrying IO that
         * receives an old_stateid error if requests are rarely
         * reordered in flight:
         */
        return nfserr_old_stateid;
}
3242
/* Delegation stateids are distinguished by a zero si_fileid. */
static int is_delegation_stateid(stateid_t *stateid)
{
        return stateid->si_fileid == 0;
}
3247
/*
 * Validate a stateid without dereferencing it into a file: staleness,
 * existence, generation, and (for open/lock stateids) that the owning
 * openowner has been confirmed.
 */
__be32 nfs4_validate_stateid(stateid_t *stateid, bool has_session)
{
        struct nfs4_stid *s;
        struct nfs4_ol_stateid *ols;
        __be32 status;

        if (STALE_STATEID(stateid))
                return nfserr_stale_stateid;

        s = find_stateid(stateid);
        if (!s)
                 return nfserr_stale_stateid;
        status = check_stateid_generation(stateid, &s->sc_stateid, has_session);
        if (status)
                return status;
        /* Delegation and other stateid types need no further checks. */
        if (!(s->sc_type & (NFS4_OPEN_STID | NFS4_LOCK_STID)))
                return nfs_ok;
        ols = openlockstateid(s);
        /* An open stateid from an unconfirmed openowner is unusable. */
        if (ols->st_stateowner->so_is_open_owner
            && !openowner(ols->st_stateowner)->oo_confirmed)
                return nfserr_bad_stateid;
        return nfs_ok;
}
3271
3272 /*
3273 * Checks for stateid operations
3274 */
3275 __be32
3276 nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate,
3277                            stateid_t *stateid, int flags, struct file **filpp)
3278 {
3279         struct nfs4_stid *s;
3280         struct nfs4_ol_stateid *stp = NULL;
3281         struct nfs4_delegation *dp = NULL;
3282         struct svc_fh *current_fh = &cstate->current_fh;
3283         struct inode *ino = current_fh->fh_dentry->d_inode;
3284         __be32 status;
3285
3286         if (filpp)
3287                 *filpp = NULL;
3288
3289         if (grace_disallows_io(ino))
3290                 return nfserr_grace;
3291
3292         if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
3293                 return check_special_stateids(current_fh, stateid, flags);
3294
3295         status = nfserr_stale_stateid;
3296         if (STALE_STATEID(stateid)) 
3297                 goto out;
3298
3299         /*
3300          * We assume that any stateid that has the current boot time,
3301          * but that we can't find, is expired:
3302          */
3303         status = nfserr_expired;
3304         s = find_stateid(stateid);
3305         if (!s)
3306                 goto out;
3307         status = check_stateid_generation(stateid, &s->sc_stateid, nfsd4_has_session(cstate));
3308         if (status)
3309                 goto out;
3310         if (s->sc_type == NFS4_DELEG_STID) {
3311                 dp = delegstateid(s);
3312                 status = nfs4_check_delegmode(dp, flags);
3313                 if (status)
3314                         goto out;
3315                 renew_client(dp->dl_client);
3316                 if (filpp) {
3317                         *filpp = dp->dl_file->fi_deleg_file;
3318                         BUG_ON(!*filpp);
3319                 }
3320         } else { /* open or lock stateid */
3321                 stp = openlockstateid(s);
3322                 status = nfserr_bad_stateid;
3323                 if (nfs4_check_fh(current_fh, stp))
3324                         goto out;
3325                 if (stp->st_stateowner->so_is_open_owner
3326                     && !openowner(stp->st_stateowner)->oo_confirmed)
3327                         goto out;
3328                 status = nfs4_check_openmode(stp, flags);
3329                 if (status)
3330                         goto out;
3331                 renew_client(stp->st_stateowner->so_client);
3332                 if (filpp) {
3333                         if (flags & RD_STATE)
3334                                 *filpp = find_readable_file(stp->st_file);
3335                         else
3336                                 *filpp = find_writeable_file(stp->st_file);
3337                 }
3338         }
3339         status = nfs_ok;
3340 out:
3341         return status;
3342 }
3343
3344 static __be32
3345 nfsd4_free_delegation_stateid(stateid_t *stateid)
3346 {
3347         struct nfs4_delegation *dp = find_deleg_stateid(stateid);
3348         if (dp)
3349                 return nfserr_locks_held;
3350
3351         return nfserr_bad_stateid;
3352 }
3353
/*
 * Free a lock stateid, but only if its lockowner holds no locks on the
 * file any more.
 */
static __be32
nfsd4_free_lock_stateid(struct nfs4_ol_stateid *stp)
{
        if (check_for_locks(stp->st_file, lockowner(stp->st_stateowner)))
                return nfserr_locks_held;
        release_lock_stateid(stp);
        return nfs_ok;
}
3362
3363 /*
3364  * Test if the stateid is valid
3365  */
3366 __be32
3367 nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3368                    struct nfsd4_test_stateid *test_stateid)
3369 {
3370         test_stateid->ts_has_session = nfsd4_has_session(cstate);
3371         return nfs_ok;
3372 }
3373
3374 /*
3375  * Free a state id
3376  */
3377 __be32
3378 nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3379                    struct nfsd4_free_stateid *free_stateid)
3380 {
3381         stateid_t *stateid = &free_stateid->fr_stateid;
3382         struct nfs4_ol_stateid *stp;
3383         __be32 ret;
3384
3385         nfs4_lock_state();
3386         if (is_delegation_stateid(stateid)) {
3387                 ret = nfsd4_free_delegation_stateid(stateid);
3388                 goto out;
3389         }
3390
3391         stp = find_ol_stateid(stateid);
3392         if (!stp) {
3393                 ret = nfserr_bad_stateid;
3394                 goto out;
3395         }
3396         ret = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, 1);
3397         if (ret)
3398                 goto out;
3399
3400         if (stp->st_stid.sc_type == NFS4_OPEN_STID) {
3401                 ret = nfserr_locks_held;
3402                 goto out;
3403         } else {
3404                 ret = nfsd4_free_lock_stateid(stp);
3405                 goto out;
3406         }
3407
3408 out:
3409         nfs4_unlock_state();
3410         return ret;
3411 }
3412
3413 static inline int
3414 setlkflg (int type)
3415 {
3416         return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
3417                 RD_STATE : WR_STATE;
3418 }
3419
3420 static __be32 nfs4_nospecial_stateid_checks(stateid_t *stateid)
3421 {
3422         if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
3423                 return nfserr_bad_stateid;
3424         if (STALE_STATEID(stateid))
3425                 return nfserr_stale_stateid;
3426         return nfs_ok;
3427 }
3428
/*
 * Common checks for a seqid-mutating operation on an open/lock stateid:
 * filehandle match, owner seqid, then stateid generation.
 */
static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
{
	struct svc_fh *current_fh = &cstate->current_fh;
	struct nfs4_stateowner *sop = stp->st_stateowner;
	__be32 status;

	/* The stateid must belong to the file the compound is using: */
	if (nfs4_check_fh(current_fh, stp))
		return nfserr_bad_stateid;
	/* Verify the owner's sequence id before the stateid generation: */
	status = nfsd4_check_seqid(cstate, sop, seqid);
	if (status)
		return status;
	return check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
}
3442
3443 /* 
3444  * Checks for sequence id mutating operations. 
3445  */
static __be32
nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
			 stateid_t *stateid, char typemask,
			 struct nfs4_ol_stateid **stpp)
{
	__be32 status;

	dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__,
		seqid, STATEID_VAL(stateid));

	*stpp = NULL;
	/* Reject special and stale stateids up front: */
	status = nfs4_nospecial_stateid_checks(stateid);
	if (status)
		return status;
	*stpp = find_ol_stateid_by_type(stateid, typemask);
	if (*stpp == NULL)
		return nfserr_expired;
	/* Remember the owner for possible seqid replay handling, and
	 * renew the client so it doesn't expire while we work: */
	cstate->replay_owner = (*stpp)->st_stateowner;
	renew_client((*stpp)->st_stateowner->so_client);

	return nfs4_seqid_op_checks(cstate, stateid, seqid, *stpp);
}
3468
/*
 * Like nfs4_preprocess_seqid_op(), but restricted to open stateids whose
 * openowner has already been confirmed.
 */
static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid, stateid_t *stateid, struct nfs4_ol_stateid **stpp)
{
	__be32 status;
	struct nfs4_openowner *oo;

	status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
						NFS4_OPEN_STID, stpp);
	if (status)
		return status;
	oo = openowner((*stpp)->st_stateowner);
	if (!oo->oo_confirmed)
		return nfserr_bad_stateid;
	return nfs_ok;
}
3483
/* OPEN_CONFIRM: confirm a freshly created openowner. */
__be32
nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		   struct nfsd4_open_confirm *oc)
{
	__be32 status;
	struct nfs4_openowner *oo;
	struct nfs4_ol_stateid *stp;

	dprintk("NFSD: nfsd4_open_confirm on file %.*s\n",
			(int)cstate->current_fh.fh_dentry->d_name.len,
			cstate->current_fh.fh_dentry->d_name.name);

	status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
	if (status)
		return status;

	nfs4_lock_state();

	status = nfs4_preprocess_seqid_op(cstate,
					oc->oc_seqid, &oc->oc_req_stateid,
					NFS4_OPEN_STID, &stp);
	if (status)
		goto out;
	oo = openowner(stp->st_stateowner);
	/* Confirming an already-confirmed owner is a protocol error: */
	status = nfserr_bad_stateid;
	if (oo->oo_confirmed)
		goto out;
	oo->oo_confirmed = 1;
	/* Bump the stateid and return the new value to the client: */
	update_stateid(&stp->st_stid.sc_stateid);
	memcpy(&oc->oc_resp_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
	dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
		__func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));

	nfsd4_create_clid_dir(oo->oo_owner.so_client);
	status = nfs_ok;
out:
	/* On seqid replay, keep the state lock held until after encode: */
	if (!cstate->replay_owner)
		nfs4_unlock_state();
	return status;
}
3524
3525 static inline void nfs4_file_downgrade(struct nfs4_ol_stateid *stp, unsigned int to_access)
3526 {
3527         int i;
3528
3529         for (i = 1; i < 4; i++) {
3530                 if (test_bit(i, &stp->st_access_bmap) && !(i & to_access)) {
3531                         nfs4_file_put_access(stp->st_file, i);
3532                         __clear_bit(i, &stp->st_access_bmap);
3533                 }
3534         }
3535 }
3536
/* Clear every deny bit in @bmap that is not a subset of @deny. */
static void
reset_union_bmap_deny(unsigned long deny, unsigned long *bmap)
{
	int i;

	for (i = 0; i < 4; i++) {
		if ((i & deny) == i)
			continue;
		__clear_bit(i, bmap);
	}
}
3546
__be32
nfsd4_open_downgrade(struct svc_rqst *rqstp,
		     struct nfsd4_compound_state *cstate,
		     struct nfsd4_open_downgrade *od)
{
	__be32 status;
	struct nfs4_ol_stateid *stp;

	dprintk("NFSD: nfsd4_open_downgrade on file %.*s\n",
			(int)cstate->current_fh.fh_dentry->d_name.len,
			cstate->current_fh.fh_dentry->d_name.name);

	if (!access_valid(od->od_share_access, cstate->minorversion)
			|| !deny_valid(od->od_share_deny))
		return nfserr_inval;

	nfs4_lock_state();
	status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
					&od->od_stateid, &stp);
	if (status)
		goto out;
	/* The requested access and deny must each be a subset of what
	 * was previously granted on this stateid: */
	status = nfserr_inval;
	if (!test_bit(od->od_share_access, &stp->st_access_bmap)) {
		dprintk("NFSD:access not a subset current bitmap: 0x%lx, input access=%08x\n",
			stp->st_access_bmap, od->od_share_access);
		goto out;
	}
	if (!test_bit(od->od_share_deny, &stp->st_deny_bmap)) {
		dprintk("NFSD:deny not a subset current bitmap: 0x%lx, input deny=%08x\n",
			stp->st_deny_bmap, od->od_share_deny);
		goto out;
	}
	/* Release file access we no longer need, trim the deny bitmap,
	 * and hand back the bumped stateid: */
	nfs4_file_downgrade(stp, od->od_share_access);

	reset_union_bmap_deny(od->od_share_deny, &stp->st_deny_bmap);

	update_stateid(&stp->st_stid.sc_stateid);
	memcpy(&od->od_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
	status = nfs_ok;
out:
	/* On seqid replay, keep the state lock held until after encode: */
	if (!cstate->replay_owner)
		nfs4_unlock_state();
	return status;
}
3591
3592 /*
3593  * nfs4_unlock_state() called after encode
3594  */
__be32
nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    struct nfsd4_close *close)
{
	__be32 status;
	struct nfs4_openowner *oo;
	struct nfs4_ol_stateid *stp;

	dprintk("NFSD: nfsd4_close on file %.*s\n",
			(int)cstate->current_fh.fh_dentry->d_name.len,
			cstate->current_fh.fh_dentry->d_name.name);

	nfs4_lock_state();
	/* check close_lru for replay */
	status = nfs4_preprocess_confirmed_seqid_op(cstate, close->cl_seqid,
					&close->cl_stateid, &stp);
	if (stp == NULL && status == nfserr_expired) {
		/*
		 * Also, we should make sure this isn't just the result of
		 * a replayed close:
		 */
		oo = search_close_lru(close->cl_stateid.si_stateownerid);
		/* It's not stale; let's assume it's expired: */
		if (oo == NULL)
			goto out;
		cstate->replay_owner = &oo->oo_owner;
		status = nfsd4_check_seqid(cstate, &oo->oo_owner, close->cl_seqid);
		if (status)
			goto out;
		/* Seqid checked out but no stateid exists; report bad
		 * seqid — presumably a true replay was already handled by
		 * nfsd4_check_seqid(); TODO confirm. */
		status = nfserr_bad_seqid;
	}
	if (status)
		goto out;
	oo = openowner(stp->st_stateowner);
	status = nfs_ok;
	update_stateid(&stp->st_stid.sc_stateid);
	memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));

	/* release_stateid() calls nfsd_close() if needed */
	release_open_stateid(stp);

	/* place unused nfs4_stateowners on so_close_lru list to be
	 * released by the laundromat service after the lease period
	 * to enable us to handle CLOSE replay
	 */
	if (list_empty(&oo->oo_owner.so_stateids))
		move_to_close_lru(oo);
out:
	/* On seqid replay, the state lock is released after encode: */
	if (!cstate->replay_owner)
		nfs4_unlock_state();
	return status;
}
3647
__be32
nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		  struct nfsd4_delegreturn *dr)
{
	struct nfs4_delegation *dp;
	stateid_t *stateid = &dr->dr_stateid;
	struct inode *inode;
	__be32 status;

	if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
		return status;
	/* NOTE(review): inode is assigned but never used below — looks
	 * like leftover code; confirm before removing. */
	inode = cstate->current_fh.fh_dentry->d_inode;

	nfs4_lock_state();
	/* Special and stale stateids can never name a delegation: */
	status = nfserr_bad_stateid;
	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
		goto out;
	status = nfserr_stale_stateid;
	if (STALE_STATEID(stateid))
		goto out;
	status = nfserr_bad_stateid;
	if (!is_delegation_stateid(stateid))
		goto out;
	status = nfserr_expired;
	dp = find_deleg_stateid(stateid);
	if (!dp)
		goto out;
	status = check_stateid_generation(stateid, &dp->dl_stid.sc_stateid, nfsd4_has_session(cstate));
	if (status)
		goto out;
	renew_client(dp->dl_client);

	/* The client has returned the delegation; tear it down. */
	unhash_delegation(dp);
out:
	nfs4_unlock_state();

	return status;
}
3686
3687
3688 /* 
3689  * Lock owner state (byte-range locks)
3690  */
3691 #define LOFF_OVERFLOW(start, len)      ((u64)(len) > ~(u64)(start))
3692 #define LOCK_HASH_BITS              8
3693 #define LOCK_HASH_SIZE             (1 << LOCK_HASH_BITS)
3694 #define LOCK_HASH_MASK             (LOCK_HASH_SIZE - 1)
3695
3696 static inline u64
3697 end_offset(u64 start, u64 len)
3698 {
3699         u64 end;
3700
3701         end = start + len;
3702         return end >= start ? end: NFS4_MAX_UINT64;
3703 }
3704
3705 /* last octet in a range */
3706 static inline u64
3707 last_byte_offset(u64 start, u64 len)
3708 {
3709         u64 end;
3710
3711         BUG_ON(!len);
3712         end = start + len;
3713         return end > start ? end - 1: NFS4_MAX_UINT64;
3714 }
3715
/* Bucket index into lock_ownerid_hashtbl: low bits of the owner id. */
static unsigned int lockownerid_hashval(u32 id)
{
	return id & LOCK_HASH_MASK;
}
3720
/*
 * Bucket index into lock_ownerstr_hashtbl, combining the file, the
 * client id, and a hash of the opaque owner name.
 */
static inline unsigned int
lock_ownerstr_hashval(struct inode *inode, u32 cl_id,
		struct xdr_netobj *ownername)
{
	return (file_hashval(inode) + cl_id
			+ opaque_hashval(ownername->data, ownername->len))
		& LOCK_HASH_MASK;
}
3729
/* Lockowners, hashed by owner id and by (file, client, owner-name): */
static struct list_head lock_ownerid_hashtbl[LOCK_HASH_SIZE];
static struct list_head lock_ownerstr_hashtbl[LOCK_HASH_SIZE];
3732
3733 /*
3734  * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
3735  * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
3736  * byte, because of sign extension problems.  Since NFSv4 calls for 64-bit
3737  * locking, this prevents us from being completely protocol-compliant.  The
3738  * real solution to this problem is to start using unsigned file offsets in
3739  * the VFS, but this is a very deep change!
3740  */
static inline void
nfs4_transform_lock_offset(struct file_lock *lock)
{
	/* Clamp offsets that went negative (i.e. exceeded the signed
	 * 64-bit range — see the comment above) to the VFS maximum: */
	if (lock->fl_start < 0)
		lock->fl_start = OFFSET_MAX;
	if (lock->fl_end < 0)
		lock->fl_end = OFFSET_MAX;
}
3749
/* Hack!: For now, we're defining this just so we can use a pointer to it
 * as a unique cookie to identify our (NFSv4's) posix locks.
 * (nfs4_set_lock_denied() compares fl_lmops against this address.) */
static const struct lock_manager_operations nfsd_posix_mng_ops  = {
};
3754
/*
 * Fill in a LOCK/LOCKT denied response from a conflicting file_lock.
 * Only locks set by NFSv4 itself (fl_lmops == &nfsd_posix_mng_ops)
 * carry an nfs4_lockowner in fl_owner; anything else is reported with
 * an anonymous owner and zero clientid.
 */
static inline void
nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
{
	struct nfs4_lockowner *lo;

	if (fl->fl_lmops == &nfsd_posix_mng_ops) {
		lo = (struct nfs4_lockowner *) fl->fl_owner;
		deny->ld_owner.data = kmemdup(lo->lo_owner.so_owner.data,
					lo->lo_owner.so_owner.len, GFP_KERNEL);
		if (!deny->ld_owner.data)
			/* We just don't care that much */
			goto nevermind;
		deny->ld_owner.len = lo->lo_owner.so_owner.len;
		deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
	} else {
nevermind:
		deny->ld_owner.len = 0;
		deny->ld_owner.data = NULL;
		deny->ld_clientid.cl_boot = 0;
		deny->ld_clientid.cl_id = 0;
	}
	deny->ld_start = fl->fl_start;
	/* Convert the inclusive [fl_start, fl_end] back to a length;
	 * fl_end == NFS4_MAX_UINT64 means "to EOF": */
	deny->ld_length = NFS4_MAX_UINT64;
	if (fl->fl_end != NFS4_MAX_UINT64)
		deny->ld_length = fl->fl_end - fl->fl_start + 1;
	deny->ld_type = NFS4_READ_LT;
	if (fl->fl_type != F_RDLCK)
		deny->ld_type = NFS4_WRITE_LT;
}
3784
/*
 * Look up a lockowner by (file, client, owner string).  Returns NULL
 * when no such owner exists.  Callers hold the state lock.
 */
static struct nfs4_lockowner *
find_lockowner_str(struct inode *inode, clientid_t *clid,
		struct xdr_netobj *owner)
{
	unsigned int hashval = lock_ownerstr_hashval(inode, clid->cl_id, owner);
	struct nfs4_stateowner *op;

	list_for_each_entry(op, &lock_ownerstr_hashtbl[hashval], so_strhash) {
		if (same_owner_str(op, owner, clid))
			return lockowner(op);
	}
	return NULL;
}
3798
/*
 * Insert a new lockowner into both hash tables and onto the open
 * stateid's lockowner list.
 * NOTE(review): @clp is currently unused here.
 */
static void hash_lockowner(struct nfs4_lockowner *lo, unsigned int strhashval, struct nfs4_client *clp, struct nfs4_ol_stateid *open_stp)
{
	unsigned int idhashval;

	idhashval = lockownerid_hashval(lo->lo_owner.so_id);
	list_add(&lo->lo_owner.so_idhash, &lock_ownerid_hashtbl[idhashval]);
	list_add(&lo->lo_owner.so_strhash, &lock_ownerstr_hashtbl[strhashval]);
	list_add(&lo->lo_perstateid, &open_stp->st_lockowners);
}
3808
3809 /*
3810  * Alloc a lock owner structure.
3811  * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) has 
3812  * occurred. 
3813  *
3814  * strhashval = lock_ownerstr_hashval 
3815  */
3816
/* Returns the new, hashed lockowner, or NULL on allocation failure. */
static struct nfs4_lockowner *
alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp, struct nfs4_ol_stateid *open_stp, struct nfsd4_lock *lock) {
	struct nfs4_lockowner *lo;

	lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
	if (!lo)
		return NULL;
	INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
	lo->lo_owner.so_is_open_owner = 0;
	/* It is the openowner seqid that will be incremented in encode in the
	 * case of new lockowners; so increment the lock seqid manually: */
	lo->lo_owner.so_seqid = lock->lk_new_lock_seqid + 1;
	hash_lockowner(lo, strhashval, clp, open_stp);
	return lo;
}
3832
/*
 * Allocate and hash a lock stateid derived from @open_stp.  Returns
 * NULL on allocation failure.
 */
static struct nfs4_ol_stateid *
alloc_init_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp, struct nfs4_ol_stateid *open_stp)
{
	struct nfs4_ol_stateid *stp;

	stp = nfs4_alloc_stateid();
	if (stp == NULL)
		goto out;
	list_add(&stp->st_perfile, &fp->fi_stateids);
	list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
	stp->st_stateowner = &lo->lo_owner;
	stp->st_stid.sc_type = NFS4_LOCK_STID;
	get_nfs4_file(fp);	/* the stateid holds a file reference */
	stp->st_file = fp;
	stp->st_stid.sc_stateid.si_boot = boot_time;
	stp->st_stid.sc_stateid.si_stateownerid = lo->lo_owner.so_id;
	stp->st_stid.sc_stateid.si_fileid = fp->fi_id;
	/* note will be incremented before first return to client: */
	stp->st_stid.sc_stateid.si_generation = 0;
	hash_stid(&stp->st_stid);
	/* Lock stateids start with no access of their own; deny bits
	 * are inherited from the open stateid: */
	stp->st_access_bmap = 0;
	stp->st_deny_bmap = open_stp->st_deny_bmap;
	stp->st_openstp = open_stp;

out:
	return stp;
}
3860
3861 static int
3862 check_lock_length(u64 offset, u64 length)
3863 {
3864         return ((length == 0)  || ((length != NFS4_MAX_UINT64) &&
3865              LOFF_OVERFLOW(offset, length)));
3866 }
3867
/*
 * Take a file access reference for @access, but only the first time
 * this lock stateid needs that access mode.
 */
static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
{
	struct nfs4_file *fp = lock_stp->st_file;
	int oflag = nfs4_access_to_omode(access);

	if (test_bit(access, &lock_stp->st_access_bmap))
		return;
	nfs4_file_get_access(fp, oflag);
	__set_bit(access, &lock_stp->st_access_bmap);
}
3878
3879 /*
3880  *  LOCK operation 
3881  */
__be32
nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	   struct nfsd4_lock *lock)
{
	struct nfs4_openowner *open_sop = NULL;
	struct nfs4_lockowner *lock_sop = NULL;
	struct nfs4_ol_stateid *lock_stp;
	struct nfs4_file *fp;
	struct file *filp = NULL;
	struct file_lock file_lock;
	struct file_lock conflock;
	__be32 status = 0;
	unsigned int strhashval;
	int lkflg;
	int err;

	dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
		(long long) lock->lk_offset,
		(long long) lock->lk_length);

	if (check_lock_length(lock->lk_offset, lock->lk_length))
		 return nfserr_inval;

	if ((status = fh_verify(rqstp, &cstate->current_fh,
				S_IFREG, NFSD_MAY_LOCK))) {
		dprintk("NFSD: nfsd4_lock: permission denied!\n");
		return status;
	}

	nfs4_lock_state();

	if (lock->lk_is_new) {
		/*
		 * Client indicates that this is a new lockowner.
		 * Use open owner and open stateid to create lock owner and
		 * lock stateid.
		 */
		struct nfs4_ol_stateid *open_stp = NULL;

		status = nfserr_stale_clientid;
		if (!nfsd4_has_session(cstate) &&
		    STALE_CLIENTID(&lock->lk_new_clientid))
			goto out;

		/* validate and update open stateid and open seqid */
		status = nfs4_preprocess_confirmed_seqid_op(cstate,
					lock->lk_new_open_seqid,
					&lock->lk_new_open_stateid,
					&open_stp);
		if (status)
			goto out;
		open_sop = openowner(open_stp->st_stateowner);
		/* The open stateid must belong to the client named in
		 * the request (pre-sessions only): */
		status = nfserr_bad_stateid;
		if (!nfsd4_has_session(cstate) &&
			!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
						&lock->v.new.clientid))
			goto out;
		/* create lockowner and lock stateid */
		fp = open_stp->st_file;
		strhashval = lock_ownerstr_hashval(fp->fi_inode,
				open_sop->oo_owner.so_client->cl_clientid.cl_id,
				&lock->v.new.owner);
		/* XXX: Do we need to check for duplicate stateowners on
		 * the same file, or should they just be allowed (and
		 * create new stateids)? */
		status = nfserr_jukebox;
		lock_sop = alloc_init_lock_stateowner(strhashval,
				open_sop->oo_owner.so_client, open_stp, lock);
		if (lock_sop == NULL)
			goto out;
		lock_stp = alloc_init_lock_stateid(lock_sop, fp, open_stp);
		if (lock_stp == NULL)
			goto out;
	} else {
		/* lock (lock owner + lock stateid) already exists */
		status = nfs4_preprocess_seqid_op(cstate,
				       lock->lk_old_lock_seqid,
				       &lock->lk_old_lock_stateid,
				       NFS4_LOCK_STID, &lock_stp);
		if (status)
			goto out;
		lock_sop = lockowner(lock_stp->st_stateowner);
		fp = lock_stp->st_file;
	}
	/* lock_sop and lock_stp have been created or found */

	/* The lock type must be compatible with the open mode: */
	lkflg = setlkflg(lock->lk_type);
	status = nfs4_check_openmode(lock_stp, lkflg);
	if (status)
		goto out;

	/* Reclaims are allowed only during, and new locks only outside,
	 * the grace period: */
	status = nfserr_grace;
	if (locks_in_grace() && !lock->lk_reclaim)
		goto out;
	status = nfserr_no_grace;
	if (!locks_in_grace() && lock->lk_reclaim)
		goto out;

	locks_init_lock(&file_lock);
	switch (lock->lk_type) {
		case NFS4_READ_LT:
		case NFS4_READW_LT:
			filp = find_readable_file(lock_stp->st_file);
			if (filp)
				get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
			file_lock.fl_type = F_RDLCK;
			break;
		case NFS4_WRITE_LT:
		case NFS4_WRITEW_LT:
			filp = find_writeable_file(lock_stp->st_file);
			if (filp)
				get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
			file_lock.fl_type = F_WRLCK;
			break;
		default:
			status = nfserr_inval;
		goto out;
	}
	if (!filp) {
		status = nfserr_openmode;
		goto out;
	}
	file_lock.fl_owner = (fl_owner_t)lock_sop;
	file_lock.fl_pid = current->tgid;
	file_lock.fl_file = filp;
	file_lock.fl_flags = FL_POSIX;
	file_lock.fl_lmops = &nfsd_posix_mng_ops;

	file_lock.fl_start = lock->lk_offset;
	file_lock.fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
	nfs4_transform_lock_offset(&file_lock);

	/*
	* Try to lock the file in the VFS.
	* Note: locks.c uses the BKL to protect the inode's lock list.
	*/

	err = vfs_lock_file(filp, F_SETLK, &file_lock, &conflock);
	switch (-err) {
	case 0: /* success! */
		update_stateid(&lock_stp->st_stid.sc_stateid);
		memcpy(&lock->lk_resp_stateid, &lock_stp->st_stid.sc_stateid,
				sizeof(stateid_t));
		status = 0;
		break;
	case (EAGAIN):		/* conflock holds conflicting lock */
		status = nfserr_denied;
		dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
		nfs4_set_lock_denied(&conflock, &lock->lk_denied);
		break;
	case (EDEADLK):
		status = nfserr_deadlock;
		break;
	default:
		dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err);
		status = nfserrno(err);
		break;
	}
out:
	/* Undo a freshly created lockowner on failure: */
	if (status && lock->lk_is_new && lock_sop)
		release_lockowner(lock_sop);
	/* On seqid replay, keep the state lock held until after encode: */
	if (!cstate->replay_owner)
		nfs4_unlock_state();
	return status;
}
4047
4048 /*
4049  * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
4050  * so we do a temporary open here just to get an open file to pass to
4051  * vfs_test_lock.  (Arguably perhaps test_lock should be done with an
4052  * inode operation.)
4053  */
4054 static int nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
4055 {
4056         struct file *file;
4057         int err;
4058
4059         err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file);
4060         if (err)
4061                 return err;
4062         err = vfs_test_lock(file, lock);
4063         nfsd_close(file);
4064         return err;
4065 }
4066
4067 /*
4068  * LOCKT operation
4069  */
__be32
nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    struct nfsd4_lockt *lockt)
{
	struct inode *inode;
	struct file_lock file_lock;
	struct nfs4_lockowner *lo;
	int error;
	__be32 status;

	/* LOCKT may not be used during the grace period: */
	if (locks_in_grace())
		return nfserr_grace;

	if (check_lock_length(lockt->lt_offset, lockt->lt_length))
		 return nfserr_inval;

	nfs4_lock_state();

	status = nfserr_stale_clientid;
	if (!nfsd4_has_session(cstate) && STALE_CLIENTID(&lockt->lt_clientid))
		goto out;

	if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
		goto out;

	inode = cstate->current_fh.fh_dentry->d_inode;
	locks_init_lock(&file_lock);
	switch (lockt->lt_type) {
		case NFS4_READ_LT:
		case NFS4_READW_LT:
			file_lock.fl_type = F_RDLCK;
		break;
		case NFS4_WRITE_LT:
		case NFS4_WRITEW_LT:
			file_lock.fl_type = F_WRLCK;
		break;
		default:
			dprintk("NFSD: nfs4_lockt: bad lock type!\n");
			status = nfserr_inval;
		goto out;
	}

	/* The lockowner may not exist yet; if it does, set fl_owner —
	 * presumably so the test disregards the owner's own locks;
	 * TODO confirm. */
	lo = find_lockowner_str(inode, &lockt->lt_clientid, &lockt->lt_owner);
	if (lo)
		file_lock.fl_owner = (fl_owner_t)lo;
	file_lock.fl_pid = current->tgid;
	file_lock.fl_flags = FL_POSIX;

	file_lock.fl_start = lockt->lt_offset;
	file_lock.fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);

	nfs4_transform_lock_offset(&file_lock);

	status = nfs_ok;
	error = nfsd_test_lock(rqstp, &cstate->current_fh, &file_lock);
	if (error) {
		status = nfserrno(error);
		goto out;
	}
	/* F_UNLCK coming back means no conflicting lock was found: */
	if (file_lock.fl_type != F_UNLCK) {
		status = nfserr_denied;
		nfs4_set_lock_denied(&file_lock, &lockt->lt_denied);
	}
out:
	nfs4_unlock_state();
	return status;
}
4137
4138 __be32
4139 nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4140             struct nfsd4_locku *locku)
4141 {
4142         struct nfs4_ol_stateid *stp;
4143         struct file *filp = NULL;
4144         struct file_lock file_lock;
4145         __be32 status;
4146         int err;
4147                                                         
4148         dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n",
4149                 (long long) locku->lu_offset,
4150                 (long long) locku->lu_length);
4151
4152         if (check_lock_length(locku->lu_offset, locku->lu_length))
4153                  return nfserr_inval;
4154
4155         nfs4_lock_state();
4156                                                                                 
4157         status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
4158                                         &locku->lu_stateid, NFS4_LOCK_STID, &stp);
4159         if (status)
4160                 goto out;
4161         filp = find_any_file(stp->st_file);
4162         if (!filp) {
4163                 status = nfserr_lock_range;
4164                 goto out;
4165         }
4166         BUG_ON(!filp);
4167         locks_init_lock(&file_lock);
4168         file_lock.fl_type = F_UNLCK;
4169         file_lock.fl_owner = (fl_owner_t)lockowner(stp->st_stateowner);
4170         file_lock.fl_pid = current->tgid;
4171         file_lock.fl_file = filp;
4172         file_lock.fl_flags = FL_POSIX; 
4173         file_lock.fl_lmops = &nfsd_posix_mng_ops;
4174         file_lock.fl_start = locku->lu_offset;
4175
4176         file_lock.fl_end = last_byte_offset(locku->lu_offset, locku->lu_length);
4177         nfs4_transform_lock_offset(&file_lock);
4178
4179         /*
4180         *  Try to unlock the file in the VFS.
4181         */
4182         err = vfs_lock_file(filp, F_SETLK, &file_lock, NULL);
4183         if (err) {
4184                 dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
4185                 goto out_nfserr;
4186         }
4187         /*
4188         * OK, unlock succeeded; the only thing left to do is update the stateid.
4189         */
4190         update_stateid(&stp->st_stid.sc_stateid);
4191         memcpy(&locku->lu_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
4192
4193 out:
4194         nfs4_unlock_state();
4195         return status;
4196
4197 out_nfserr:
4198         status = nfserrno(err);
4199         goto out;
4200 }
4201
4202 /*
4203  * returns
4204  *      1: locks held by lockowner
4205  *      0: no locks held by lockowner
4206  */
static int
check_for_locks(struct nfs4_file *filp, struct nfs4_lockowner *lowner)
{
	struct file_lock **flpp;
	struct inode *inode = filp->fi_inode;
	int status = 0;

	/* Walk the inode's lock list under the flock lock, looking for
	 * any lock still held by @lowner: */
	lock_flocks();
	for (flpp = &inode->i_flock; *flpp != NULL; flpp = &(*flpp)->fl_next) {
		if ((*flpp)->fl_owner == (fl_owner_t)lowner) {
			status = 1;
			goto out;
		}
	}
out:
	unlock_flocks();
	return status;
}
4225
4226 __be32
4227 nfsd4_release_lockowner(struct svc_rqst *rqstp,
4228                         struct nfsd4_compound_state *cstate,
4229                         struct nfsd4_release_lockowner *rlockowner)
4230 {
4231         clientid_t *clid = &rlockowner->rl_clientid;
4232         struct nfs4_stateowner *sop;
4233         struct nfs4_lockowner *lo;
4234         struct nfs4_ol_stateid *stp;
4235         struct xdr_netobj *owner = &rlockowner->rl_owner;
4236         struct list_head matches;
4237         int i;
4238         __be32 status;
4239
4240         dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
4241                 clid->cl_boot, clid->cl_id);
4242
4243         /* XXX check for lease expiration */
4244
4245         status = nfserr_stale_clientid;
4246         if (STALE_CLIENTID(clid))
4247                 return status;
4248
4249         nfs4_lock_state();
4250
4251         status = nfserr_locks_held;
4252         /* XXX: we're doing a linear search through all the lockowners.
4253          * Yipes!  For now we'll just hope clients aren't really using
4254          * release_lockowner much, but eventually we have to fix these
4255          * data structures. */
4256         INIT_LIST_HEAD(&matches);
4257         for (i = 0; i < LOCK_HASH_SIZE; i++) {
4258                 list_for_each_entry(sop, &lock_ownerid_hashtbl[i], so_idhash) {
4259                         if (!same_owner_str(sop, owner, clid))
4260                                 continue;
4261                         list_for_each_entry(stp, &sop->so_stateids,
4262                                         st_perstateowner) {
4263                                 lo = lockowner(sop);
4264                                 if (check_for_locks(stp->st_file, lo))
4265                                         goto out;
4266                                 list_add(&lo->lo_list, &matches);
4267                         }
4268                 }
4269         }
4270         /* Clients probably won't expect us to return with some (but not all)
4271          * of the lockowner state released; so don't release any until all
4272          * have been checked. */
4273         status = nfs_ok;
4274         while (!list_empty(&matches)) {
4275                 lo = list_entry(matches.next, struct nfs4_lockowner,
4276                                                                 lo_list);
4277                 /* unhash_stateowner deletes so_perclient only
4278                  * for openowners. */
4279                 list_del(&lo->lo_list);
4280                 release_lockowner(lo);
4281         }
4282 out:
4283         nfs4_unlock_state();
4284         return status;
4285 }
4286
4287 static inline struct nfs4_client_reclaim *
4288 alloc_reclaim(void)
4289 {
4290         return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
4291 }
4292
4293 int
4294 nfs4_has_reclaimed_state(const char *name, bool use_exchange_id)
4295 {
4296         unsigned int strhashval = clientstr_hashval(name);
4297         struct nfs4_client *clp;
4298
4299         clp = find_confirmed_client_by_str(name, strhashval);
4300         return clp ? 1 : 0;
4301 }
4302
4303 /*
4304  * failure => all reset bets are off, nfserr_no_grace...
4305  */
4306 int
4307 nfs4_client_to_reclaim(const char *name)
4308 {
4309         unsigned int strhashval;
4310         struct nfs4_client_reclaim *crp = NULL;
4311
4312         dprintk("NFSD nfs4_client_to_reclaim NAME: %.*s\n", HEXDIR_LEN, name);
4313         crp = alloc_reclaim();
4314         if (!crp)
4315                 return 0;
4316         strhashval = clientstr_hashval(name);
4317         INIT_LIST_HEAD(&crp->cr_strhash);
4318         list_add(&crp->cr_strhash, &reclaim_str_hashtbl[strhashval]);
4319         memcpy(crp->cr_recdir, name, HEXDIR_LEN);
4320         reclaim_str_hashtbl_size++;
4321         return 1;
4322 }
4323
4324 static void
4325 nfs4_release_reclaim(void)
4326 {
4327         struct nfs4_client_reclaim *crp = NULL;
4328         int i;
4329
4330         for (i = 0; i < CLIENT_HASH_SIZE; i++) {
4331                 while (!list_empty(&reclaim_str_hashtbl[i])) {
4332                         crp = list_entry(reclaim_str_hashtbl[i].next,
4333                                         struct nfs4_client_reclaim, cr_strhash);
4334                         list_del(&crp->cr_strhash);
4335                         kfree(crp);
4336                         reclaim_str_hashtbl_size--;
4337                 }
4338         }
4339         BUG_ON(reclaim_str_hashtbl_size);
4340 }
4341
4342 /*
4343  * called from OPEN, CLAIM_PREVIOUS with a new clientid. */
4344 static struct nfs4_client_reclaim *
4345 nfs4_find_reclaim_client(clientid_t *clid)
4346 {
4347         unsigned int strhashval;
4348         struct nfs4_client *clp;
4349         struct nfs4_client_reclaim *crp = NULL;
4350
4351
4352         /* find clientid in conf_id_hashtbl */
4353         clp = find_confirmed_client(clid);
4354         if (clp == NULL)
4355                 return NULL;
4356
4357         dprintk("NFSD: nfs4_find_reclaim_client for %.*s with recdir %s\n",
4358                             clp->cl_name.len, clp->cl_name.data,
4359                             clp->cl_recdir);
4360
4361         /* find clp->cl_name in reclaim_str_hashtbl */
4362         strhashval = clientstr_hashval(clp->cl_recdir);
4363         list_for_each_entry(crp, &reclaim_str_hashtbl[strhashval], cr_strhash) {
4364                 if (same_name(crp->cr_recdir, clp->cl_recdir)) {
4365                         return crp;
4366                 }
4367         }
4368         return NULL;
4369 }
4370
4371 /*
4372 * Called from OPEN. Look for clientid in reclaim list.
4373 */
4374 __be32
4375 nfs4_check_open_reclaim(clientid_t *clid)
4376 {
4377         return nfs4_find_reclaim_client(clid) ? nfs_ok : nfserr_reclaim_bad;
4378 }
4379
4380 /* initialization to perform at module load time: */
4381
4382 int
4383 nfs4_state_init(void)
4384 {
4385         int i, status;
4386
4387         status = nfsd4_init_slabs();
4388         if (status)
4389                 return status;
4390         for (i = 0; i < CLIENT_HASH_SIZE; i++) {
4391                 INIT_LIST_HEAD(&conf_id_hashtbl[i]);
4392                 INIT_LIST_HEAD(&conf_str_hashtbl[i]);
4393                 INIT_LIST_HEAD(&unconf_str_hashtbl[i]);
4394                 INIT_LIST_HEAD(&unconf_id_hashtbl[i]);
4395                 INIT_LIST_HEAD(&reclaim_str_hashtbl[i]);
4396         }
4397         for (i = 0; i < SESSION_HASH_SIZE; i++)
4398                 INIT_LIST_HEAD(&sessionid_hashtbl[i]);
4399         for (i = 0; i < FILE_HASH_SIZE; i++) {
4400                 INIT_LIST_HEAD(&file_hashtbl[i]);
4401         }
4402         for (i = 0; i < OPEN_OWNER_HASH_SIZE; i++) {
4403                 INIT_LIST_HEAD(&open_ownerstr_hashtbl[i]);
4404                 INIT_LIST_HEAD(&open_ownerid_hashtbl[i]);
4405         }
4406         for (i = 0; i < STATEID_HASH_SIZE; i++)
4407                 INIT_LIST_HEAD(&stateid_hashtbl[i]);
4408         for (i = 0; i < LOCK_HASH_SIZE; i++) {
4409                 INIT_LIST_HEAD(&lock_ownerid_hashtbl[i]);
4410                 INIT_LIST_HEAD(&lock_ownerstr_hashtbl[i]);
4411         }
4412         memset(&onestateid, ~0, sizeof(stateid_t));
4413         INIT_LIST_HEAD(&close_lru);
4414         INIT_LIST_HEAD(&client_lru);
4415         INIT_LIST_HEAD(&del_recall_lru);
4416         reclaim_str_hashtbl_size = 0;
4417         return 0;
4418 }
4419
/*
 * Initialize the recovery directory and read back the client records
 * left over from before the last reboot.  Failure is only logged; the
 * server comes up regardless (reclaims will simply fail).
 */
static void
nfsd4_load_reboot_recovery_data(void)
{
	int err;

	nfs4_lock_state();
	nfsd4_init_recdir();
	err = nfsd4_recdir_load();
	nfs4_unlock_state();
	if (err)
		printk("NFSD: Failure reading reboot recovery data\n");
}
4432
4433 /*
4434  * Since the lifetime of a delegation isn't limited to that of an open, a
4435  * client may quite reasonably hang on to a delegation as long as it has
4436  * the inode cached.  This becomes an obvious problem the first time a
4437  * client's inode cache approaches the size of the server's total memory.
4438  *
4439  * For now we avoid this problem by imposing a hard limit on the number
4440  * of delegations, which varies according to the server's memory size.
4441  */
4442 static void
4443 set_max_delegations(void)
4444 {
4445         /*
4446          * Allow at most 4 delegations per megabyte of RAM.  Quick
4447          * estimates suggest that in the worst case (where every delegation
4448          * is for a different inode), a delegation could take about 1.5K,
4449          * giving a worst case usage of about 6% of memory.
4450          */
4451         max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
4452 }
4453
4454 /* initialization to perform when the nfsd service is started: */
4455
/*
 * Bring NFSv4 state machinery up for a freshly started nfsd: record the
 * boot time, open the grace period, set up the callback credential and
 * queue, and schedule the first laundromat run.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int
__nfs4_state_start(void)
{
	int ret;

	boot_time = get_seconds();
	/* Grace period starts now; lasts nfsd4_grace seconds. */
	locks_start_grace(&nfsd4_manager);
	printk(KERN_INFO "NFSD: starting %ld-second grace period\n",
	       nfsd4_grace);
	ret = set_callback_cred();
	if (ret)
		/* NOTE(review): set_callback_cred()'s actual error is
		 * discarded and reported as -ENOMEM — confirm intended. */
		return -ENOMEM;
	laundry_wq = create_singlethread_workqueue("nfsd4");
	if (laundry_wq == NULL)
		return -ENOMEM;
	ret = nfsd4_create_callback_queue();
	if (ret)
		goto out_free_laundry;
	/* First laundromat pass fires when the grace period ends. */
	queue_delayed_work(laundry_wq, &laundromat_work, nfsd4_grace * HZ);
	set_max_delegations();
	return 0;
out_free_laundry:
	destroy_workqueue(laundry_wq);
	return ret;
}
4481
/*
 * Public entry point for starting NFSv4 state: load reboot recovery
 * data, then perform the actual startup.  Returns 0 or a negative errno.
 */
int
nfs4_state_start(void)
{
	int ret;

	nfsd4_load_reboot_recovery_data();
	ret = __nfs4_state_start();
	return ret;
}
4488
/*
 * Tear down all NFSv4 state: expire every confirmed and unconfirmed
 * client, reap any delegations still on the recall LRU, and close down
 * the recovery directory.
 *
 * NOTE(review): callers appear to hold the state lock (see
 * nfs4_state_shutdown()) — confirm that is a requirement here.
 */
static void
__nfs4_state_shutdown(void)
{
	int i;
	struct nfs4_client *clp = NULL;
	struct nfs4_delegation *dp = NULL;
	struct list_head *pos, *next, reaplist;

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		/* expire_client() unhashes the entry, so each loop shrinks
		 * the chain and terminates. */
		while (!list_empty(&conf_id_hashtbl[i])) {
			clp = list_entry(conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
			expire_client(clp);
		}
		while (!list_empty(&unconf_str_hashtbl[i])) {
			clp = list_entry(unconf_str_hashtbl[i].next, struct nfs4_client, cl_strhash);
			expire_client(clp);
		}
	}
	/* Splice the recall LRU onto a private list under recall_lock, then
	 * unhash the delegations without holding the spinlock. */
	INIT_LIST_HEAD(&reaplist);
	spin_lock(&recall_lock);
	list_for_each_safe(pos, next, &del_recall_lru) {
		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
		list_move(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&recall_lock);
	list_for_each_safe(pos, next, &reaplist) {
		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		unhash_delegation(dp);
	}

	nfsd4_shutdown_recdir();
}
4522
/*
 * Shut down NFSv4 state when nfsd stops.  Ordering matters: the
 * laundromat is stopped before the state it would scan is freed, and
 * the callback queue is destroyed last.
 */
void
nfs4_state_shutdown(void)
{
	/* Wait out any in-flight laundromat pass before tearing down. */
	cancel_delayed_work_sync(&laundromat_work);
	destroy_workqueue(laundry_wq);
	locks_end_grace(&nfsd4_manager);
	nfs4_lock_state();
	nfs4_release_reclaim();
	__nfs4_state_shutdown();
	nfs4_unlock_state();
	nfsd4_destroy_callback_queue();
}