/*
 * linux/fs/9p/mux.c
 *
 * Protocol Multiplexer
 *
 *  Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
 *  Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2
 *  as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA  02111-1301  USA
 *
 */
25
26 #include <linux/module.h>
27 #include <linux/errno.h>
28 #include <linux/fs.h>
29 #include <linux/poll.h>
30 #include <linux/kthread.h>
31 #include <linux/idr.h>
32 #include <linux/mutex.h>
33
34 #include "debug.h"
35 #include "v9fs.h"
36 #include "9p.h"
37 #include "conv.h"
38 #include "transport.h"
39 #include "mux.h"
40
41 #define ERREQFLUSH      1
42 #define SCHED_TIMEOUT   10
43 #define MAXPOLLWADDR    2
44
45 enum {
46         Rworksched = 1,         /* read work scheduled or running */
47         Rpending = 2,           /* can read */
48         Wworksched = 4,         /* write work scheduled or running */
49         Wpending = 8,           /* can write */
50 };

enum {
        None,
        Flushing,
        Flushed,
};

struct v9fs_mux_poll_task;

struct v9fs_req {
        spinlock_t lock;
        int tag;
        struct v9fs_fcall *tcall;
        struct v9fs_fcall *rcall;
        int err;
        v9fs_mux_req_callback cb;
        void *cba;
        int flush;
        struct list_head req_list;
};

struct v9fs_mux_data {
        spinlock_t lock;
        struct list_head mux_list;
        struct v9fs_mux_poll_task *poll_task;
        int msize;
        unsigned char *extended;
        struct v9fs_transport *trans;
        struct v9fs_idpool tagpool;
        int err;
        wait_queue_head_t equeue;
        struct list_head req_list;
        struct list_head unsent_req_list;
        struct v9fs_fcall *rcall;
        int rpos;
        char *rbuf;
        int wpos;
        int wsize;
        char *wbuf;
        wait_queue_t poll_wait[MAXPOLLWADDR];
        wait_queue_head_t *poll_waddr[MAXPOLLWADDR];
        poll_table pt;
        struct work_struct rq;
        struct work_struct wq;
        unsigned long wsched;
};

struct v9fs_mux_poll_task {
        struct task_struct *task;
        struct list_head mux_list;
        int muxnum;
};

struct v9fs_mux_rpc {
        struct v9fs_mux_data *m;
        int err;
        struct v9fs_fcall *tcall;
        struct v9fs_fcall *rcall;
        wait_queue_head_t wqueue;
};

static int v9fs_poll_proc(void *);
static void v9fs_read_work(struct work_struct *work);
static void v9fs_write_work(struct work_struct *work);
static void v9fs_pollwait(struct file *filp, wait_queue_head_t *wait_address,
                          poll_table *p);
static u16 v9fs_mux_get_tag(struct v9fs_mux_data *);
static void v9fs_mux_put_tag(struct v9fs_mux_data *, u16);

static DEFINE_MUTEX(v9fs_mux_task_lock);
static struct workqueue_struct *v9fs_mux_wq;

static int v9fs_mux_num;
static int v9fs_mux_poll_task_num;
static struct v9fs_mux_poll_task v9fs_mux_poll_tasks[100];

int v9fs_mux_global_init(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(v9fs_mux_poll_tasks); i++)
                v9fs_mux_poll_tasks[i].task = NULL;

        v9fs_mux_wq = create_workqueue("v9fs");
        if (!v9fs_mux_wq)
                return -ENOMEM;

        return 0;
}
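
/*
 * Illustrative only: how a client module might pair these global
 * init/exit calls from its own module hooks.  The function names
 * init_v9fs_example/exit_v9fs_example are hypothetical; in the real
 * tree the callers live in fs/9p/v9fs.c.
 */
#if 0
static int __init init_v9fs_example(void)
{
        int err;

        err = v9fs_mux_global_init();   /* creates the "v9fs" workqueue */
        if (err)
                return err;

        /* ... register the filesystem, caches, etc. ... */
        return 0;
}

static void __exit exit_v9fs_example(void)
{
        /* ... unregister the filesystem ... */
        v9fs_mux_global_exit();         /* destroys the workqueue */
}
#endif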

void v9fs_mux_global_exit(void)
{
        destroy_workqueue(v9fs_mux_wq);
}

/**
 * v9fs_mux_calc_poll_procs - calculate the number of polling procs
 * based on the number of mounted v9fs filesystems.
 *
 * The current implementation returns the number of mounts divided by
 * the number of existing poll tasks, rounded up, capped at the size of
 * the v9fs_mux_poll_tasks array.
 */
static int v9fs_mux_calc_poll_procs(int muxnum)
{
        int n;

        if (v9fs_mux_poll_task_num)
                n = muxnum / v9fs_mux_poll_task_num +
                    (muxnum % v9fs_mux_poll_task_num ? 1 : 0);
        else
                n = 1;

        if (n > ARRAY_SIZE(v9fs_mux_poll_tasks))
                n = ARRAY_SIZE(v9fs_mux_poll_tasks);

        return n;
}
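
/*
 * For example, adding a tenth mount while two poll tasks already exist
 * gives v9fs_mux_calc_poll_procs(10) = 10/2 + 0 = 5 tasks; the result
 * is always capped at the 100 slots of v9fs_mux_poll_tasks.
 */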

static int v9fs_mux_poll_start(struct v9fs_mux_data *m)
{
        int i, n;
        struct v9fs_mux_poll_task *vpt, *vptlast;
        struct task_struct *pproc;

        dprintk(DEBUG_MUX, "mux %p muxnum %d procnum %d\n", m, v9fs_mux_num,
                v9fs_mux_poll_task_num);
        mutex_lock(&v9fs_mux_task_lock);

        n = v9fs_mux_calc_poll_procs(v9fs_mux_num + 1);
        if (n > v9fs_mux_poll_task_num) {
                for (i = 0; i < ARRAY_SIZE(v9fs_mux_poll_tasks); i++) {
                        if (v9fs_mux_poll_tasks[i].task == NULL) {
                                vpt = &v9fs_mux_poll_tasks[i];
                                dprintk(DEBUG_MUX, "create proc %p\n", vpt);
                                pproc = kthread_create(v9fs_poll_proc, vpt,
                                                       "v9fs-poll");

                                if (!IS_ERR(pproc)) {
                                        vpt->task = pproc;
                                        INIT_LIST_HEAD(&vpt->mux_list);
                                        vpt->muxnum = 0;
                                        v9fs_mux_poll_task_num++;
                                        wake_up_process(vpt->task);
                                }
                                break;
                        }
                }

                if (i >= ARRAY_SIZE(v9fs_mux_poll_tasks))
                        dprintk(DEBUG_ERROR, "warning: no free poll slots\n");
        }

        n = (v9fs_mux_num + 1) / v9fs_mux_poll_task_num +
            ((v9fs_mux_num + 1) % v9fs_mux_poll_task_num ? 1 : 0);

        vptlast = NULL;
        for (i = 0; i < ARRAY_SIZE(v9fs_mux_poll_tasks); i++) {
                vpt = &v9fs_mux_poll_tasks[i];
                if (vpt->task != NULL) {
                        vptlast = vpt;
                        if (vpt->muxnum < n) {
                                dprintk(DEBUG_MUX, "put in proc %d\n", i);
                                list_add(&m->mux_list, &vpt->mux_list);
                                vpt->muxnum++;
                                m->poll_task = vpt;
                                memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
                                init_poll_funcptr(&m->pt, v9fs_pollwait);
                                break;
                        }
                }
        }

        if (i >= ARRAY_SIZE(v9fs_mux_poll_tasks)) {
                if (vptlast == NULL) {
                        mutex_unlock(&v9fs_mux_task_lock);
                        return -ENOMEM;
                }

                dprintk(DEBUG_MUX, "put in proc %d\n", i);
                list_add(&m->mux_list, &vptlast->mux_list);
                vptlast->muxnum++;
                m->poll_task = vptlast;
                memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
                init_poll_funcptr(&m->pt, v9fs_pollwait);
        }

        v9fs_mux_num++;
        mutex_unlock(&v9fs_mux_task_lock);

        return 0;
}

static void v9fs_mux_poll_stop(struct v9fs_mux_data *m)
{
        int i;
        struct v9fs_mux_poll_task *vpt;

        mutex_lock(&v9fs_mux_task_lock);
        vpt = m->poll_task;
        list_del(&m->mux_list);
        for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) {
                if (m->poll_waddr[i] != NULL) {
                        remove_wait_queue(m->poll_waddr[i], &m->poll_wait[i]);
                        m->poll_waddr[i] = NULL;
                }
        }
        vpt->muxnum--;
        if (!vpt->muxnum) {
                dprintk(DEBUG_MUX, "destroy proc %p\n", vpt);
                send_sig(SIGKILL, vpt->task, 1);
                vpt->task = NULL;
                v9fs_mux_poll_task_num--;
        }
        v9fs_mux_num--;
        mutex_unlock(&v9fs_mux_task_lock);
}

/**
 * v9fs_mux_init - allocate and initialize the per-session mux data.
 * Creates the polling task if this is the first session.
 *
 * @trans - transport structure
 * @msize - maximum message size
 * @extended - pointer to the extended flag
 */
struct v9fs_mux_data *v9fs_mux_init(struct v9fs_transport *trans, int msize,
                                    unsigned char *extended)
{
        int i, n;
        struct v9fs_mux_data *m, *mtmp;

        dprintk(DEBUG_MUX, "transport %p msize %d\n", trans, msize);
        m = kmalloc(sizeof(struct v9fs_mux_data), GFP_KERNEL);
        if (!m)
                return ERR_PTR(-ENOMEM);

        spin_lock_init(&m->lock);
        INIT_LIST_HEAD(&m->mux_list);
        m->msize = msize;
        m->extended = extended;
        m->trans = trans;
        idr_init(&m->tagpool.pool);
        init_MUTEX(&m->tagpool.lock);
        m->err = 0;
        init_waitqueue_head(&m->equeue);
        INIT_LIST_HEAD(&m->req_list);
        INIT_LIST_HEAD(&m->unsent_req_list);
        m->rcall = NULL;
        m->rpos = 0;
        m->rbuf = NULL;
        m->wpos = m->wsize = 0;
        m->wbuf = NULL;
        INIT_WORK(&m->rq, v9fs_read_work);
        INIT_WORK(&m->wq, v9fs_write_work);
        m->wsched = 0;
        memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
        m->poll_task = NULL;
        n = v9fs_mux_poll_start(m);
        if (n) {
                kfree(m);
                return ERR_PTR(n);
        }

        n = trans->poll(trans, &m->pt);
        if (n & POLLIN) {
                dprintk(DEBUG_MUX, "mux %p can read\n", m);
                set_bit(Rpending, &m->wsched);
        }

        if (n & POLLOUT) {
                dprintk(DEBUG_MUX, "mux %p can write\n", m);
                set_bit(Wpending, &m->wsched);
        }

        for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) {
                if (IS_ERR(m->poll_waddr[i])) {
                        v9fs_mux_poll_stop(m);
                        mtmp = (void *)m->poll_waddr[i];        /* the error code */
                        kfree(m);
                        m = mtmp;
                        break;
                }
        }

        return m;
}
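
/*
 * Illustrative only: a sketch of how a session brings the mux up over an
 * already connected transport, modeled on the callers in fs/9p/v9fs.c.
 * The variables shown are hypothetical; the real caller also closes the
 * transport on error.
 */
#if 0
        struct v9fs_mux_data *mux;

        mux = v9fs_mux_init(trans, msize, &extended);
        if (IS_ERR(mux))
                return PTR_ERR(mux);
#endif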

/**
 * v9fs_mux_destroy - cancel all pending requests and free mux resources
 */
void v9fs_mux_destroy(struct v9fs_mux_data *m)
{
        dprintk(DEBUG_MUX, "mux %p prev %p next %p\n", m,
                m->mux_list.prev, m->mux_list.next);
        v9fs_mux_cancel(m, -ECONNRESET);

        if (!list_empty(&m->req_list)) {
                /* wait until all processes waiting on this session exit */
                dprintk(DEBUG_MUX, "mux %p waiting for empty request queue\n",
                        m);
                wait_event_timeout(m->equeue, (list_empty(&m->req_list)), 5000);
                dprintk(DEBUG_MUX, "mux %p request queue empty: %d\n", m,
                        list_empty(&m->req_list));
        }

        v9fs_mux_poll_stop(m);
        m->trans = NULL;

        kfree(m);
}

/**
 * v9fs_pollwait - called by the file's poll operation to add the
 *      v9fs-poll task to the file's wait queue
 */
static void
v9fs_pollwait(struct file *filp, wait_queue_head_t *wait_address,
              poll_table *p)
{
        int i;
        struct v9fs_mux_data *m;

        m = container_of(p, struct v9fs_mux_data, pt);
        for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++)
                if (m->poll_waddr[i] == NULL)
                        break;

        if (i >= ARRAY_SIZE(m->poll_waddr)) {
                dprintk(DEBUG_ERROR, "not enough wait_address slots\n");
                return;
        }

        m->poll_waddr[i] = wait_address;

        if (!wait_address) {
                dprintk(DEBUG_ERROR, "no wait_address\n");
                m->poll_waddr[i] = ERR_PTR(-EIO);
                return;
        }

        init_waitqueue_entry(&m->poll_wait[i], m->poll_task->task);
        add_wait_queue(wait_address, &m->poll_wait[i]);
}
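
/*
 * Note: v9fs_mux_poll_start registers v9fs_pollwait above through
 * init_poll_funcptr, so the trans->poll call in v9fs_mux_init lands here
 * and queues the poll task on the transport's wait queues.
 */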

/**
 * v9fs_poll_mux - poll a mux and schedule read or write work if necessary
 */
static void v9fs_poll_mux(struct v9fs_mux_data *m)
{
        int n;

        if (m->err < 0)
                return;

        n = m->trans->poll(m->trans, NULL);
        if (n < 0 || n & (POLLERR | POLLHUP | POLLNVAL)) {
                dprintk(DEBUG_MUX, "error mux %p err %d\n", m, n);
                if (n >= 0)
                        n = -ECONNRESET;
                v9fs_mux_cancel(m, n);
        }

        if (n & POLLIN) {
                set_bit(Rpending, &m->wsched);
                dprintk(DEBUG_MUX, "mux %p can read\n", m);
                if (!test_and_set_bit(Rworksched, &m->wsched)) {
                        dprintk(DEBUG_MUX, "schedule read work mux %p\n", m);
                        queue_work(v9fs_mux_wq, &m->rq);
                }
        }

        if (n & POLLOUT) {
                set_bit(Wpending, &m->wsched);
                dprintk(DEBUG_MUX, "mux %p can write\n", m);
                if ((m->wsize || !list_empty(&m->unsent_req_list))
                    && !test_and_set_bit(Wworksched, &m->wsched)) {
                        dprintk(DEBUG_MUX, "schedule write work mux %p\n", m);
                        queue_work(v9fs_mux_wq, &m->wq);
                }
        }
}

/**
 * v9fs_poll_proc - poll all v9fs transports for new events and queue
 *      the appropriate work to the work queue
 */
static int v9fs_poll_proc(void *a)
{
        struct v9fs_mux_data *m, *mtmp;
        struct v9fs_mux_poll_task *vpt;

        vpt = a;
        dprintk(DEBUG_MUX, "start %p %p\n", current, vpt);
        allow_signal(SIGKILL);
        while (!kthread_should_stop()) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (signal_pending(current))
                        break;

                list_for_each_entry_safe(m, mtmp, &vpt->mux_list, mux_list) {
                        v9fs_poll_mux(m);
                }

                dprintk(DEBUG_MUX, "sleeping...\n");
                schedule_timeout(SCHED_TIMEOUT * HZ);
        }

        __set_current_state(TASK_RUNNING);
        dprintk(DEBUG_MUX, "finish\n");
        return 0;
}

/**
 * v9fs_write_work - called when a transport can send some data
 */
static void v9fs_write_work(struct work_struct *work)
{
        int n, err;
        struct v9fs_mux_data *m;
        struct v9fs_req *req;

        m = container_of(work, struct v9fs_mux_data, wq);

        if (m->err < 0) {
                clear_bit(Wworksched, &m->wsched);
                return;
        }

        if (!m->wsize) {
                spin_lock(&m->lock);
again:
                /*
                 * Re-check emptiness on each pass: requests flushed before
                 * being sent (err == ERREQFLUSH) are skipped, and skipping
                 * the last one must not walk off the list head.
                 */
                if (list_empty(&m->unsent_req_list)) {
                        spin_unlock(&m->lock);
                        clear_bit(Wworksched, &m->wsched);
                        return;
                }

                req = list_entry(m->unsent_req_list.next, struct v9fs_req,
                                 req_list);
                list_move_tail(&req->req_list, &m->req_list);
                if (req->err == ERREQFLUSH)
                        goto again;

                m->wbuf = req->tcall->sdata;
                m->wsize = req->tcall->size;
                m->wpos = 0;
                dump_data(m->wbuf, m->wsize);
                spin_unlock(&m->lock);
        }

        dprintk(DEBUG_MUX, "mux %p pos %d size %d\n", m, m->wpos, m->wsize);
        clear_bit(Wpending, &m->wsched);
        err = m->trans->write(m->trans, m->wbuf + m->wpos, m->wsize - m->wpos);
        dprintk(DEBUG_MUX, "mux %p sent %d bytes\n", m, err);
        if (err == -EAGAIN) {
                clear_bit(Wworksched, &m->wsched);
                return;
        }

        if (err <= 0)
                goto error;

        m->wpos += err;
        if (m->wpos == m->wsize)
                m->wpos = m->wsize = 0;

        if (m->wsize == 0 && !list_empty(&m->unsent_req_list)) {
                if (test_and_clear_bit(Wpending, &m->wsched))
                        n = POLLOUT;
                else
                        n = m->trans->poll(m->trans, NULL);

                if (n & POLLOUT) {
                        dprintk(DEBUG_MUX, "schedule write work mux %p\n", m);
                        queue_work(v9fs_mux_wq, &m->wq);
                } else
                        clear_bit(Wworksched, &m->wsched);
        } else
                clear_bit(Wworksched, &m->wsched);

        return;

error:
        v9fs_mux_cancel(m, err);
        clear_bit(Wworksched, &m->wsched);
}

static void process_request(struct v9fs_mux_data *m, struct v9fs_req *req)
{
        int ecode;
        struct v9fs_str *ename;

        if (!req->err && req->rcall->id == RERROR) {
                ecode = req->rcall->params.rerror.errno;
                ename = &req->rcall->params.rerror.error;

                dprintk(DEBUG_MUX, "Rerror %.*s\n", ename->len, ename->str);

                /* in extended mode the server sends a numeric errno */
                if (*m->extended)
                        req->err = -ecode;

                if (!req->err) {
                        req->err = v9fs_errstr2errno(ename->str, ename->len);

                        if (!req->err) {        /* string match failed */
                                PRINT_FCALL_ERROR("unknown error", req->rcall);
                                req->err = -ESERVERFAULT;
                        }
                }
        } else if (req->tcall && req->rcall->id != req->tcall->id + 1) {
                dprintk(DEBUG_ERROR, "fcall mismatch: expected %d, got %d\n",
                        req->tcall->id + 1, req->rcall->id);
                if (!req->err)
                        req->err = -EIO;
        }
}

/**
 * v9fs_read_work - called when there is some data to be read from a transport
 */
static void v9fs_read_work(struct work_struct *work)
{
        int n, err;
        struct v9fs_mux_data *m;
        struct v9fs_req *req, *rptr, *rreq;
        struct v9fs_fcall *rcall;
        char *rbuf;

        m = container_of(work, struct v9fs_mux_data, rq);

        if (m->err < 0)
                return;

        rcall = NULL;
        dprintk(DEBUG_MUX, "start mux %p pos %d\n", m, m->rpos);

        if (!m->rcall) {
                m->rcall =
                    kmalloc(sizeof(struct v9fs_fcall) + m->msize, GFP_KERNEL);
                if (!m->rcall) {
                        err = -ENOMEM;
                        goto error;
                }

                m->rbuf = (char *)m->rcall + sizeof(struct v9fs_fcall);
                m->rpos = 0;
        }

        clear_bit(Rpending, &m->wsched);
        err = m->trans->read(m->trans, m->rbuf + m->rpos, m->msize - m->rpos);
        dprintk(DEBUG_MUX, "mux %p got %d bytes\n", m, err);
        if (err == -EAGAIN) {
                clear_bit(Rworksched, &m->wsched);
                return;
        }

        if (err <= 0)
                goto error;

        m->rpos += err;
        while (m->rpos > 4) {
                /*
                 * The 4-byte size field at the start of every 9P message
                 * counts the whole message, including the field itself.
                 */
                n = le32_to_cpu(*(__le32 *) m->rbuf);
                if (n >= m->msize) {
                        dprintk(DEBUG_ERROR,
                                "requested packet size too big: %d\n", n);
                        err = -EIO;
                        goto error;
                }

                if (m->rpos < n)
                        break;

                dump_data(m->rbuf, n);
                err =
                    v9fs_deserialize_fcall(m->rbuf, n, m->rcall, *m->extended);
                if (err < 0) {
                        goto error;
                }

                if ((v9fs_debug_level & DEBUG_FCALL) == DEBUG_FCALL) {
                        char buf[150];

                        v9fs_printfcall(buf, sizeof(buf), m->rcall,
                                        *m->extended);
                        printk(KERN_NOTICE ">>> %p %s\n", m, buf);
                }

                rcall = m->rcall;
                rbuf = m->rbuf;
                if (m->rpos > n) {
                        /*
                         * More than one message in the buffer: allocate a
                         * fresh buffer and shift the remainder into it.
                         */
                        m->rcall = kmalloc(sizeof(struct v9fs_fcall) + m->msize,
                                           GFP_KERNEL);
                        if (!m->rcall) {
                                err = -ENOMEM;
                                goto error;
                        }

                        m->rbuf = (char *)m->rcall + sizeof(struct v9fs_fcall);
                        memmove(m->rbuf, rbuf + n, m->rpos - n);
                        m->rpos -= n;
                } else {
                        m->rcall = NULL;
                        m->rbuf = NULL;
                        m->rpos = 0;
                }

                dprintk(DEBUG_MUX, "mux %p fcall id %d tag %d\n", m, rcall->id,
                        rcall->tag);

                req = NULL;
                spin_lock(&m->lock);
                list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
                        if (rreq->tag == rcall->tag) {
                                req = rreq;
                                if (req->flush != Flushing)
                                        list_del(&req->req_list);
                                break;
                        }
                }
                spin_unlock(&m->lock);

                if (req) {
                        req->rcall = rcall;
                        process_request(m, req);

                        if (req->flush != Flushing) {
                                if (req->cb)
                                        (*req->cb) (req, req->cba);
                                else
                                        kfree(req->rcall);

                                wake_up(&m->equeue);
                        }
                } else {
                        if (err >= 0 && rcall->id != RFLUSH)
                                dprintk(DEBUG_ERROR,
                                        "unexpected response mux %p id %d tag %d\n",
                                        m, rcall->id, rcall->tag);
                        kfree(rcall);
                }
        }

        if (!list_empty(&m->req_list)) {
                if (test_and_clear_bit(Rpending, &m->wsched))
                        n = POLLIN;
                else
                        n = m->trans->poll(m->trans, NULL);

                if (n & POLLIN) {
                        dprintk(DEBUG_MUX, "schedule read work mux %p\n", m);
                        queue_work(v9fs_mux_wq, &m->rq);
                } else
                        clear_bit(Rworksched, &m->wsched);
        } else
                clear_bit(Rworksched, &m->wsched);

        return;

error:
        v9fs_mux_cancel(m, err);
        clear_bit(Rworksched, &m->wsched);
}

/**
 * v9fs_send_request - send a 9P request
 * The function can sleep until the request is scheduled for sending.
 * The function can be interrupted. A successful return does not
 * guarantee that the request was sent; on failure it returns an
 * ERR_PTR-encoded error retrievable with the PTR_ERR macro.
 *
 * @m: mux data
 * @tc: request to be sent
 * @cb: callback function to call when response is received
 * @cba: parameter to pass to the callback function
 */
static struct v9fs_req *v9fs_send_request(struct v9fs_mux_data *m,
                                          struct v9fs_fcall *tc,
                                          v9fs_mux_req_callback cb, void *cba)
{
        int n;
        struct v9fs_req *req;

        dprintk(DEBUG_MUX, "mux %p task %p tcall %p id %d\n", m, current,
                tc, tc->id);
        if (m->err < 0)
                return ERR_PTR(m->err);

        req = kmalloc(sizeof(struct v9fs_req), GFP_KERNEL);
        if (!req)
                return ERR_PTR(-ENOMEM);

        if (tc->id == TVERSION)
                n = V9FS_NOTAG;
        else
                n = v9fs_mux_get_tag(m);

        if (n < 0) {
                kfree(req);
                return ERR_PTR(-ENOMEM);
        }

        v9fs_set_tag(tc, n);
        if ((v9fs_debug_level & DEBUG_FCALL) == DEBUG_FCALL) {
                char buf[150];

                v9fs_printfcall(buf, sizeof(buf), tc, *m->extended);
                printk(KERN_NOTICE "<<< %p %s\n", m, buf);
        }

        spin_lock_init(&req->lock);
        req->tag = n;
        req->tcall = tc;
        req->rcall = NULL;
        req->err = 0;
        req->cb = cb;
        req->cba = cba;
        req->flush = None;

        spin_lock(&m->lock);
        list_add_tail(&req->req_list, &m->unsent_req_list);
        spin_unlock(&m->lock);

        if (test_and_clear_bit(Wpending, &m->wsched))
                n = POLLOUT;
        else
                n = m->trans->poll(m->trans, NULL);

        if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched))
                queue_work(v9fs_mux_wq, &m->wq);

        return req;
}

static void v9fs_mux_free_request(struct v9fs_mux_data *m, struct v9fs_req *req)
{
        v9fs_mux_put_tag(m, req->tag);
        kfree(req);
}

static void v9fs_mux_flush_cb(struct v9fs_req *freq, void *a)
{
        v9fs_mux_req_callback cb;
        int tag;
        struct v9fs_mux_data *m;
        struct v9fs_req *req, *rreq, *rptr;

        m = a;
        dprintk(DEBUG_MUX, "mux %p tc %p rc %p err %d oldtag %d\n", m,
                freq->tcall, freq->rcall, freq->err,
                freq->tcall->params.tflush.oldtag);

        spin_lock(&m->lock);
        cb = NULL;
        tag = freq->tcall->params.tflush.oldtag;
        req = NULL;
        list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
                if (rreq->tag == tag) {
                        req = rreq;
                        list_del(&req->req_list);
                        break;
                }
        }
        spin_unlock(&m->lock);

        if (req) {
                spin_lock(&req->lock);
                req->flush = Flushed;
                spin_unlock(&req->lock);

                if (req->cb)
                        (*req->cb) (req, req->cba);
                else
                        kfree(req->rcall);

                wake_up(&m->equeue);
        }

        kfree(freq->tcall);
        kfree(freq->rcall);
        v9fs_mux_free_request(m, freq);
}

static int
v9fs_mux_flush_request(struct v9fs_mux_data *m, struct v9fs_req *req)
{
        struct v9fs_fcall *fc;
        struct v9fs_req *rreq, *rptr;

        dprintk(DEBUG_MUX, "mux %p req %p tag %d\n", m, req, req->tag);

        /* if a response was received for a request, do nothing */
        spin_lock(&req->lock);
        if (req->rcall || req->err) {
                spin_unlock(&req->lock);
                dprintk(DEBUG_MUX, "mux %p req %p response already received\n",
                        m, req);
                return 0;
        }

        req->flush = Flushing;
        spin_unlock(&req->lock);

        spin_lock(&m->lock);
        /* if the request is not sent yet, just remove it from the list */
        list_for_each_entry_safe(rreq, rptr, &m->unsent_req_list, req_list) {
                if (rreq->tag == req->tag) {
                        dprintk(DEBUG_MUX,
                                "mux %p req %p request is not sent yet\n",
                                m, req);
                        list_del(&rreq->req_list);
                        req->flush = Flushed;
                        spin_unlock(&m->lock);
                        if (req->cb)
                                (*req->cb) (req, req->cba);
                        return 0;
                }
        }
        spin_unlock(&m->lock);

        clear_thread_flag(TIF_SIGPENDING);
        fc = v9fs_create_tflush(req->tag);
        v9fs_send_request(m, fc, v9fs_mux_flush_cb, m);
        return 1;
}
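
/*
 * Illustrative only: the 9P exchange implemented above.  If the original
 * request went out with tag 5, the client sends Tflush with oldtag = 5;
 * when the Rflush arrives, v9fs_mux_flush_cb either completes or discards
 * the old request, depending on whether the server answered it first.
 */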

static void
v9fs_mux_rpc_cb(struct v9fs_req *req, void *a)
{
        struct v9fs_mux_rpc *r;

        dprintk(DEBUG_MUX, "req %p r %p\n", req, a);
        r = a;
        r->rcall = req->rcall;
        r->err = req->err;

        if (req->flush != None && !req->err)
                r->err = -ERESTARTSYS;

        wake_up(&r->wqueue);
}

/**
 * v9fs_mux_rpc - send a 9P request and wait until a response is available.
 *      The function can be interrupted.
 * @m: mux data
 * @tc: request to be sent
 * @rc: pointer where a pointer to the response is stored
 */
int
v9fs_mux_rpc(struct v9fs_mux_data *m, struct v9fs_fcall *tc,
             struct v9fs_fcall **rc)
{
        int err, sigpending;
        unsigned long flags;
        struct v9fs_req *req;
        struct v9fs_mux_rpc r;

        r.err = 0;
        r.tcall = tc;
        r.rcall = NULL;
        r.m = m;
        init_waitqueue_head(&r.wqueue);

        if (rc)
                *rc = NULL;

        sigpending = 0;
        if (signal_pending(current)) {
                sigpending = 1;
                clear_thread_flag(TIF_SIGPENDING);
        }

        req = v9fs_send_request(m, tc, v9fs_mux_rpc_cb, &r);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                dprintk(DEBUG_MUX, "error %d\n", err);
                return err;
        }

        err = wait_event_interruptible(r.wqueue, r.rcall != NULL || r.err < 0);
        if (r.err < 0)
                err = r.err;

        if (err == -ERESTARTSYS && m->trans->status == Connected
            && m->err == 0) {
                if (v9fs_mux_flush_request(m, req)) {
                        /* wait until we get response of the flush message */
                        do {
                                clear_thread_flag(TIF_SIGPENDING);
                                err = wait_event_interruptible(r.wqueue,
                                        r.rcall || r.err);
                        } while (!r.rcall && !r.err && err == -ERESTARTSYS &&
                                 m->trans->status == Connected && !m->err);

                        err = -ERESTARTSYS;
                }
                sigpending = 1;
        }

        if (sigpending) {
                spin_lock_irqsave(&current->sighand->siglock, flags);
                recalc_sigpending();
                spin_unlock_irqrestore(&current->sighand->siglock, flags);
        }

        if (rc)
                *rc = r.rcall;
        else
                kfree(r.rcall);

        v9fs_mux_free_request(m, req);
        if (err > 0)
                err = -EIO;

        return err;
}
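
/*
 * Illustrative only: a typical synchronous call through the mux, modeled
 * on the callers elsewhere in fs/9p.  v9fs_create_tversion comes from
 * conv.c; the exact version string and msize are assumptions.
 */
#if 0
        struct v9fs_fcall *tc, *rc = NULL;
        int err;

        tc = v9fs_create_tversion(msize, "9P2000");
        err = v9fs_mux_rpc(mux, tc, &rc);
        kfree(tc);
        if (!err) {
                /* inspect rc->params.rversion ... */
                kfree(rc);
        }
#endif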

#if 0
/**
 * v9fs_mux_rpcnb - send a 9P request without waiting for a response.
 * @m: mux data
 * @tc: request to be sent
 * @cb: callback function to be called when response arrives
 * @a: value to pass to the callback function
 */
int v9fs_mux_rpcnb(struct v9fs_mux_data *m, struct v9fs_fcall *tc,
                   v9fs_mux_req_callback cb, void *a)
{
        int err;
        struct v9fs_req *req;

        req = v9fs_send_request(m, tc, cb, a);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                dprintk(DEBUG_MUX, "error %d\n", err);
                return err;
        }

        dprintk(DEBUG_MUX, "mux %p tc %p tag %d\n", m, tc, req->tag);
        return 0;
}
#endif  /*  0  */

/**
 * v9fs_mux_cancel - cancel all pending requests with error
 * @m: mux data
 * @err: error code
 */
void v9fs_mux_cancel(struct v9fs_mux_data *m, int err)
{
        struct v9fs_req *req, *rtmp;
        LIST_HEAD(cancel_list);

        dprintk(DEBUG_ERROR, "mux %p err %d\n", m, err);
        m->err = err;
        spin_lock(&m->lock);
        list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
                list_move(&req->req_list, &cancel_list);
        }
        list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
                list_move(&req->req_list, &cancel_list);
        }
        spin_unlock(&m->lock);

        list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
                list_del(&req->req_list);
                if (!req->err)
                        req->err = err;

                if (req->cb)
                        (*req->cb) (req, req->cba);
                else
                        kfree(req->rcall);
        }

        wake_up(&m->equeue);
}

static u16 v9fs_mux_get_tag(struct v9fs_mux_data *m)
{
        int tag;

        tag = v9fs_get_idpool(&m->tagpool);
        if (tag < 0)
                return V9FS_NOTAG;
        else
                return (u16) tag;
}

static void v9fs_mux_put_tag(struct v9fs_mux_data *m, u16 tag)
{
        if (tag != V9FS_NOTAG && v9fs_check_idpool(tag, &m->tagpool))
                v9fs_put_idpool(tag, &m->tagpool);
}