Drivers: hv: Add code to distribute channel interrupt load
[linux-3.10.git] / drivers / hv / channel_mgmt.c
1 /*
2  * Copyright (c) 2009, Microsoft Corporation.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * You should have received a copy of the GNU General Public License along with
14  * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15  * Place - Suite 330, Boston, MA 02111-1307 USA.
16  *
17  * Authors:
18  *   Haiyang Zhang <haiyangz@microsoft.com>
19  *   Hank Janssen  <hjanssen@microsoft.com>
20  */
21 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22
23 #include <linux/kernel.h>
24 #include <linux/sched.h>
25 #include <linux/wait.h>
26 #include <linux/mm.h>
27 #include <linux/slab.h>
28 #include <linux/list.h>
29 #include <linux/module.h>
30 #include <linux/completion.h>
31 #include <linux/hyperv.h>
32
33 #include "hyperv_vmbus.h"
34
/*
 * One entry of the channel-message dispatch table: maps a VMBus channel
 * message type to its handler.  Consumed by vmbus_onmessage() via
 * channel_message_table[] below; a NULL handler means the message is
 * ignored by this dispatcher.
 */
struct vmbus_channel_message_table_entry {
	enum vmbus_channel_message_type message_type;
	void (*message_handler)(struct vmbus_channel_message_header *msg);
};
39
40
41 /**
42  * vmbus_prep_negotiate_resp() - Create default response for Hyper-V Negotiate message
43  * @icmsghdrp: Pointer to msg header structure
44  * @icmsg_negotiate: Pointer to negotiate message structure
45  * @buf: Raw buffer channel data
46  *
47  * @icmsghdrp is of type &struct icmsg_hdr.
48  * @negop is of type &struct icmsg_negotiate.
49  * Set up and fill in default negotiate response message.
50  *
51  * The max_fw_version specifies the maximum framework version that
52  * we can support and max _srv_version specifies the maximum service
53  * version we can support. A special value MAX_SRV_VER can be
54  * specified to indicate that we can handle the maximum version
55  * exposed by the host.
56  *
57  * Mainly used by Hyper-V drivers.
58  */
59 void vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
60                                 struct icmsg_negotiate *negop, u8 *buf,
61                                 int max_fw_version, int max_srv_version)
62 {
63         int icframe_vercnt;
64         int icmsg_vercnt;
65         int i;
66
67         icmsghdrp->icmsgsize = 0x10;
68
69         negop = (struct icmsg_negotiate *)&buf[
70                 sizeof(struct vmbuspipe_hdr) +
71                 sizeof(struct icmsg_hdr)];
72
73         icframe_vercnt = negop->icframe_vercnt;
74         icmsg_vercnt = negop->icmsg_vercnt;
75
76         /*
77          * Select the framework version number we will
78          * support.
79          */
80
81         for (i = 0; i < negop->icframe_vercnt; i++) {
82                 if (negop->icversion_data[i].major <= max_fw_version)
83                         icframe_vercnt = negop->icversion_data[i].major;
84         }
85
86         for (i = negop->icframe_vercnt;
87                  (i < negop->icframe_vercnt + negop->icmsg_vercnt); i++) {
88                 if (negop->icversion_data[i].major <= max_srv_version)
89                         icmsg_vercnt = negop->icversion_data[i].major;
90         }
91
92         /*
93          * Respond with the maximum framework and service
94          * version numbers we can support.
95          */
96         negop->icframe_vercnt = 1;
97         negop->icmsg_vercnt = 1;
98         negop->icversion_data[0].major = icframe_vercnt;
99         negop->icversion_data[0].minor = 0;
100         negop->icversion_data[1].major = icmsg_vercnt;
101         negop->icversion_data[1].minor = 0;
102 }
103
104 EXPORT_SYMBOL_GPL(vmbus_prep_negotiate_resp);
105
106 /*
107  * alloc_channel - Allocate and initialize a vmbus channel object
108  */
109 static struct vmbus_channel *alloc_channel(void)
110 {
111         struct vmbus_channel *channel;
112
113         channel = kzalloc(sizeof(*channel), GFP_ATOMIC);
114         if (!channel)
115                 return NULL;
116
117         spin_lock_init(&channel->inbound_lock);
118
119         channel->controlwq = create_workqueue("hv_vmbus_ctl");
120         if (!channel->controlwq) {
121                 kfree(channel);
122                 return NULL;
123         }
124
125         return channel;
126 }
127
/*
 * release_channel - Release the vmbus channel object itself.
 *
 * Work callback queued by free_channel() onto the vmbus connection's
 * workqueue: the channel's own control workqueue cannot destroy itself,
 * so the teardown has to run from a different workqueue context.
 */
static void release_channel(struct work_struct *work)
{
	struct vmbus_channel *channel = container_of(work,
						     struct vmbus_channel,
						     work);

	destroy_workqueue(channel->controlwq);

	kfree(channel);
}
141
/*
 * free_channel - Release the resources used by the vmbus channel object.
 *
 * The channel is freed asynchronously by release_channel(); the caller
 * must not touch @channel after this returns.
 */
static void free_channel(struct vmbus_channel *channel)
{

	/*
	 * We have to release the channel's workqueue/thread in the vmbus's
	 * workqueue/thread context, i.e. we can't destroy ourselves.
	 * Hand the actual teardown to release_channel() on the connection
	 * workqueue.
	 */
	INIT_WORK(&channel->work, release_channel);
	queue_work(vmbus_connection.work_queue, &channel->work);
}
156
157
158
/*
 * vmbus_process_rescind_offer -
 * Rescind the offer by initiating a device removal.
 *
 * Work callback running on the channel's control workqueue; channel->work
 * is armed with this handler in vmbus_process_offer() and queued by
 * vmbus_onoffer_rescind().
 */
static void vmbus_process_rescind_offer(struct work_struct *work)
{
	struct vmbus_channel *channel = container_of(work,
						     struct vmbus_channel,
						     work);

	vmbus_device_unregister(channel->device_obj);
}
171
172 void vmbus_free_channels(void)
173 {
174         struct vmbus_channel *channel;
175
176         list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
177                 vmbus_device_unregister(channel->device_obj);
178                 kfree(channel->device_obj);
179                 free_channel(channel);
180         }
181 }
182
/*
 * vmbus_process_offer - Process the offer by creating a channel/device
 * associated with this offer.
 *
 * Work callback running on the new channel's control workqueue (queued
 * from vmbus_onoffer()).  Dedups the offer against the existing channel
 * list, then creates and registers the child device.
 */
static void vmbus_process_offer(struct work_struct *work)
{
	struct vmbus_channel *newchannel = container_of(work,
							struct vmbus_channel,
							work);
	struct vmbus_channel *channel;
	bool fnew = true;
	int ret;
	unsigned long flags;

	/* The next possible work is rescind handling */
	INIT_WORK(&newchannel->work, vmbus_process_rescind_offer);

	/* Make sure this is a new offer */
	spin_lock_irqsave(&vmbus_connection.channel_lock, flags);

	/* A duplicate has the same interface type AND instance GUIDs. */
	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
		if (!uuid_le_cmp(channel->offermsg.offer.if_type,
			newchannel->offermsg.offer.if_type) &&
			!uuid_le_cmp(channel->offermsg.offer.if_instance,
				newchannel->offermsg.offer.if_instance)) {
			fnew = false;
			break;
		}
	}

	if (fnew)
		list_add_tail(&newchannel->listentry,
			      &vmbus_connection.chn_list);

	spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags);

	if (!fnew) {
		/* Duplicate offer: discard the newly allocated channel. */
		free_channel(newchannel);
		return;
	}

	/*
	 * Start the process of binding this offer to the driver
	 * We need to set the DeviceObject field before calling
	 * vmbus_child_dev_add()
	 */
	newchannel->device_obj = vmbus_device_create(
		&newchannel->offermsg.offer.if_type,
		&newchannel->offermsg.offer.if_instance,
		newchannel);

	/*
	 * Add the new device to the bus. This will kick off device-driver
	 * binding which eventually invokes the device driver's AddDevice()
	 * method.
	 */
	ret = vmbus_device_register(newchannel->device_obj);
	if (ret != 0) {
		pr_err("unable to add child device object (relid %d)\n",
			   newchannel->offermsg.child_relid);

		/* Undo the list insertion above, then free the channel. */
		spin_lock_irqsave(&vmbus_connection.channel_lock, flags);
		list_del(&newchannel->listentry);
		spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags);
		kfree(newchannel->device_obj);

		free_channel(newchannel);
	} else {
		/*
		 * This state is used to indicate a successful open
		 * so that when we do close the channel normally, we
		 * can cleanup properly
		 */
		newchannel->state = CHANNEL_OPEN_STATE;
	}
}
259
/*
 * Indices into the hp_devs[] table below; MAX_PERF_CHN is the number of
 * performance-critical device classes.
 */
enum {
	IDE = 0,
	SCSI,
	NIC,
	MAX_PERF_CHN,
};
266
/*
 * This is an array of channels (devices) that are performance critical.
 * We attempt to distribute the interrupt load for these devices across
 * all available CPUs.
 *
 * Indexed by the IDE/SCSI/NIC enum above.  The GUID bytes are stored in
 * little-endian (wire) order, hence the swapped-looking byte sequences
 * relative to the textual GUIDs in the comments.
 */
static const uuid_le hp_devs[] = {
	/* {32412632-86cb-44a2-9b5c-50d1417354f5} */
	/* IDE */
	{
		.b = {
			0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44,
			0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5
		}
	},
	/* {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f} */
	/* Storage - SCSI */
	{
		.b  = {
			0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d,
			0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f
		}
	},
	/* {F8615163-DF3E-46c5-913F-F2D2F965ED0E} */
	/* Network */
	{
		.b = {
			0x63, 0x51, 0x61, 0xF8, 0x3E, 0xDF, 0xc5, 0x46,
			0x91, 0x3F, 0xF2, 0xD2, 0xF9, 0x65, 0xED, 0x0E
		}
	},

};
299
300
/*
 * We use this state to statically distribute the channel interrupt load.
 * Incremented in get_vp_index() for each performance-critical channel.
 * NOTE(review): the increment is not atomic - presumably serialized by
 * the offer-processing context; confirm before relying on it.
 */
static u32  next_vp;
305
306 /*
307  * Starting with Win8, we can statically distribute the incoming
308  * channel interrupt load by binding a channel to VCPU. We
309  * implement here a simple round robin scheme for distributing
310  * the interrupt load.
311  * We will bind channels that are not performance critical to cpu 0 and
312  * performance critical channels (IDE, SCSI and Network) will be uniformly
313  * distributed across all available CPUs.
314  */
315 static u32 get_vp_index(uuid_le *type_guid)
316 {
317         u32 cur_cpu;
318         int i;
319         bool perf_chn = false;
320         u32 max_cpus = num_online_cpus();
321
322         for (i = IDE; i < MAX_PERF_CHN; i++) {
323                 if (!memcmp(type_guid->b, hp_devs[i].b,
324                                  sizeof(uuid_le))) {
325                         perf_chn = true;
326                         break;
327                 }
328         }
329         if ((vmbus_proto_version == VERSION_WS2008) ||
330             (vmbus_proto_version == VERSION_WIN7) || (!perf_chn)) {
331                 /*
332                  * Prior to win8, all channel interrupts are
333                  * delivered on cpu 0.
334                  * Also if the channel is not a performance critical
335                  * channel, bind it to cpu 0.
336                  */
337                 return 0;
338         }
339         cur_cpu = (++next_vp % max_cpus);
340         return hv_context.vp_index[cur_cpu];
341 }
342
/*
 * vmbus_onoffer - Handler for channel offers from vmbus in parent partition.
 *
 * Allocates a vmbus_channel for the offer, initializes its host-signalling
 * state and target VCPU, saves the offer message, and defers the rest of
 * offer processing to vmbus_process_offer() on the channel's own control
 * workqueue.
 */
static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_offer_channel *offer;
	struct vmbus_channel *newchannel;

	offer = (struct vmbus_channel_offer_channel *)hdr;

	/* Allocate the channel object and save this offer. */
	newchannel = alloc_channel();
	if (!newchannel) {
		pr_err("Unable to allocate channel object\n");
		return;
	}

	/*
	 * By default we setup state to enable batched
	 * reading. A specific service can choose to
	 * disable this prior to opening the channel.
	 */
	newchannel->batched_reading = true;

	/*
	 * Setup state for signalling the host.
	 * sig_event must be hypercall-parameter aligned, so carve an
	 * aligned pointer out of the channel's sig_buf.
	 */
	newchannel->sig_event = (struct hv_input_signal_event *)
				(ALIGN((unsigned long)
				&newchannel->sig_buf,
				HV_HYPERCALL_PARAM_ALIGN));

	newchannel->sig_event->connectionid.asu32 = 0;
	newchannel->sig_event->connectionid.u.id = VMBUS_EVENT_CONNECTION_ID;
	newchannel->sig_event->flag_number = 0;
	newchannel->sig_event->rsvdz = 0;

	if (vmbus_proto_version != VERSION_WS2008) {
		/* Post-WS2008 hosts supply a per-channel connection id. */
		newchannel->is_dedicated_interrupt =
				(offer->is_dedicated_interrupt != 0);
		newchannel->sig_event->connectionid.u.id =
				offer->connection_id;
	}

	/* Bind the channel interrupt to a VCPU (see get_vp_index()). */
	newchannel->target_vp = get_vp_index(&offer->offer.if_type);

	memcpy(&newchannel->offermsg, offer,
	       sizeof(struct vmbus_channel_offer_channel));
	/* Each monitor group covers 32 channels. */
	newchannel->monitor_grp = (u8)offer->monitorid / 32;
	newchannel->monitor_bit = (u8)offer->monitorid % 32;

	INIT_WORK(&newchannel->work, vmbus_process_offer);
	queue_work(newchannel->controlwq, &newchannel->work);
}
398
399 /*
400  * vmbus_onoffer_rescind - Rescind offer handler.
401  *
402  * We queue a work item to process this offer synchronously
403  */
404 static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
405 {
406         struct vmbus_channel_rescind_offer *rescind;
407         struct vmbus_channel *channel;
408
409         rescind = (struct vmbus_channel_rescind_offer *)hdr;
410         channel = relid2channel(rescind->child_relid);
411
412         if (channel == NULL)
413                 /* Just return here, no channel found */
414                 return;
415
416         /* work is initialized for vmbus_process_rescind_offer() from
417          * vmbus_process_offer() where the channel got created */
418         queue_work(channel->controlwq, &channel->work);
419 }
420
/*
 * vmbus_onoffers_delivered -
 * This is invoked when all offers have been delivered.
 *
 * Nothing to do here: each offer was already queued for processing on
 * its own channel workqueue by vmbus_onoffer().
 */
static void vmbus_onoffers_delivered(
			struct vmbus_channel_message_header *hdr)
{
}
431
/*
 * vmbus_onopen_result - Open result handler.
 *
 * This is invoked when we received a response to our channel open request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_onopen_result(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_open_result *result;
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_open_channel *openmsg;
	unsigned long flags;

	result = (struct vmbus_channel_open_result *)hdr;

	/*
	 * Find the open msg, copy the result and signal/unblock the wait event
	 */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
				msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		/* Match on both relid and open id of the pending request. */
		if (requestheader->msgtype == CHANNELMSG_OPENCHANNEL) {
			openmsg =
			(struct vmbus_channel_open_channel *)msginfo->msg;
			if (openmsg->child_relid == result->child_relid &&
			    openmsg->openid == result->openid) {
				memcpy(&msginfo->response.open_result,
				       result,
				       sizeof(
					struct vmbus_channel_open_result));
				complete(&msginfo->waitevent);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}
475
/*
 * vmbus_ongpadl_created - GPADL created handler.
 *
 * This is invoked when we received a response to our gpadl create request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_ongpadl_created(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_gpadl_created *gpadlcreated;
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_gpadl_header *gpadlheader;
	unsigned long flags;

	gpadlcreated = (struct vmbus_channel_gpadl_created *)hdr;

	/*
	 * Find the establish msg, copy the result and signal/unblock the wait
	 * event
	 */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
				msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		/* Match on both relid and gpadl handle of the request. */
		if (requestheader->msgtype == CHANNELMSG_GPADL_HEADER) {
			gpadlheader =
			(struct vmbus_channel_gpadl_header *)requestheader;

			if ((gpadlcreated->child_relid ==
			     gpadlheader->child_relid) &&
			    (gpadlcreated->gpadl == gpadlheader->gpadl)) {
				memcpy(&msginfo->response.gpadl_created,
				       gpadlcreated,
				       sizeof(
					struct vmbus_channel_gpadl_created));
				complete(&msginfo->waitevent);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}
522
/*
 * vmbus_ongpadl_torndown - GPADL torndown handler.
 *
 * This is invoked when we received a response to our gpadl teardown request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_ongpadl_torndown(
			struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_gpadl_torndown *gpadl_torndown;
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_gpadl_teardown *gpadl_teardown;
	unsigned long flags;

	gpadl_torndown = (struct vmbus_channel_gpadl_torndown *)hdr;

	/*
	 * Find the open msg, copy the result and signal/unblock the wait event
	 */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
				msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		/* Match on the gpadl handle of the pending teardown. */
		if (requestheader->msgtype == CHANNELMSG_GPADL_TEARDOWN) {
			gpadl_teardown =
			(struct vmbus_channel_gpadl_teardown *)requestheader;

			if (gpadl_torndown->gpadl == gpadl_teardown->gpadl) {
				memcpy(&msginfo->response.gpadl_torndown,
				       gpadl_torndown,
				       sizeof(
					struct vmbus_channel_gpadl_torndown));
				complete(&msginfo->waitevent);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}
567
/*
 * vmbus_onversion_response - Version response handler
 *
 * This is invoked when we received a response to our initiate contact request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 *
 * NOTE(review): unlike the other response handlers, there is no id to
 * match and no break after complete() - every pending INITIATE_CONTACT
 * request on the list is completed.  Presumably only one can be pending
 * at a time; confirm against the connect path.
 */
static void vmbus_onversion_response(
		struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_version_response *version_response;
	unsigned long flags;

	version_response = (struct vmbus_channel_version_response *)hdr;
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
				msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype ==
		    CHANNELMSG_INITIATE_CONTACT) {
			memcpy(&msginfo->response.version_response,
			      version_response,
			      sizeof(struct vmbus_channel_version_response));
			complete(&msginfo->waitevent);
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}
601
/*
 * Channel message dispatch table, indexed by message type and consumed
 * by vmbus_onmessage().  A NULL handler means the message either needs
 * no action here or is a request type we only ever send.
 */
static struct vmbus_channel_message_table_entry
	channel_message_table[CHANNELMSG_COUNT] = {
	{CHANNELMSG_INVALID,			NULL},
	{CHANNELMSG_OFFERCHANNEL,		vmbus_onoffer},
	{CHANNELMSG_RESCIND_CHANNELOFFER,	vmbus_onoffer_rescind},
	{CHANNELMSG_REQUESTOFFERS,		NULL},
	{CHANNELMSG_ALLOFFERS_DELIVERED,	vmbus_onoffers_delivered},
	{CHANNELMSG_OPENCHANNEL,		NULL},
	{CHANNELMSG_OPENCHANNEL_RESULT,	vmbus_onopen_result},
	{CHANNELMSG_CLOSECHANNEL,		NULL},
	{CHANNELMSG_GPADL_HEADER,		NULL},
	{CHANNELMSG_GPADL_BODY,		NULL},
	{CHANNELMSG_GPADL_CREATED,		vmbus_ongpadl_created},
	{CHANNELMSG_GPADL_TEARDOWN,		NULL},
	{CHANNELMSG_GPADL_TORNDOWN,		vmbus_ongpadl_torndown},
	{CHANNELMSG_RELID_RELEASED,		NULL},
	{CHANNELMSG_INITIATE_CONTACT,		NULL},
	{CHANNELMSG_VERSION_RESPONSE,		vmbus_onversion_response},
	{CHANNELMSG_UNLOAD,			NULL},
};
623
624 /*
625  * vmbus_onmessage - Handler for channel protocol messages.
626  *
627  * This is invoked in the vmbus worker thread context.
628  */
629 void vmbus_onmessage(void *context)
630 {
631         struct hv_message *msg = context;
632         struct vmbus_channel_message_header *hdr;
633         int size;
634
635         hdr = (struct vmbus_channel_message_header *)msg->u.payload;
636         size = msg->header.payload_size;
637
638         if (hdr->msgtype >= CHANNELMSG_COUNT) {
639                 pr_err("Received invalid channel message type %d size %d\n",
640                            hdr->msgtype, size);
641                 print_hex_dump_bytes("", DUMP_PREFIX_NONE,
642                                      (unsigned char *)msg->u.payload, size);
643                 return;
644         }
645
646         if (channel_message_table[hdr->msgtype].message_handler)
647                 channel_message_table[hdr->msgtype].message_handler(hdr);
648         else
649                 pr_err("Unhandled channel message type %d\n", hdr->msgtype);
650 }
651
652 /*
653  * vmbus_request_offers - Send a request to get all our pending offers.
654  */
655 int vmbus_request_offers(void)
656 {
657         struct vmbus_channel_message_header *msg;
658         struct vmbus_channel_msginfo *msginfo;
659         int ret, t;
660
661         msginfo = kmalloc(sizeof(*msginfo) +
662                           sizeof(struct vmbus_channel_message_header),
663                           GFP_KERNEL);
664         if (!msginfo)
665                 return -ENOMEM;
666
667         init_completion(&msginfo->waitevent);
668
669         msg = (struct vmbus_channel_message_header *)msginfo->msg;
670
671         msg->msgtype = CHANNELMSG_REQUESTOFFERS;
672
673
674         ret = vmbus_post_msg(msg,
675                                sizeof(struct vmbus_channel_message_header));
676         if (ret != 0) {
677                 pr_err("Unable to request offers - %d\n", ret);
678
679                 goto cleanup;
680         }
681
682         t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ);
683         if (t == 0) {
684                 ret = -ETIMEDOUT;
685                 goto cleanup;
686         }
687
688
689
690 cleanup:
691         kfree(msginfo);
692
693         return ret;
694 }
695
696 /* eof */