1 /*
2  * net/tipc/link.c: TIPC link code
3  * 
4  * Copyright (c) 2003-2006, Ericsson AB
5  * Copyright (c) 2004-2005, Wind River Systems
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the names of the copyright holders nor the names of its
17  *    contributors may be used to endorse or promote products derived from
18  *    this software without specific prior written permission.
19  *
20  * Alternatively, this software may be distributed under the terms of the
21  * GNU General Public License ("GPL") version 2 as published by the Free
22  * Software Foundation.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34  * POSSIBILITY OF SUCH DAMAGE.
35  */
36
37 #include "core.h"
38 #include "dbg.h"
39 #include "link.h"
40 #include "net.h"
41 #include "node.h"
42 #include "port.h"
43 #include "addr.h"
44 #include "node_subscr.h"
45 #include "name_distr.h"
46 #include "bearer.h"
47 #include "name_table.h"
48 #include "discover.h"
49 #include "config.h"
50 #include "bcast.h"
51
52
53 /* 
54  * Limit for deferred reception queue: 
55  */
56
57 #define DEF_QUEUE_LIMIT 256u
58
59 /* 
60  * Link state events: 
61  */
62
63 #define  STARTING_EVT    856384768      /* link processing trigger */
64 #define  TRAFFIC_MSG_EVT 560815u        /* traffic message received on link */
65 #define  TIMEOUT_EVT     560817u        /* link timer expired */
66
67 /*
68  * The following two 'message types' are really just implementation
69  * data conveniently stored in the message header.
70  * They must not be considered part of the protocol.
71  */
72 #define OPEN_MSG   0
73 #define CLOSED_MSG 1
74
75 /* 
76  * State value stored in 'exp_msg_count'
77  */
78
79 #define START_CHANGEOVER 100000u
80
81 /**
82  * struct link_name - deconstructed link name
83  * @addr_local: network address of node at this end
84  * @if_local: name of interface at this end
85  * @addr_peer: network address of node at far end
86  * @if_peer: name of interface at far end
87  */
88
89 struct link_name {
90         u32 addr_local;
91         char if_local[TIPC_MAX_IF_NAME];
92         u32 addr_peer;
93         char if_peer[TIPC_MAX_IF_NAME];
94 };
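/*
 * Example (illustrative): the link name "1.1.10:eth0-1.1.20:eth0"
 * deconstructs into addr_local = 1.1.10, if_local = "eth0",
 * addr_peer = 1.1.20 and if_peer = "eth0"; see link_name_validate().
 */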
95
96 #if 0
97
98 /* LINK EVENT CODE IS NOT SUPPORTED AT PRESENT */
99
100 /** 
101  * struct link_event - link up/down event notification
102  */
103
104 struct link_event {
105         u32 addr;
106         int up;
107         void (*fcn)(u32, char *, int);
108         char name[TIPC_MAX_LINK_NAME];
109 };
110
111 #endif
112
113 static void link_handle_out_of_seq_msg(struct link *l_ptr,
114                                        struct sk_buff *buf);
115 static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf);
116 static int  link_recv_changeover_msg(struct link **l_ptr, struct sk_buff **buf);
117 static void link_set_supervision_props(struct link *l_ptr, u32 tolerance);
118 static int  link_send_sections_long(struct port *sender,
119                                     struct iovec const *msg_sect,
120                                     u32 num_sect, u32 destnode);
121 static void link_check_defragm_bufs(struct link *l_ptr);
122 static void link_state_event(struct link *l_ptr, u32 event);
123 static void link_reset_statistics(struct link *l_ptr);
124 static void link_print(struct link *l_ptr, struct print_buf *buf, 
125                        const char *str);
126
127 /*
128  * Debugging code used by link routines only
129  *
130  * When debugging link problems on a system that has multiple links,
131  * the standard TIPC debugging routines may not be useful since they
132  * allow the output from multiple links to be intermixed.  For this reason
133  * routines of the form "dbg_link_XXX()" have been created that will capture
134  * debug info into a link's personal print buffer, which can then be dumped
135  * into the TIPC system log (LOG) upon request.
136  *
137  * To enable per-link debugging, use LINK_LOG_BUF_SIZE to specify the size
138  * of the print buffer used by each link.  If LINK_LOG_BUF_SIZE is set to 0,
139  * the dbg_link_XXX() routines simply send their output to the standard 
140  * debug print buffer (DBG_OUTPUT), if it has been defined; this can be useful
141  * when there is only a single link in the system being debugged.
142  *
143  * Notes:
144  * - When enabled, LINK_LOG_BUF_SIZE should be set to at least 1000 (bytes)
145  * - "l_ptr" must be valid when using dbg_link_XXX() macros  
146  */
147
148 #define LINK_LOG_BUF_SIZE 0
149
150 #define dbg_link(fmt, arg...)  do {if (LINK_LOG_BUF_SIZE) tipc_printf(&l_ptr->print_buf, fmt, ## arg); } while(0)
151 #define dbg_link_msg(msg, txt) do {if (LINK_LOG_BUF_SIZE) msg_print(&l_ptr->print_buf, msg, txt); } while(0)
152 #define dbg_link_state(txt) do {if (LINK_LOG_BUF_SIZE) link_print(l_ptr, &l_ptr->print_buf, txt); } while(0)
153 #define dbg_link_dump() do { \
154         if (LINK_LOG_BUF_SIZE) { \
155                 tipc_printf(LOG, "\n\nDumping link <%s>:\n", l_ptr->name); \
156                 printbuf_move(LOG, &l_ptr->print_buf); \
157         } \
158 } while (0)
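/*
 * Illustrative usage sketch of the macros above (assumes a valid "l_ptr"
 * and LINK_LOG_BUF_SIZE set to a non-zero value such as 1000):
 *
 *      dbg_link("probe %u of %u sent\n", l_ptr->fsm_msg_cnt, l_ptr->abort_limit);
 *      dbg_link_state("after sending probe: ");
 *      dbg_link_dump();    -- moves the link's private buffer into LOG
 */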
159
160 static inline void dbg_print_link(struct link *l_ptr, const char *str)
161 {
162         if (DBG_OUTPUT)
163                 link_print(l_ptr, DBG_OUTPUT, str);
164 }
165
166 static inline void dbg_print_buf_chain(struct sk_buff *root_buf)
167 {
168         if (DBG_OUTPUT) {
169                 struct sk_buff *buf = root_buf;
170
171                 while (buf) {
172                         msg_dbg(buf_msg(buf), "In chain: ");
173                         buf = buf->next;
174                 }
175         }
176 }
177
178 /*
179  *  Simple inlined link routines
180  */
181
182 static inline unsigned int align(unsigned int i)
183 {
184         return (i + 3) & ~3u;
185 }
186
187 static inline int link_working_working(struct link *l_ptr)
188 {
189         return (l_ptr->state == WORKING_WORKING);
190 }
191
192 static inline int link_working_unknown(struct link *l_ptr)
193 {
194         return (l_ptr->state == WORKING_UNKNOWN);
195 }
196
197 static inline int link_reset_unknown(struct link *l_ptr)
198 {
199         return (l_ptr->state == RESET_UNKNOWN);
200 }
201
202 static inline int link_reset_reset(struct link *l_ptr)
203 {
204         return (l_ptr->state == RESET_RESET);
205 }
206
207 static inline int link_blocked(struct link *l_ptr)
208 {
209         return (l_ptr->exp_msg_count || l_ptr->blocked);
210 }
211
212 static inline int link_congested(struct link *l_ptr)
213 {
214         return (l_ptr->out_queue_size >= l_ptr->queue_limit[0]);
215 }
216
217 static inline u32 link_max_pkt(struct link *l_ptr)
218 {
219         return l_ptr->max_pkt;
220 }
221
222 static inline void link_init_max_pkt(struct link *l_ptr)
223 {
224         u32 max_pkt;
225         
226         max_pkt = (l_ptr->b_ptr->publ.mtu & ~3);
227         if (max_pkt > MAX_MSG_SIZE)
228                 max_pkt = MAX_MSG_SIZE;
229
230         l_ptr->max_pkt_target = max_pkt;
231         if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT)
232                 l_ptr->max_pkt = l_ptr->max_pkt_target;
233         else 
234                 l_ptr->max_pkt = MAX_PKT_DEFAULT;
235
236         l_ptr->max_pkt_probes = 0;
237 }
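/*
 * Example (illustrative): an Ethernet bearer advertising an MTU of 1500
 * gives max_pkt_target = 1500; the working max_pkt starts at the lesser
 * of that target and MAX_PKT_DEFAULT, and is only raised toward the
 * target once the max-packet probing (see link_state_event()) succeeds.
 */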
238
239 static inline u32 link_next_sent(struct link *l_ptr)
240 {
241         if (l_ptr->next_out)
242                 return msg_seqno(buf_msg(l_ptr->next_out));
243         return mod(l_ptr->next_out_no);
244 }
245
246 static inline u32 link_last_sent(struct link *l_ptr)
247 {
248         return mod(link_next_sent(l_ptr) - 1);
249 }
250
251 /*
252  *  Simple non-inlined link routines (i.e. referenced outside this file)
253  */
254
255 int link_is_up(struct link *l_ptr)
256 {
257         if (!l_ptr)
258                 return 0;
259         return (link_working_working(l_ptr) || link_working_unknown(l_ptr));
260 }
261
262 int link_is_active(struct link *l_ptr)
263 {
264         return ((l_ptr->owner->active_links[0] == l_ptr) ||
265                 (l_ptr->owner->active_links[1] == l_ptr));
266 }
267
268 /**
269  * link_name_validate - validate & (optionally) deconstruct link name
270  * @name: ptr to link name string
271  * @name_parts: ptr to area for link name components (or NULL if not needed)
272  * 
273  * Returns 1 if link name is valid, otherwise 0.
274  */
275
276 static int link_name_validate(const char *name, struct link_name *name_parts)
277 {
278         char name_copy[TIPC_MAX_LINK_NAME];
279         char *addr_local;
280         char *if_local;
281         char *addr_peer;
282         char *if_peer;
283         char dummy;
284         u32 z_local, c_local, n_local;
285         u32 z_peer, c_peer, n_peer;
286         u32 if_local_len;
287         u32 if_peer_len;
288
289         /* copy link name & ensure length is OK */
290
291         name_copy[TIPC_MAX_LINK_NAME - 1] = 0;
292         /* need above in case non-POSIX strncpy() doesn't pad with nulls */
293         strncpy(name_copy, name, TIPC_MAX_LINK_NAME);
294         if (name_copy[TIPC_MAX_LINK_NAME - 1] != 0)
295                 return 0;
296
297         /* ensure all component parts of link name are present */
298
299         addr_local = name_copy;
300         if ((if_local = strchr(addr_local, ':')) == NULL)
301                 return 0;
302         *(if_local++) = 0;
303         if ((addr_peer = strchr(if_local, '-')) == NULL)
304                 return 0;
305         *(addr_peer++) = 0;
306         if_local_len = addr_peer - if_local;
307         if ((if_peer = strchr(addr_peer, ':')) == NULL)
308                 return 0;
309         *(if_peer++) = 0;
310         if_peer_len = strlen(if_peer) + 1;
311
312         /* validate component parts of link name */
313
314         if ((sscanf(addr_local, "%u.%u.%u%c",
315                     &z_local, &c_local, &n_local, &dummy) != 3) ||
316             (sscanf(addr_peer, "%u.%u.%u%c",
317                     &z_peer, &c_peer, &n_peer, &dummy) != 3) ||
318             (z_local > 255) || (c_local > 4095) || (n_local > 4095) ||
319             (z_peer  > 255) || (c_peer  > 4095) || (n_peer  > 4095) ||
320             (if_local_len <= 1) || (if_local_len > TIPC_MAX_IF_NAME) || 
321             (if_peer_len  <= 1) || (if_peer_len  > TIPC_MAX_IF_NAME) || 
322             (strspn(if_local, tipc_alphabet) != (if_local_len - 1)) ||
323             (strspn(if_peer, tipc_alphabet) != (if_peer_len - 1)))
324                 return 0;
325
326         /* return link name components, if necessary */
327
328         if (name_parts) {
329                 name_parts->addr_local = tipc_addr(z_local, c_local, n_local);
330                 strcpy(name_parts->if_local, if_local);
331                 name_parts->addr_peer = tipc_addr(z_peer, c_peer, n_peer);
332                 strcpy(name_parts->if_peer, if_peer);
333         }
334         return 1;
335 }
336
337 /**
338  * link_timeout - handle expiration of link timer
339  * @l_ptr: pointer to link
340  * 
341  * This routine must not grab "net_lock" to avoid a potential deadlock conflict
342  * with link_delete().  (There is no risk that the node will be deleted by
343  * another thread because link_delete() always cancels the link timer before
344  * node_delete() is called.)
345  */
346
347 static void link_timeout(struct link *l_ptr)
348 {
349         node_lock(l_ptr->owner);
350
351         /* update counters used in statistical profiling of send traffic */
352
353         l_ptr->stats.accu_queue_sz += l_ptr->out_queue_size;
354         l_ptr->stats.queue_sz_counts++;
355
356         if (l_ptr->out_queue_size > l_ptr->stats.max_queue_sz)
357                 l_ptr->stats.max_queue_sz = l_ptr->out_queue_size;
358
359         if (l_ptr->first_out) {
360                 struct tipc_msg *msg = buf_msg(l_ptr->first_out);
361                 u32 length = msg_size(msg);
362
363                 if ((msg_user(msg) == MSG_FRAGMENTER)
364                     && (msg_type(msg) == FIRST_FRAGMENT)) {
365                         length = msg_size(msg_get_wrapped(msg));
366                 }
367                 if (length) {
368                         l_ptr->stats.msg_lengths_total += length;
369                         l_ptr->stats.msg_length_counts++;
370                         if (length <= 64)
371                                 l_ptr->stats.msg_length_profile[0]++;
372                         else if (length <= 256)
373                                 l_ptr->stats.msg_length_profile[1]++;
374                         else if (length <= 1024)
375                                 l_ptr->stats.msg_length_profile[2]++;
376                         else if (length <= 4096)
377                                 l_ptr->stats.msg_length_profile[3]++;
378                         else if (length <= 16384)
379                                 l_ptr->stats.msg_length_profile[4]++;
380                         else if (length <= 32768)
381                                 l_ptr->stats.msg_length_profile[5]++;
382                         else
383                                 l_ptr->stats.msg_length_profile[6]++;
384                 }
385         }
386
387         /* do all other link processing performed on a periodic basis */
388
389         link_check_defragm_bufs(l_ptr);
390
391         link_state_event(l_ptr, TIMEOUT_EVT);
392
393         if (l_ptr->next_out)
394                 link_push_queue(l_ptr);
395
396         node_unlock(l_ptr->owner);
397 }
398
399 static inline void link_set_timer(struct link *l_ptr, u32 time)
400 {
401         k_start_timer(&l_ptr->timer, time);
402 }
403
404 /**
405  * link_create - create a new link
406  * @b_ptr: pointer to associated bearer
407  * @peer: network address of node at other end of link
408  * @media_addr: media address to use when sending messages over link
409  * 
410  * Returns pointer to link.
411  */
412
413 struct link *link_create(struct bearer *b_ptr, const u32 peer,
414                          const struct tipc_media_addr *media_addr)
415 {
416         struct link *l_ptr;
417         struct tipc_msg *msg;
418         char *if_name;
419
420         l_ptr = (struct link *)kmalloc(sizeof(*l_ptr), GFP_ATOMIC);
421         if (!l_ptr) {
422                 warn("Memory squeeze; Failed to create link\n");
423                 return NULL;
424         }
425         memset(l_ptr, 0, sizeof(*l_ptr));
426
427         l_ptr->addr = peer;
428         if_name = strchr(b_ptr->publ.name, ':') + 1;
429         sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:",
430                 tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
431                 tipc_node(tipc_own_addr), 
432                 if_name,
433                 tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
434                 /* note: peer i/f is appended to link name by reset/activate */
435         memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
436         k_init_timer(&l_ptr->timer, (Handler)link_timeout, (unsigned long)l_ptr);
437         list_add_tail(&l_ptr->link_list, &b_ptr->links);
438         l_ptr->checkpoint = 1;
439         l_ptr->b_ptr = b_ptr;
440         link_set_supervision_props(l_ptr, b_ptr->media->tolerance);
441         l_ptr->state = RESET_UNKNOWN;
442
443         l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
444         msg = l_ptr->pmsg;
445         msg_init(msg, LINK_PROTOCOL, RESET_MSG, TIPC_OK, INT_H_SIZE, l_ptr->addr);
446         msg_set_size(msg, sizeof(l_ptr->proto_msg));
447         msg_set_session(msg, tipc_random);
448         msg_set_bearer_id(msg, b_ptr->identity);
449         strcpy((char *)msg_data(msg), if_name);
450
451         l_ptr->priority = b_ptr->priority;
452         link_set_queue_limits(l_ptr, b_ptr->media->window);
453
454         link_init_max_pkt(l_ptr);
455
456         l_ptr->next_out_no = 1;
457         INIT_LIST_HEAD(&l_ptr->waiting_ports);
458
459         link_reset_statistics(l_ptr);
460
461         l_ptr->owner = node_attach_link(l_ptr);
462         if (!l_ptr->owner) {
463                 kfree(l_ptr);
464                 return NULL;
465         }
466
467         if (LINK_LOG_BUF_SIZE) {
468                 char *pb = kmalloc(LINK_LOG_BUF_SIZE, GFP_ATOMIC);
469
470                 if (!pb) {
471                         kfree(l_ptr);
472                         warn("Memory squeeze; Failed to create link\n");
473                         return NULL;
474                 }
475                 printbuf_init(&l_ptr->print_buf, pb, LINK_LOG_BUF_SIZE);
476         }
477
478         k_signal((Handler)link_start, (unsigned long)l_ptr);
479
480         dbg("link_create(): tolerance = %u,cont intv = %u, abort_limit = %u\n",
481             l_ptr->tolerance, l_ptr->continuity_interval, l_ptr->abort_limit);
482         
483         return l_ptr;
484 }
485
486 /** 
487  * link_delete - delete a link
488  * @l_ptr: pointer to link
489  * 
490  * Note: 'net_lock' is write_locked, bearer is locked.
491  * This routine must not grab the node lock until after link timer cancellation
492  * to avoid a potential deadlock situation.  
493  */
494
495 void link_delete(struct link *l_ptr)
496 {
497         if (!l_ptr) {
498                 err("Attempt to delete non-existent link\n");
499                 return;
500         }
501
502         dbg("link_delete()\n");
503
504         k_cancel_timer(&l_ptr->timer);
505         
506         node_lock(l_ptr->owner);
507         link_reset(l_ptr);
508         node_detach_link(l_ptr->owner, l_ptr);
509         link_stop(l_ptr);
510         list_del_init(&l_ptr->link_list);
511         if (LINK_LOG_BUF_SIZE)
512                 kfree(l_ptr->print_buf.buf);
513         node_unlock(l_ptr->owner);
514         k_term_timer(&l_ptr->timer);
515         kfree(l_ptr);
516 }
517
518 void link_start(struct link *l_ptr)
519 {
520         dbg("link_start %x\n", l_ptr);
521         link_state_event(l_ptr, STARTING_EVT);
522 }
523
524 /**
525  * link_schedule_port - schedule port for deferred sending 
526  * @l_ptr: pointer to link
527  * @origport: reference to sending port
528  * @sz: amount of data to be sent
529  * 
530  * Schedules port for renewed sending of messages after link congestion 
531  * has abated.
532  */
533
534 static int link_schedule_port(struct link *l_ptr, u32 origport, u32 sz)
535 {
536         struct port *p_ptr;
537
538         spin_lock_bh(&port_list_lock);
539         p_ptr = port_lock(origport);
540         if (p_ptr) {
541                 if (!p_ptr->wakeup)
542                         goto exit;
543                 if (!list_empty(&p_ptr->wait_list))
544                         goto exit;
545                 p_ptr->congested_link = l_ptr;
546                 p_ptr->publ.congested = 1;
547                 p_ptr->waiting_pkts = 1 + ((sz - 1) / link_max_pkt(l_ptr));
548                 list_add_tail(&p_ptr->wait_list, &l_ptr->waiting_ports);
549                 l_ptr->stats.link_congs++;
550 exit:
551                 port_unlock(p_ptr);
552         }
553         spin_unlock_bh(&port_list_lock);
554         return -ELINKCONG;
555 }
556
557 void link_wakeup_ports(struct link *l_ptr, int all)
558 {
559         struct port *p_ptr;
560         struct port *temp_p_ptr;
561         int win = l_ptr->queue_limit[0] - l_ptr->out_queue_size;
562
563         if (all)
564                 win = 100000;
565         if (win <= 0)
566                 return;
567         if (!spin_trylock_bh(&port_list_lock))
568                 return;
569         if (link_congested(l_ptr))
570                 goto exit;
571         list_for_each_entry_safe(p_ptr, temp_p_ptr, &l_ptr->waiting_ports, 
572                                  wait_list) {
573                 if (win <= 0)
574                         break;
575                 list_del_init(&p_ptr->wait_list);
576                 p_ptr->congested_link = NULL;
577                 assert(p_ptr->wakeup);
578                 spin_lock_bh(p_ptr->publ.lock);
579                 p_ptr->publ.congested = 0;
580                 p_ptr->wakeup(&p_ptr->publ);
581                 win -= p_ptr->waiting_pkts;
582                 spin_unlock_bh(p_ptr->publ.lock);
583         }
584
585 exit:
586         spin_unlock_bh(&port_list_lock);
587 }
588
589 /** 
590  * link_release_outqueue - purge link's outbound message queue
591  * @l_ptr: pointer to link
592  */
593
594 static void link_release_outqueue(struct link *l_ptr)
595 {
596         struct sk_buff *buf = l_ptr->first_out;
597         struct sk_buff *next;
598
599         while (buf) {
600                 next = buf->next;
601                 buf_discard(buf);
602                 buf = next;
603         }
604         l_ptr->first_out = NULL;
605         l_ptr->out_queue_size = 0;
606 }
607
608 /**
609  * link_reset_fragments - purge link's inbound message fragments queue
610  * @l_ptr: pointer to link
611  */
612
613 void link_reset_fragments(struct link *l_ptr)
614 {
615         struct sk_buff *buf = l_ptr->defragm_buf;
616         struct sk_buff *next;
617
618         while (buf) {
619                 next = buf->next;
620                 buf_discard(buf);
621                 buf = next;
622         }
623         l_ptr->defragm_buf = NULL;
624 }
625
626 /** 
627  * link_stop - purge all inbound and outbound messages associated with link
628  * @l_ptr: pointer to link
629  */
630
631 void link_stop(struct link *l_ptr)
632 {
633         struct sk_buff *buf;
634         struct sk_buff *next;
635
636         buf = l_ptr->oldest_deferred_in;
637         while (buf) {
638                 next = buf->next;
639                 buf_discard(buf);
640                 buf = next;
641         }
642
643         buf = l_ptr->first_out;
644         while (buf) {
645                 next = buf->next;
646                 buf_discard(buf);
647                 buf = next;
648         }
649
650         link_reset_fragments(l_ptr);
651
652         buf_discard(l_ptr->proto_msg_queue);
653         l_ptr->proto_msg_queue = NULL;
654 }
655
656 #if 0
657
658 /* LINK EVENT CODE IS NOT SUPPORTED AT PRESENT */
659
660 static void link_recv_event(struct link_event *ev)
661 {
662         ev->fcn(ev->addr, ev->name, ev->up);
663         kfree(ev);
664 }
665
666 static void link_send_event(void (*fcn)(u32 a, char *n, int up),
667                             struct link *l_ptr, int up)
668 {
669         struct link_event *ev;
670         
671         ev = kmalloc(sizeof(*ev), GFP_ATOMIC);
672         if (!ev) {
673                 warn("Link event allocation failure\n");
674                 return;
675         }
676         ev->addr = l_ptr->addr;
677         ev->up = up;
678         ev->fcn = fcn;
679         memcpy(ev->name, l_ptr->name, TIPC_MAX_LINK_NAME);
680         k_signal((Handler)link_recv_event, (unsigned long)ev);
681 }
682
683 #else
684
685 #define link_send_event(fcn, l_ptr, up) do { } while (0)
686
687 #endif
688
689 void link_reset(struct link *l_ptr)
690 {
691         struct sk_buff *buf;
692         u32 prev_state = l_ptr->state;
693         u32 checkpoint = l_ptr->next_in_no;
694         
695         msg_set_session(l_ptr->pmsg, msg_session(l_ptr->pmsg) + 1);
696
697         /* Link is down, accept any session: */
698         l_ptr->peer_session = 0;
699
700         /* Prepare for max packet size negotiation */
701         link_init_max_pkt(l_ptr);
702         
703         l_ptr->state = RESET_UNKNOWN;
704         dbg_link_state("Resetting Link\n");
705
706         if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
707                 return;
708
709         node_link_down(l_ptr->owner, l_ptr);
710         bearer_remove_dest(l_ptr->b_ptr, l_ptr->addr);
711 #if 0
712         tipc_printf(CONS, "\nReset link <%s>\n", l_ptr->name);
713         dbg_link_dump();
714 #endif
715         if (node_has_active_links(l_ptr->owner) &&
716             l_ptr->owner->permit_changeover) {
717                 l_ptr->reset_checkpoint = checkpoint;
718                 l_ptr->exp_msg_count = START_CHANGEOVER;
719         }
720
721         /* Clean up all queues: */
722
723         link_release_outqueue(l_ptr);
724         buf_discard(l_ptr->proto_msg_queue);
725         l_ptr->proto_msg_queue = NULL;
726         buf = l_ptr->oldest_deferred_in;
727         while (buf) {
728                 struct sk_buff *next = buf->next;
729                 buf_discard(buf);
730                 buf = next;
731         }
732         if (!list_empty(&l_ptr->waiting_ports))
733                 link_wakeup_ports(l_ptr, 1);
734
735         l_ptr->retransm_queue_head = 0;
736         l_ptr->retransm_queue_size = 0;
737         l_ptr->last_out = NULL;
738         l_ptr->first_out = NULL;
739         l_ptr->next_out = NULL;
740         l_ptr->unacked_window = 0;
741         l_ptr->checkpoint = 1;
742         l_ptr->next_out_no = 1;
743         l_ptr->deferred_inqueue_sz = 0;
744         l_ptr->oldest_deferred_in = NULL;
745         l_ptr->newest_deferred_in = NULL;
746         l_ptr->fsm_msg_cnt = 0;
747         l_ptr->stale_count = 0;
748         link_reset_statistics(l_ptr);
749
750         link_send_event(cfg_link_event, l_ptr, 0);
751         if (!in_own_cluster(l_ptr->addr))
752                 link_send_event(disc_link_event, l_ptr, 0);
753 }
754
755
756 static void link_activate(struct link *l_ptr)
757 {
758         l_ptr->next_in_no = 1;
759         node_link_up(l_ptr->owner, l_ptr);
760         bearer_add_dest(l_ptr->b_ptr, l_ptr->addr);
761         link_send_event(cfg_link_event, l_ptr, 1);
762         if (!in_own_cluster(l_ptr->addr))
763                 link_send_event(disc_link_event, l_ptr, 1);
764 }
765
766 /**
767  * link_state_event - link finite state machine
768  * @l_ptr: pointer to link
769  * @event: state machine event to process
770  */
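/*
 * Summary of the main transitions implemented below (for orientation;
 * the switch statement is the authoritative behaviour):
 *
 *   WORKING_WORKING: traffic/activate keep the state; a timeout with no
 *     traffic since the last checkpoint moves to WORKING_UNKNOWN and sends
 *     a probe; a RESET_MSG resets the link and moves to RESET_RESET.
 *   WORKING_UNKNOWN: traffic, an activate, or a timeout that saw traffic
 *     returns to WORKING_WORKING; exhausting abort_limit probes resets the
 *     link to RESET_UNKNOWN; a RESET_MSG resets to RESET_RESET.
 *   RESET_UNKNOWN: an ACTIVATE_MSG normally brings the link up; a RESET_MSG
 *     moves to RESET_RESET; start/timeout re-sends RESET_MSG.
 *   RESET_RESET: traffic or an ACTIVATE_MSG brings the link up; a timeout
 *     re-sends ACTIVATE_MSG.
 */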
771
772 static void link_state_event(struct link *l_ptr, unsigned event)
773 {
774         struct link *other; 
775         u32 cont_intv = l_ptr->continuity_interval;
776
777         if (!l_ptr->started && (event != STARTING_EVT))
778                 return;         /* Not yet. */
779
780         if (link_blocked(l_ptr)) {
781                 if (event == TIMEOUT_EVT) {
782                         link_set_timer(l_ptr, cont_intv);
783                 }
784                 return;   /* Changeover going on */
785         }
786         dbg_link("STATE_EV: <%s> ", l_ptr->name);
787
788         switch (l_ptr->state) {
789         case WORKING_WORKING:
790                 dbg_link("WW/");
791                 switch (event) {
792                 case TRAFFIC_MSG_EVT:
793                         dbg_link("TRF-");
794                         /* fall through */
795                 case ACTIVATE_MSG:
796                         dbg_link("ACT\n");
797                         break;
798                 case TIMEOUT_EVT:
799                         dbg_link("TIM ");
800                         if (l_ptr->next_in_no != l_ptr->checkpoint) {
801                                 l_ptr->checkpoint = l_ptr->next_in_no;
802                                 if (bclink_acks_missing(l_ptr->owner)) {
803                                         link_send_proto_msg(l_ptr, STATE_MSG, 
804                                                             0, 0, 0, 0, 0);
805                                         l_ptr->fsm_msg_cnt++;
806                                 } else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
807                                         link_send_proto_msg(l_ptr, STATE_MSG, 
808                                                             1, 0, 0, 0, 0);
809                                         l_ptr->fsm_msg_cnt++;
810                                 }
811                                 link_set_timer(l_ptr, cont_intv);
812                                 break;
813                         }
814                         dbg_link(" -> WU\n");
815                         l_ptr->state = WORKING_UNKNOWN;
816                         l_ptr->fsm_msg_cnt = 0;
817                         link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
818                         l_ptr->fsm_msg_cnt++;
819                         link_set_timer(l_ptr, cont_intv / 4);
820                         break;
821                 case RESET_MSG:
822                         dbg_link("RES -> RR\n");
823                         link_reset(l_ptr);
824                         l_ptr->state = RESET_RESET;
825                         l_ptr->fsm_msg_cnt = 0;
826                         link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
827                         l_ptr->fsm_msg_cnt++;
828                         link_set_timer(l_ptr, cont_intv);
829                         break;
830                 default:
831                         err("Unknown link event %u in WW state\n", event);
832                 }
833                 break;
834         case WORKING_UNKNOWN:
835                 dbg_link("WU/");
836                 switch (event) {
837                 case TRAFFIC_MSG_EVT:
838                         dbg_link("TRF-");
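                        /* fall through */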
839                 case ACTIVATE_MSG:
840                         dbg_link("ACT -> WW\n");
841                         l_ptr->state = WORKING_WORKING;
842                         l_ptr->fsm_msg_cnt = 0;
843                         link_set_timer(l_ptr, cont_intv);
844                         break;
845                 case RESET_MSG:
846                         dbg_link("RES -> RR\n");
847                         link_reset(l_ptr);
848                         l_ptr->state = RESET_RESET;
849                         l_ptr->fsm_msg_cnt = 0;
850                         link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
851                         l_ptr->fsm_msg_cnt++;
852                         link_set_timer(l_ptr, cont_intv);
853                         break;
854                 case TIMEOUT_EVT:
855                         dbg_link("TIM ");
856                         if (l_ptr->next_in_no != l_ptr->checkpoint) {
857                                 dbg_link("-> WW \n");
858                                 l_ptr->state = WORKING_WORKING;
859                                 l_ptr->fsm_msg_cnt = 0;
860                                 l_ptr->checkpoint = l_ptr->next_in_no;
861                                 if (bclink_acks_missing(l_ptr->owner)) {
862                                         link_send_proto_msg(l_ptr, STATE_MSG,
863                                                             0, 0, 0, 0, 0);
864                                         l_ptr->fsm_msg_cnt++;
865                                 }
866                                 link_set_timer(l_ptr, cont_intv);
867                         } else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
868                                 dbg_link("Probing %u/%u,timer = %u ms)\n",
869                                          l_ptr->fsm_msg_cnt, l_ptr->abort_limit,
870                                          cont_intv / 4);
871                                 link_send_proto_msg(l_ptr, STATE_MSG, 
872                                                     1, 0, 0, 0, 0);
873                                 l_ptr->fsm_msg_cnt++;
874                                 link_set_timer(l_ptr, cont_intv / 4);
875                         } else {        /* Link has failed */
876                                 dbg_link("-> RU (%u probes unanswered)\n",
877                                          l_ptr->fsm_msg_cnt);
878                                 link_reset(l_ptr);
879                                 l_ptr->state = RESET_UNKNOWN;
880                                 l_ptr->fsm_msg_cnt = 0;
881                                 link_send_proto_msg(l_ptr, RESET_MSG,
882                                                     0, 0, 0, 0, 0);
883                                 l_ptr->fsm_msg_cnt++;
884                                 link_set_timer(l_ptr, cont_intv);
885                         }
886                         break;
887                 default:
888                         err("Unknown link event %u in WU state\n", event);
889                 }
890                 break;
891         case RESET_UNKNOWN:
892                 dbg_link("RU/");
893                 switch (event) {
894                 case TRAFFIC_MSG_EVT:
895                         dbg_link("TRF-\n");
896                         break;
897                 case ACTIVATE_MSG:
898                         other = l_ptr->owner->active_links[0];
899                         if (other && link_working_unknown(other)) {
900                                 dbg_link("ACT\n");
901                                 break;
902                         }
903                         dbg_link("ACT -> WW\n");
904                         l_ptr->state = WORKING_WORKING;
905                         l_ptr->fsm_msg_cnt = 0;
906                         link_activate(l_ptr);
907                         link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
908                         l_ptr->fsm_msg_cnt++;
909                         link_set_timer(l_ptr, cont_intv);
910                         break;
911                 case RESET_MSG:
912                         dbg_link("RES \n");
913                         dbg_link(" -> RR\n");
914                         l_ptr->state = RESET_RESET;
915                         l_ptr->fsm_msg_cnt = 0;
916                         link_send_proto_msg(l_ptr, ACTIVATE_MSG, 1, 0, 0, 0, 0);
917                         l_ptr->fsm_msg_cnt++;
918                         link_set_timer(l_ptr, cont_intv);
919                         break;
920                 case STARTING_EVT:
921                         dbg_link("START-");
922                         l_ptr->started = 1;
923                         /* fall through */
924                 case TIMEOUT_EVT:
925                         dbg_link("TIM \n");
926                         link_send_proto_msg(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
927                         l_ptr->fsm_msg_cnt++;
928                         link_set_timer(l_ptr, cont_intv);
929                         break;
930                 default:
931                         err("Unknown link event %u in RU state\n", event);
932                 }
933                 break;
934         case RESET_RESET:
935                 dbg_link("RR/ ");
936                 switch (event) {
937                 case TRAFFIC_MSG_EVT:
938                         dbg_link("TRF-");
939                         /* fall through */
940                 case ACTIVATE_MSG:
941                         other = l_ptr->owner->active_links[0];
942                         if (other && link_working_unknown(other)) {
943                                 dbg_link("ACT\n");
944                                 break;
945                         }
946                         dbg_link("ACT -> WW\n");
947                         l_ptr->state = WORKING_WORKING;
948                         l_ptr->fsm_msg_cnt = 0;
949                         link_activate(l_ptr);
950                         link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
951                         l_ptr->fsm_msg_cnt++;
952                         link_set_timer(l_ptr, cont_intv);
953                         break;
954                 case RESET_MSG:
955                         dbg_link("RES\n");
956                         break;
957                 case TIMEOUT_EVT:
958                         dbg_link("TIM\n");
959                         link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
960                         l_ptr->fsm_msg_cnt++;
961                         link_set_timer(l_ptr, cont_intv);
962                         dbg_link("fsm_msg_cnt %u\n", l_ptr->fsm_msg_cnt);
963                         break;
964                 default:
965                         err("Unknown link event %u in RR state\n", event);
966                 }
967                 break;
968         default:
969                 err("Unknown link state %u/%u\n", l_ptr->state, event);
970         }
971 }
972
973 /*
974  * link_bundle_buf(): Append contents of a buffer to
975  * the tail of an existing one. 
976  */
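/*
 * Example (illustrative): with a 1500-byte link_max_pkt() and a bundler
 * currently holding 400 bytes, a 100-byte message is copied in at offset
 * align(400) = 400, the bundle size becomes 500 and msgcnt is bumped;
 * the append is refused once the aligned size would exceed link_max_pkt().
 */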
977
978 static int link_bundle_buf(struct link *l_ptr,
979                            struct sk_buff *bundler, 
980                            struct sk_buff *buf)
981 {
982         struct tipc_msg *bundler_msg = buf_msg(bundler);
983         struct tipc_msg *msg = buf_msg(buf);
984         u32 size = msg_size(msg);
985         u32 to_pos = align(msg_size(bundler_msg));
986         u32 rest = link_max_pkt(l_ptr) - to_pos;
987
988         if (msg_user(bundler_msg) != MSG_BUNDLER)
989                 return 0;
990         if (msg_type(bundler_msg) != OPEN_MSG)
991                 return 0;
992         if (rest < align(size))
993                 return 0;
994
995         skb_put(bundler, (to_pos - msg_size(bundler_msg)) + size);
996         memcpy(bundler->data + to_pos, buf->data, size);
997         msg_set_size(bundler_msg, to_pos + size);
998         msg_set_msgcnt(bundler_msg, msg_msgcnt(bundler_msg) + 1);
999         dbg("Packed msg # %u(%u octets) into pos %u in buf(#%u)\n",
1000             msg_msgcnt(bundler_msg), size, to_pos, msg_seqno(bundler_msg));
1001         msg_dbg(msg, "PACKD:");
1002         buf_discard(buf);
1003         l_ptr->stats.sent_bundled++;
1004         return 1;
1005 }
1006
1007 static inline void link_add_to_outqueue(struct link *l_ptr, 
1008                                         struct sk_buff *buf, 
1009                                         struct tipc_msg *msg)
1010 {
1011         u32 ack = mod(l_ptr->next_in_no - 1);
1012         u32 seqno = mod(l_ptr->next_out_no++);
1013
1014         msg_set_word(msg, 2, ((ack << 16) | seqno));
1015         msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1016         buf->next = NULL;
1017         if (l_ptr->first_out) {
1018                 l_ptr->last_out->next = buf;
1019                 l_ptr->last_out = buf;
1020         } else
1021                 l_ptr->first_out = l_ptr->last_out = buf;
1022         l_ptr->out_queue_size++;
1023 }
1024
1025 /* 
1026  * link_send_buf() is the 'full path' for messages, called from 
1027  * inside TIPC when the 'fast path' in tipc_send_buf
1028  * has failed, and from link_send()
1029  */
1030
1031 int link_send_buf(struct link *l_ptr, struct sk_buff *buf)
1032 {
1033         struct tipc_msg *msg = buf_msg(buf);
1034         u32 size = msg_size(msg);
1035         u32 dsz = msg_data_sz(msg);
1036         u32 queue_size = l_ptr->out_queue_size;
1037         u32 imp = msg_tot_importance(msg);
1038         u32 queue_limit = l_ptr->queue_limit[imp];
1039         u32 max_packet = link_max_pkt(l_ptr);
1040
1041         msg_set_prevnode(msg, tipc_own_addr);   /* If routed message */
1042
1043         /* Match msg importance against queue limits: */
1044
1045         if (unlikely(queue_size >= queue_limit)) {
1046                 if (imp <= TIPC_CRITICAL_IMPORTANCE) {
1047                         return link_schedule_port(l_ptr, msg_origport(msg),
1048                                                   size);
1049                 }
1050                 msg_dbg(msg, "TIPC: Congestion, throwing away\n");
1051                 buf_discard(buf);
1052                 if (imp > CONN_MANAGER) {
1053                         warn("Resetting <%s>, send queue full", l_ptr->name);
1054                         link_reset(l_ptr);
1055                 }
1056                 return dsz;
1057         }
1058
1059         /* Fragmentation needed ? */
1060
1061         if (size > max_packet)
1062                 return link_send_long_buf(l_ptr, buf);
1063
1064         /* Packet can be queued or sent: */
1065
1066         if (queue_size > l_ptr->stats.max_queue_sz)
1067                 l_ptr->stats.max_queue_sz = queue_size;
1068
1069         if (likely(!bearer_congested(l_ptr->b_ptr, l_ptr) && 
1070                    !link_congested(l_ptr))) {
1071                 link_add_to_outqueue(l_ptr, buf, msg);
1072
1073                 if (likely(bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr))) {
1074                         l_ptr->unacked_window = 0;
1075                 } else {
1076                         bearer_schedule(l_ptr->b_ptr, l_ptr);
1077                         l_ptr->stats.bearer_congs++;
1078                         l_ptr->next_out = buf;
1079                 }
1080                 return dsz;
1081         }
1082         /* Congestion: can message be bundled ?: */
1083
1084         if ((msg_user(msg) != CHANGEOVER_PROTOCOL) &&
1085             (msg_user(msg) != MSG_FRAGMENTER)) {
1086
1087                 /* Try adding message to an existing bundle */
1088
1089                 if (l_ptr->next_out && 
1090                     link_bundle_buf(l_ptr, l_ptr->last_out, buf)) {
1091                         bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
1092                         return dsz;
1093                 }
1094
1095                 /* Try creating a new bundle */
1096
1097                 if (size <= max_packet * 2 / 3) {
1098                         struct sk_buff *bundler = buf_acquire(max_packet);
1099                         struct tipc_msg bundler_hdr;
1100
1101                         if (bundler) {
1102                                 msg_init(&bundler_hdr, MSG_BUNDLER, OPEN_MSG,
1103                                          TIPC_OK, INT_H_SIZE, l_ptr->addr);
1104                                 memcpy(bundler->data, (unchar *)&bundler_hdr, 
1105                                        INT_H_SIZE);
1106                                 skb_trim(bundler, INT_H_SIZE);
1107                                 link_bundle_buf(l_ptr, bundler, buf);
1108                                 buf = bundler;
1109                                 msg = buf_msg(buf);
1110                                 l_ptr->stats.sent_bundles++;
1111                         }
1112                 }
1113         }
1114         if (!l_ptr->next_out)
1115                 l_ptr->next_out = buf;
1116         link_add_to_outqueue(l_ptr, buf, msg);
1117         bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
1118         return dsz;
1119 }
1120
1121 /* 
1122  * link_send(): same as link_send_buf(), but the link to use has 
1123  * not been selected yet, and the owner node is not locked.
1124  * Called by TIPC internal users, e.g. the name distributor
1125  */
1126
1127 int link_send(struct sk_buff *buf, u32 dest, u32 selector)
1128 {
1129         struct link *l_ptr;
1130         struct node *n_ptr;
1131         int res = -ELINKCONG;
1132
1133         read_lock_bh(&net_lock);
1134         n_ptr = node_select(dest, selector);
1135         if (n_ptr) {
1136                 node_lock(n_ptr);
1137                 l_ptr = n_ptr->active_links[selector & 1];
1138                 dbg("link_send: found link %x for dest %x\n", l_ptr, dest);
1139                 if (l_ptr) {
1140                         res = link_send_buf(l_ptr, buf);
1141                 }
1142                 node_unlock(n_ptr);
1143         } else {
1144                 dbg("Attempt to send msg to unknown node:\n");
1145                 msg_dbg(buf_msg(buf),">>>");
1146                 buf_discard(buf);
1147         }
1148         read_unlock_bh(&net_lock);
1149         return res;
1150 }
1151
1152 /* 
1153  * link_send_buf_fast: Entry for data messages where the 
1154  * destination link is known and the header is complete,
1155  * including the total message length. Very time critical.
1156  * Link is locked. Returns user data length.
1157  */
1158
1159 static inline int link_send_buf_fast(struct link *l_ptr, struct sk_buff *buf,
1160                                      u32 *used_max_pkt)
1161 {
1162         struct tipc_msg *msg = buf_msg(buf);
1163         int res = msg_data_sz(msg);
1164
1165         if (likely(!link_congested(l_ptr))) {
1166                 if (likely(msg_size(msg) <= link_max_pkt(l_ptr))) {
1167                         if (likely(list_empty(&l_ptr->b_ptr->cong_links))) {
1168                                 link_add_to_outqueue(l_ptr, buf, msg);
1169                                 if (likely(bearer_send(l_ptr->b_ptr, buf,
1170                                                        &l_ptr->media_addr))) {
1171                                         l_ptr->unacked_window = 0;
1172                                         msg_dbg(msg,"SENT_FAST:");
1173                                         return res;
1174                                 }
1175                                 dbg("failed sent fast...\n");
1176                                 bearer_schedule(l_ptr->b_ptr, l_ptr);
1177                                 l_ptr->stats.bearer_congs++;
1178                                 l_ptr->next_out = buf;
1179                                 return res;
1180                         }
1181                 }
1182                 else
1183                         *used_max_pkt = link_max_pkt(l_ptr);
1184         }
1185         return link_send_buf(l_ptr, buf);  /* All other cases */
1186 }
1187
1188 /* 
1189  * tipc_send_buf_fast: Entry for data messages where the 
1190  * destination node is known and the header is complete,
1191  * including the total message length.
1192  * Returns user data length.
1193  */
1194 int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode)
1195 {
1196         struct link *l_ptr;
1197         struct node *n_ptr;
1198         int res;
1199         u32 selector = msg_origport(buf_msg(buf)) & 1;
1200         u32 dummy;
1201
1202         if (destnode == tipc_own_addr)
1203                 return port_recv_msg(buf);
1204
1205         read_lock_bh(&net_lock);
1206         n_ptr = node_select(destnode, selector);
1207         if (likely(n_ptr)) {
1208                 node_lock(n_ptr);
1209                 l_ptr = n_ptr->active_links[selector];
1210                 dbg("send_fast: buf %x selected %x, destnode = %x\n",
1211                     buf, l_ptr, destnode);
1212                 if (likely(l_ptr)) {
1213                         res = link_send_buf_fast(l_ptr, buf, &dummy);
1214                         node_unlock(n_ptr);
1215                         read_unlock_bh(&net_lock);
1216                         return res;
1217                 }
1218                 node_unlock(n_ptr);
1219         }
1220         read_unlock_bh(&net_lock);
1221         res = msg_data_sz(buf_msg(buf));
1222         tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
1223         return res;
1224 }
1225
1226
1227 /* 
1228  * link_send_sections_fast: Entry for messages where the 
1229  * destination processor is known and the header is complete,
1230  * except for total message length. 
1231  * Returns user data length or errno.
1232  */
1233 int link_send_sections_fast(struct port *sender, 
1234                             struct iovec const *msg_sect,
1235                             const u32 num_sect, 
1236                             u32 destaddr)
1237 {
1238         struct tipc_msg *hdr = &sender->publ.phdr;
1239         struct link *l_ptr;
1240         struct sk_buff *buf;
1241         struct node *node;
1242         int res;
1243         u32 selector = msg_origport(hdr) & 1;
1244
1245         assert(destaddr != tipc_own_addr);
1246
1247 again:
1248         /*
1249          * Try building message using port's max_pkt hint.
1250          * (Must not hold any locks while building message.)
1251          */
1252
1253         res = msg_build(hdr, msg_sect, num_sect, sender->max_pkt,
1254                         !sender->user_port, &buf);
1255
1256         read_lock_bh(&net_lock);
1257         node = node_select(destaddr, selector);
1258         if (likely(node)) {
1259                 node_lock(node);
1260                 l_ptr = node->active_links[selector];
1261                 if (likely(l_ptr)) {
1262                         if (likely(buf)) {
1263                                 res = link_send_buf_fast(l_ptr, buf,
1264                                                          &sender->max_pkt);
1265                                 if (unlikely(res < 0))
1266                                         buf_discard(buf);
1267 exit:
1268                                 node_unlock(node);
1269                                 read_unlock_bh(&net_lock);
1270                                 return res;
1271                         }
1272
1273                         /* Exit if build request was invalid */
1274
1275                         if (unlikely(res < 0))
1276                                 goto exit;
1277
1278                         /* Exit if link (or bearer) is congested */
1279
1280                         if (link_congested(l_ptr) || 
1281                             !list_empty(&l_ptr->b_ptr->cong_links)) {
1282                                 res = link_schedule_port(l_ptr,
1283                                                          sender->publ.ref, res);
1284                                 goto exit;
1285                         }
1286
1287                         /* 
1288                          * Message size exceeds max_pkt hint; update hint,
1289                          * then re-try fast path or fragment the message
1290                          */
1291
1292                         sender->max_pkt = link_max_pkt(l_ptr);
1293                         node_unlock(node);
1294                         read_unlock_bh(&net_lock);
1295
1296
1297                         if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt)
1298                                 goto again;
1299
1300                         return link_send_sections_long(sender, msg_sect,
1301                                                        num_sect, destaddr);
1302                 }
1303                 node_unlock(node);
1304         }
1305         read_unlock_bh(&net_lock);
1306
1307         /* Couldn't find a link to the destination node */
1308
1309         if (buf)
1310                 return tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
1311         if (res >= 0)
1312                 return port_reject_sections(sender, hdr, msg_sect, num_sect,
1313                                             TIPC_ERR_NO_NODE);
1314         return res;
1315 }
1316
1317 /* 
1318  * link_send_sections_long(): Entry for long messages where the 
1319  * destination node is known and the header is complete,
1320  * including the total message length.
1321  * Link and bearer congestion status have been checked to be ok,
1322  * and are ignored if they change.
1323  *
1324  * Note that fragments do not use the full link MTU so that they won't have
1325  * to undergo refragmentation if link changeover causes them to be sent
1326  * over another link with an additional tunnel header added as prefix.
1327  * (Refragmentation will still occur if the other link has a smaller MTU.)
1328  *
1329  * Returns user data length or errno.
1330  */
1331 static int link_send_sections_long(struct port *sender,
1332                                    struct iovec const *msg_sect,
1333                                    u32 num_sect,
1334                                    u32 destaddr)
1335 {
1336         struct link *l_ptr;
1337         struct node *node;
1338         struct tipc_msg *hdr = &sender->publ.phdr;
1339         u32 dsz = msg_data_sz(hdr);
1340         u32 max_pkt, fragm_sz, rest;
1341         struct tipc_msg fragm_hdr;
1342         struct sk_buff *buf, *buf_chain, *prev;
1343         u32 fragm_crs, fragm_rest, hsz, sect_rest;
1344         const unchar *sect_crs;
1345         int curr_sect;
1346         u32 fragm_no;
1347
1348 again:
1349         fragm_no = 1;
1350         max_pkt = sender->max_pkt - INT_H_SIZE;  
1351                 /* leave room for tunnel header in case of link changeover */
1352         fragm_sz = max_pkt - INT_H_SIZE; 
1353                 /* leave room for fragmentation header in each fragment */
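        /*
         * Example (illustrative, assuming INT_H_SIZE is 40 bytes and a
         * 1500-byte max_pkt hint): max_pkt becomes 1460 and fragm_sz 1420,
         * so each fragment carries at most 1420 bytes of payload.
         */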
1354         rest = dsz;
1355         fragm_crs = 0;
1356         fragm_rest = 0;
1357         sect_rest = 0;
1358         sect_crs = NULL;
1359         curr_sect = -1;
1360
1361         /* Prepare reusable fragment header: */
1362
1363         msg_dbg(hdr, ">FRAGMENTING>");
1364         msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
1365                  TIPC_OK, INT_H_SIZE, msg_destnode(hdr));
1366         msg_set_link_selector(&fragm_hdr, sender->publ.ref);
1367         msg_set_size(&fragm_hdr, max_pkt);
1368         msg_set_fragm_no(&fragm_hdr, 1);
1369
1370         /* Prepare header of first fragment: */
1371
1372         buf_chain = buf = buf_acquire(max_pkt);
1373         if (!buf)
1374                 return -ENOMEM;
1375         buf->next = NULL;
1376         memcpy(buf->data, (unchar *)&fragm_hdr, INT_H_SIZE);
1377         hsz = msg_hdr_sz(hdr);
1378         memcpy(buf->data + INT_H_SIZE, (unchar *)hdr, hsz);
1379         msg_dbg(buf_msg(buf), ">BUILD>");
1380
1381         /* Chop up message: */
1382
1383         fragm_crs = INT_H_SIZE + hsz;
1384         fragm_rest = fragm_sz - hsz;
1385
1386         do {            /* For all sections */
1387                 u32 sz;
1388
1389                 if (!sect_rest) {
1390                         sect_rest = msg_sect[++curr_sect].iov_len;
1391                         sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
1392                 }
1393
1394                 if (sect_rest < fragm_rest)
1395                         sz = sect_rest;
1396                 else
1397                         sz = fragm_rest;
1398
1399                 if (likely(!sender->user_port)) {
1400                         if (copy_from_user(buf->data + fragm_crs, sect_crs, sz)) {
1401 error:
1402                                 for (; buf_chain; buf_chain = buf) {
1403                                         buf = buf_chain->next;
1404                                         buf_discard(buf_chain);
1405                                 }
1406                                 return -EFAULT;
1407                         }
1408                 } else
1409                         memcpy(buf->data + fragm_crs, sect_crs, sz);
1410
1411                 sect_crs += sz;
1412                 sect_rest -= sz;
1413                 fragm_crs += sz;
1414                 fragm_rest -= sz;
1415                 rest -= sz;
1416
1417                 if (!fragm_rest && rest) {
1418
1419                         /* Initiate new fragment: */
1420                         if (rest <= fragm_sz) {
1421                                 fragm_sz = rest;
1422                                 msg_set_type(&fragm_hdr, LAST_FRAGMENT);
1423                         } else {
1424                                 msg_set_type(&fragm_hdr, FRAGMENT);
1425                         }
1426                         msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
1427                         msg_set_fragm_no(&fragm_hdr, ++fragm_no);
1428                         prev = buf;
1429                         buf = buf_acquire(fragm_sz + INT_H_SIZE);
1430                         if (!buf)
1431                                 goto error;
1432
1433                         buf->next = NULL;                                
1434                         prev->next = buf;
1435                         memcpy(buf->data, (unchar *)&fragm_hdr, INT_H_SIZE);
1436                         fragm_crs = INT_H_SIZE;
1437                         fragm_rest = fragm_sz;
1438                         msg_dbg(buf_msg(buf),"  >BUILD>");
1439                 }
1440         }
1441         while (rest > 0);
1442
1443         /* 
1444          * Now we have a buffer chain. Select a link and check
1445          * that packet size is still OK
1446          */
1447         node = node_select(destaddr, sender->publ.ref & 1);
1448         if (likely(node)) {
1449                 node_lock(node);
1450                 l_ptr = node->active_links[sender->publ.ref & 1];
1451                 if (!l_ptr) {
1452                         node_unlock(node);
1453                         goto reject;
1454                 }
1455                 if (link_max_pkt(l_ptr) < max_pkt) {
1456                         sender->max_pkt = link_max_pkt(l_ptr);
1457                         node_unlock(node);
1458                         for (; buf_chain; buf_chain = buf) {
1459                                 buf = buf_chain->next;
1460                                 buf_discard(buf_chain);
1461                         }
1462                         goto again;
1463                 }
1464         } else {
1465 reject:
1466                 for (; buf_chain; buf_chain = buf) {
1467                         buf = buf_chain->next;
1468                         buf_discard(buf_chain);
1469                 }
1470                 return port_reject_sections(sender, hdr, msg_sect, num_sect,
1471                                             TIPC_ERR_NO_NODE);
1472         }
1473
1474         /* Append whole chain to send queue: */
1475
1476         buf = buf_chain;
1477         l_ptr->long_msg_seq_no = mod(l_ptr->long_msg_seq_no + 1);
1478         if (!l_ptr->next_out)
1479                 l_ptr->next_out = buf_chain;
1480         l_ptr->stats.sent_fragmented++;
1481         while (buf) {
1482                 struct sk_buff *next = buf->next;
1483                 struct tipc_msg *msg = buf_msg(buf);
1484
1485                 l_ptr->stats.sent_fragments++;
1486                 msg_set_long_msgno(msg, l_ptr->long_msg_seq_no);
1487                 link_add_to_outqueue(l_ptr, buf, msg);
1488                 msg_dbg(msg, ">ADD>");
1489                 buf = next;
1490         }
1491
1492         /* Send it, if possible: */
1493
1494         link_push_queue(l_ptr);
1495         node_unlock(node);
1496         return dsz;
1497 }
1498
1499 /* 
1500  * link_push_packet: Push one unsent packet to the media
1501  */
1502 u32 link_push_packet(struct link *l_ptr)
1503 {
1504         struct sk_buff *buf = l_ptr->first_out;
1505         u32 r_q_size = l_ptr->retransm_queue_size;
1506         u32 r_q_head = l_ptr->retransm_queue_head;
1507
1508         /* Step to position where retransmission failed, if any;    */
1509         /* buffers may have been released in the meantime           */
1510
1511         if (r_q_size && buf) {
1512                 u32 last = lesser(mod(r_q_head + r_q_size), 
1513                                   link_last_sent(l_ptr));
1514                 u32 first = msg_seqno(buf_msg(buf));
1515
1516                 while (buf && less(first, r_q_head)) {
1517                         first = mod(first + 1);
1518                         buf = buf->next;
1519                 }
1520                 l_ptr->retransm_queue_head = r_q_head = first;
1521                 l_ptr->retransm_queue_size = r_q_size = mod(last - first);
1522         }
1523
1524         /* Continue retransmission now, if there is anything: */
1525
1526         if (r_q_size && buf && !skb_cloned(buf)) {
1527                 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
1528                 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in); 
1529                 if (bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1530                         msg_dbg(buf_msg(buf), ">DEF-RETR>");
1531                         l_ptr->retransm_queue_head = mod(++r_q_head);
1532                         l_ptr->retransm_queue_size = --r_q_size;
1533                         l_ptr->stats.retransmitted++;
1534                         return TIPC_OK;
1535                 } else {
1536                         l_ptr->stats.bearer_congs++;
1537                         msg_dbg(buf_msg(buf), "|>DEF-RETR>");
1538                         return PUSH_FAILED;
1539                 }
1540         }
1541
1542         /* Send deferred protocol message, if any: */
1543
1544         buf = l_ptr->proto_msg_queue;
1545         if (buf) {
1546                 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
1547                 msg_set_bcast_ack(buf_msg(buf),l_ptr->owner->bclink.last_in); 
1548                 if (bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1549                         msg_dbg(buf_msg(buf), ">DEF-PROT>");
1550                         l_ptr->unacked_window = 0;
1551                         buf_discard(buf);
1552                         l_ptr->proto_msg_queue = 0;
1553                         return TIPC_OK;
1554                 } else {
1555                         msg_dbg(buf_msg(buf), "|>DEF-PROT>");
1556                         l_ptr->stats.bearer_congs++;
1557                         return PUSH_FAILED;
1558                 }
1559         }
1560
1561         /* Send one deferred data message, if send window not full: */
1562
1563         buf = l_ptr->next_out;
1564         if (buf) {
1565                 struct tipc_msg *msg = buf_msg(buf);
1566                 u32 next = msg_seqno(msg);
1567                 u32 first = msg_seqno(buf_msg(l_ptr->first_out));
1568
1569                 if (mod(next - first) < l_ptr->queue_limit[0]) {
1570                         msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1571                         msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 
1572                         if (bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1573                                 if (msg_user(msg) == MSG_BUNDLER)
1574                                         msg_set_type(msg, CLOSED_MSG);
1575                                 msg_dbg(msg, ">PUSH-DATA>");
1576                                 l_ptr->next_out = buf->next;
1577                                 return TIPC_OK;
1578                         } else {
1579                                 msg_dbg(msg, "|PUSH-DATA|");
1580                                 l_ptr->stats.bearer_congs++;
1581                                 return PUSH_FAILED;
1582                         }
1583                 }
1584         }
1585         return PUSH_FINISHED;
1586 }
1587
1588 /*
1589  * link_push_queue(): push out the unsent messages of a link where
1590  *                    congestion has abated. Node is locked.
1591  */
1592 void link_push_queue(struct link *l_ptr)
1593 {
1594         u32 res;
1595
1596         if (bearer_congested(l_ptr->b_ptr, l_ptr))
1597                 return;
1598
1599         do {
1600                 res = link_push_packet(l_ptr);
1601         }
1602         while (res == TIPC_OK);
1603         if (res == PUSH_FAILED)
1604                 bearer_schedule(l_ptr->b_ptr, l_ptr);
1605 }
1606
1607 void link_retransmit(struct link *l_ptr, struct sk_buff *buf, 
1608                      u32 retransmits)
1609 {
1610         struct tipc_msg *msg;
1611
1612         dbg("Retransmitting %u in link %x\n", retransmits, l_ptr);
1613
1614         if (bearer_congested(l_ptr->b_ptr, l_ptr) && buf && !skb_cloned(buf)) {
1615                 msg_dbg(buf_msg(buf), ">NO_RETR->BCONG>");
1616                 dbg_print_link(l_ptr, "   ");
1617                 l_ptr->retransm_queue_head = msg_seqno(buf_msg(buf));
1618                 l_ptr->retransm_queue_size = retransmits;
1619                 return;
1620         }
1621         while (retransmits && (buf != l_ptr->next_out) && buf && !skb_cloned(buf)) {
1622                 msg = buf_msg(buf);
1623                 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1624                 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 
1625                 if (bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1626                         /* Catch if retransmissions fail repeatedly: */
1627                         if (l_ptr->last_retransmitted == msg_seqno(msg)) {
1628                                 if (++l_ptr->stale_count > 100) {
1629                                         msg_print(CONS, buf_msg(buf), ">RETR>");
1630                                         info("...Retransmitted %u times\n",
1631                                              l_ptr->stale_count);
1632                                         link_print(l_ptr, CONS, "Resetting Link\n");
1633                                         link_reset(l_ptr);
1634                                         break;
1635                                 }
1636                         } else {
1637                                 l_ptr->stale_count = 0;
1638                         }
1639                         l_ptr->last_retransmitted = msg_seqno(msg);
1640
1641                         msg_dbg(buf_msg(buf), ">RETR>");
1642                         buf = buf->next;
1643                         retransmits--;
1644                         l_ptr->stats.retransmitted++;
1645                 } else {
1646                         bearer_schedule(l_ptr->b_ptr, l_ptr);
1647                         l_ptr->stats.bearer_congs++;
1648                         l_ptr->retransm_queue_head = msg_seqno(buf_msg(buf));
1649                         l_ptr->retransm_queue_size = retransmits;
1650                         return;
1651                 }
1652         }
1653         l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0;
1654 }
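
/*
 * Illustrative sketch (guarded out, not compiled): the loop above treats
 * repeated retransmission of the same sequence number as a sign of a dead
 * peer and resets the link once the count passes 100.  The helper name and
 * signature below are editorial stand-ins, not TIPC APIs.
 */
#if 0
static int retransmit_is_stale(u32 *stale_count, u32 *last_seqno, u32 seqno)
{
        if (*last_seqno == seqno) {
                if (++(*stale_count) > 100)
                        return 1;       /* no progress; caller resets link */
        } else {
                *stale_count = 0;       /* a different packet: progress made */
        }
        *last_seqno = seqno;
        return 0;
}
#endif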
1655
1656 /* 
1657  * link_recv_non_seq: Receive packets which are outside
1658  *                    the link sequence flow
1659  */
1660
1661 static void link_recv_non_seq(struct sk_buff *buf)
1662 {
1663         struct tipc_msg *msg = buf_msg(buf);
1664
1665         if (msg_user(msg) ==  LINK_CONFIG)
1666                 disc_recv_msg(buf);
1667         else
1668                 bclink_recv_pkt(buf);
1669 }
1670
1671 /** 
1672  * link_insert_deferred_queue - insert deferred messages back into receive chain
1673  */
1674
1675 static struct sk_buff *link_insert_deferred_queue(struct link *l_ptr, 
1676                                                   struct sk_buff *buf)
1677 {
1678         u32 seq_no;
1679
1680         if (l_ptr->oldest_deferred_in == NULL)
1681                 return buf;
1682
1683         seq_no = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
1684         if (seq_no == mod(l_ptr->next_in_no)) {
1685                 l_ptr->newest_deferred_in->next = buf;
1686                 buf = l_ptr->oldest_deferred_in;
1687                 l_ptr->oldest_deferred_in = NULL;
1688                 l_ptr->deferred_inqueue_sz = 0;
1689         }
1690         return buf;
1691 }
1692
1693 void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
1694 {
1695         read_lock_bh(&net_lock);
1696         while (head) {
1697                 struct bearer *b_ptr;
1698                 struct node *n_ptr;
1699                 struct link *l_ptr;
1700                 struct sk_buff *crs;
1701                 struct sk_buff *buf = head;
1702                 struct tipc_msg *msg = buf_msg(buf);
1703                 u32 seq_no = msg_seqno(msg);
1704                 u32 ackd = msg_ack(msg);
1705                 u32 released = 0;
1706                 int type;
1707
1708                 b_ptr = (struct bearer *)tb_ptr;
1709                 TIPC_SKB_CB(buf)->handle = b_ptr;
1710
1711                 head = head->next;
1712                 if (unlikely(msg_version(msg) != TIPC_VERSION))
1713                         goto cont;
1714 #if 0
1715                 if (msg_user(msg) != LINK_PROTOCOL)
1716 #endif
1717                         msg_dbg(msg,"<REC<");
1718
1719                 if (unlikely(msg_non_seq(msg))) {
1720                         link_recv_non_seq(buf);
1721                         continue;
1722                 }
1723                 n_ptr = node_find(msg_prevnode(msg));
1724                 if (unlikely(!n_ptr))
1725                         goto cont;
1726
1727                 node_lock(n_ptr);
1728                 l_ptr = n_ptr->links[b_ptr->identity];
1729                 if (unlikely(!l_ptr)) {
1730                         node_unlock(n_ptr);
1731                         goto cont;
1732                 }
1733                 /* 
1734                  * Release acked messages 
1735                  */
1736                 if (less(n_ptr->bclink.acked, msg_bcast_ack(msg))) {
1737                         if (node_is_up(n_ptr) && n_ptr->bclink.supported)
1738                                 bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
1739                 }
1740
1741                 crs = l_ptr->first_out;
1742                 while ((crs != l_ptr->next_out) && 
1743                        less_eq(msg_seqno(buf_msg(crs)), ackd)) {
1744                         struct sk_buff *next = crs->next;
1745
1746                         buf_discard(crs);
1747                         crs = next;
1748                         released++;
1749                 }
1750                 if (released) {
1751                         l_ptr->first_out = crs;
1752                         l_ptr->out_queue_size -= released;
1753                 }
1754                 if (unlikely(l_ptr->next_out))
1755                         link_push_queue(l_ptr);
1756                 if (unlikely(!list_empty(&l_ptr->waiting_ports)))
1757                         link_wakeup_ports(l_ptr, 0);
1758                 if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
1759                         l_ptr->stats.sent_acks++;
1760                         link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
1761                 }
1762
1763 protocol_check:
1764                 if (likely(link_working_working(l_ptr))) {
1765                         if (likely(seq_no == mod(l_ptr->next_in_no))) {
1766                                 l_ptr->next_in_no++;
1767                                 if (unlikely(l_ptr->oldest_deferred_in))
1768                                         head = link_insert_deferred_queue(l_ptr,
1769                                                                           head);
1770                                 if (likely(msg_is_dest(msg, tipc_own_addr))) {
1771 deliver:
1772                                         if (likely(msg_isdata(msg))) {
1773                                                 node_unlock(n_ptr);
1774                                                 port_recv_msg(buf);
1775                                                 continue;
1776                                         }
1777                                         switch (msg_user(msg)) {
1778                                         case MSG_BUNDLER:
1779                                                 l_ptr->stats.recv_bundles++;
1780                                                 l_ptr->stats.recv_bundled += 
1781                                                         msg_msgcnt(msg);
1782                                                 node_unlock(n_ptr);
1783                                                 link_recv_bundle(buf);
1784                                                 continue;
1785                                         case ROUTE_DISTRIBUTOR:
1786                                                 node_unlock(n_ptr);
1787                                                 cluster_recv_routing_table(buf);
1788                                                 continue;
1789                                         case NAME_DISTRIBUTOR:
1790                                                 node_unlock(n_ptr);
1791                                                 named_recv(buf);
1792                                                 continue;
1793                                         case CONN_MANAGER:
1794                                                 node_unlock(n_ptr);
1795                                                 port_recv_proto_msg(buf);
1796                                                 continue;
1797                                         case MSG_FRAGMENTER:
1798                                                 l_ptr->stats.recv_fragments++;
1799                                                 if (link_recv_fragment(
1800                                                         &l_ptr->defragm_buf, 
1801                                                         &buf, &msg)) {
1802                                                         l_ptr->stats.recv_fragmented++;
1803                                                         goto deliver;
1804                                                 }
1805                                                 break;
1806                                         case CHANGEOVER_PROTOCOL:
1807                                                 type = msg_type(msg);
1808                                                 if (link_recv_changeover_msg(
1809                                                         &l_ptr, &buf)) {
1810                                                         msg = buf_msg(buf);
1811                                                         seq_no = msg_seqno(msg);
1812                                                         TIPC_SKB_CB(buf)->handle 
1813                                                                 = b_ptr;
1814                                                         if (type == ORIGINAL_MSG)
1815                                                                 goto deliver;
1816                                                         goto protocol_check;
1817                                                 }
1818                                                 break;
1819                                         }
1820                                 }
1821                                 node_unlock(n_ptr);
1822                                 net_route_msg(buf);
1823                                 continue;
1824                         }
1825                         link_handle_out_of_seq_msg(l_ptr, buf);
1826                         head = link_insert_deferred_queue(l_ptr, head);
1827                         node_unlock(n_ptr);
1828                         continue;
1829                 }
1830
1831                 if (msg_user(msg) == LINK_PROTOCOL) {
1832                         link_recv_proto_msg(l_ptr, buf);
1833                         head = link_insert_deferred_queue(l_ptr, head);
1834                         node_unlock(n_ptr);
1835                         continue;
1836                 }
1837                 msg_dbg(msg,"NSEQ<REC<");
1838                 link_state_event(l_ptr, TRAFFIC_MSG_EVT);
1839
1840                 if (link_working_working(l_ptr)) {
1841                         /* Re-insert in front of queue */
1842                         msg_dbg(msg,"RECV-REINS:");
1843                         buf->next = head;
1844                         head = buf;
1845                         node_unlock(n_ptr);
1846                         continue;
1847                 }
1848                 node_unlock(n_ptr);
1849 cont:
1850                 buf_discard(buf);
1851         }
1852         read_unlock_bh(&net_lock);
1853 }
1854
1855 /* 
1856  * link_defer_pkt(): Sort a received out-of-sequence packet 
1857  *                   into the deferred reception queue.
1858  * Returns the increase of the queue length, i.e. 0 or 1
1859  */
1860
1861 u32 link_defer_pkt(struct sk_buff **head,
1862                    struct sk_buff **tail,
1863                    struct sk_buff *buf)
1864 {
1865         struct sk_buff *prev = 0;
1866         struct sk_buff *crs = *head;
1867         u32 seq_no = msg_seqno(buf_msg(buf));
1868
1869         buf->next = NULL;
1870
1871         /* Empty queue ? */
1872         if (*head == NULL) {
1873                 *head = *tail = buf;
1874                 return 1;
1875         }
1876
1877         /* Last ? */
1878         if (less(msg_seqno(buf_msg(*tail)), seq_no)) {
1879                 (*tail)->next = buf;
1880                 *tail = buf;
1881                 return 1;
1882         }
1883
1884         /* Scan through queue and sort it in */
1885         do {
1886                 struct tipc_msg *msg = buf_msg(crs);
1887
1888                 if (less(seq_no, msg_seqno(msg))) {
1889                         buf->next = crs;
1890                         if (prev)
1891                                 prev->next = buf;
1892                         else
1893                                 *head = buf;   
1894                         return 1;
1895                 }
1896                 if (seq_no == msg_seqno(msg)) {
1897                         break;
1898                 }
1899                 prev = crs;
1900                 crs = crs->next;
1901         }
1902         while (crs);
1903
1904         /* Message is a duplicate of an existing message */
1905
1906         buf_discard(buf);
1907         return 0;
1908 }
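
/*
 * Illustrative sketch (guarded out, not compiled): link_defer_pkt() orders
 * packets using TIPC's 16-bit wraparound sequence arithmetic (mod()/less()).
 * The helper below is an editorial stand-in showing the comparison it relies
 * on: 'a' precedes 'b' when 'b' is ahead of 'a' by less than half of the
 * 65536-value sequence space.
 */
#if 0
static int seq_precedes(u32 a, u32 b)
{
        u32 dist = (b - a) & 0xffffu;

        return (dist != 0) && (dist < 0x8000u);
}

/*
 * seq_precedes(10, 11)   -> 1    (11 follows 10)
 * seq_precedes(65535, 2) -> 1    (wraparound: only 3 steps ahead)
 * seq_precedes(2, 65535) -> 0    (65533 steps ahead, i.e. far behind)
 * seq_precedes(7, 7)     -> 0    (equal sequence numbers: a duplicate)
 */
#endif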
1909
1910 /** 
1911  * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
1912  */
1913
1914 static void link_handle_out_of_seq_msg(struct link *l_ptr, 
1915                                        struct sk_buff *buf)
1916 {
1917         u32 seq_no = msg_seqno(buf_msg(buf));
1918
1919         if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
1920                 link_recv_proto_msg(l_ptr, buf);
1921                 return;
1922         }
1923
1924         dbg("rx OOS msg: seq_no %u, expecting %u (%u)\n", 
1925             seq_no, mod(l_ptr->next_in_no), l_ptr->next_in_no);
1926
1927         /* Record OOS packet arrival (force mismatch on next timeout) */
1928
1929         l_ptr->checkpoint--;
1930
1931         /* 
1932          * Discard packet if a duplicate; otherwise add it to deferred queue
1933          * and notify peer of gap as per protocol specification
1934          */
1935
1936         if (less(seq_no, mod(l_ptr->next_in_no))) {
1937                 l_ptr->stats.duplicates++;
1938                 buf_discard(buf);
1939                 return;
1940         }
1941
1942         if (link_defer_pkt(&l_ptr->oldest_deferred_in,
1943                            &l_ptr->newest_deferred_in, buf)) {
1944                 l_ptr->deferred_inqueue_sz++;
1945                 l_ptr->stats.deferred_recv++;
1946                 if ((l_ptr->deferred_inqueue_sz % 16) == 1)
1947                         link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
1948         } else
1949                 l_ptr->stats.duplicates++;
1950 }
1951
1952 /*
1953  * Send protocol message to the other endpoint.
1954  */
1955 void link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
1956                          u32 gap, u32 tolerance, u32 priority, u32 ack_mtu)
1957 {
1958         struct sk_buff *buf = 0;
1959         struct tipc_msg *msg = l_ptr->pmsg;
1960         u32 msg_size = sizeof(l_ptr->proto_msg);
1961
1962         if (link_blocked(l_ptr))
1963                 return;
1964         msg_set_type(msg, msg_typ);
1965         msg_set_net_plane(msg, l_ptr->b_ptr->net_plane);
1966         msg_set_bcast_ack(msg, mod(l_ptr->owner->bclink.last_in)); 
1967         msg_set_last_bcast(msg, bclink_get_last_sent());
1968
1969         if (msg_typ == STATE_MSG) {
1970                 u32 next_sent = mod(l_ptr->next_out_no);
1971
1972                 if (!link_is_up(l_ptr))
1973                         return;
1974                 if (l_ptr->next_out)
1975                         next_sent = msg_seqno(buf_msg(l_ptr->next_out));
1976                 msg_set_next_sent(msg, next_sent);
1977                 if (l_ptr->oldest_deferred_in) {
1978                         u32 rec = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
1979                         gap = mod(rec - mod(l_ptr->next_in_no));
1980                 }
1981                 msg_set_seq_gap(msg, gap);
1982                 if (gap)
1983                         l_ptr->stats.sent_nacks++;
1984                 msg_set_link_tolerance(msg, tolerance);
1985                 msg_set_linkprio(msg, priority);
1986                 msg_set_max_pkt(msg, ack_mtu);
1987                 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1988                 msg_set_probe(msg, probe_msg != 0);
1989                 if (probe_msg) { 
1990                         u32 mtu = l_ptr->max_pkt;
1991
1992                         if ((mtu < l_ptr->max_pkt_target) &&
1993                             link_working_working(l_ptr) &&
1994                             l_ptr->fsm_msg_cnt) {
1995                                 msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
1996                                 if (l_ptr->max_pkt_probes == 10) {
1997                                         l_ptr->max_pkt_target = (msg_size - 4);
1998                                         l_ptr->max_pkt_probes = 0;
1999                                         msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
2000                                 }
2001                                 l_ptr->max_pkt_probes++;
2002                         }
2003
2004                         l_ptr->stats.sent_probes++;
2005                 }
2006                 l_ptr->stats.sent_states++;
2007         } else {                /* RESET_MSG or ACTIVATE_MSG */
2008                 msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1));
2009                 msg_set_seq_gap(msg, 0);
2010                 msg_set_next_sent(msg, 1);
2011                 msg_set_link_tolerance(msg, l_ptr->tolerance);
2012                 msg_set_linkprio(msg, l_ptr->priority);
2013                 msg_set_max_pkt(msg, l_ptr->max_pkt_target);
2014         }
2015
2016         if (node_has_redundant_links(l_ptr->owner)) {
2017                 msg_set_redundant_link(msg);
2018         } else {
2019                 msg_clear_redundant_link(msg);
2020         }
2021         msg_set_linkprio(msg, l_ptr->priority);
2022
2023         /* Ensure sequence number will not fit into the receive window: */
2024
2025         msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2)));
2026
2027         /* Congestion? */
2028
2029         if (bearer_congested(l_ptr->b_ptr, l_ptr)) {
2030                 if (!l_ptr->proto_msg_queue) {
2031                         l_ptr->proto_msg_queue =
2032                                 buf_acquire(sizeof(l_ptr->proto_msg));
2033                 }
2034                 buf = l_ptr->proto_msg_queue;
2035                 if (!buf)
2036                         return;
2037                 memcpy(buf->data, (unchar *)msg, sizeof(l_ptr->proto_msg));
2038                 return;
2039         }
2040         msg_set_timestamp(msg, jiffies_to_msecs(jiffies));
2041
2042         /* Message can be sent */
2043
2044         msg_dbg(msg, ">>");
2045
2046         buf = buf_acquire(msg_size);
2047         if (!buf)
2048                 return;
2049
2050         memcpy(buf->data, (unchar *)msg, sizeof(l_ptr->proto_msg));
2051         msg_set_size(buf_msg(buf), msg_size);
2052
2053         if (bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
2054                 l_ptr->unacked_window = 0;
2055                 buf_discard(buf);
2056                 return;
2057         }
2058
2059         /* New congestion */
2060         bearer_schedule(l_ptr->b_ptr, l_ptr);
2061         l_ptr->proto_msg_queue = buf;
2062         l_ptr->stats.bearer_congs++;
2063 }
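
/*
 * Illustrative sketch (guarded out, not compiled): the probe sizing above
 * performs a midpoint search between the confirmed packet size and the
 * target, rounded to a 4-byte boundary.  probe_size() is an editorial
 * stand-in reproducing that expression.
 */
#if 0
static u32 probe_size(u32 mtu, u32 target)
{
        return (mtu + (target - mtu) / 2 + 2) & ~3u;
}

/*
 * probe_size(1500, 66000) -> 33752
 * probe_size(1500, 1508)  -> 1504
 *
 * If ten successive probes bring no MTU update from the peer, the code
 * above pulls max_pkt_target down to just below the probe size, so the
 * next probes search a smaller interval.
 */
#endif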
2064
2065 /*
2066  * Receive protocol message.
2067  * Note that the network plane id propagates through the network, and may 
2068  * change at any time. The node with the lowest address rules.
2069  */
2070
2071 static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
2072 {
2073         u32 rec_gap = 0;
2074         u32 max_pkt_info;
2075         u32 max_pkt_ack;
2076         u32 msg_tol;
2077         struct tipc_msg *msg = buf_msg(buf);
2078
2079         dbg("AT(%u):", jiffies_to_msecs(jiffies));
2080         msg_dbg(msg, "<<");
2081         if (link_blocked(l_ptr))
2082                 goto exit;
2083
2084         /* record unnumbered packet arrival (force mismatch on next timeout) */
2085
2086         l_ptr->checkpoint--;
2087
2088         if (l_ptr->b_ptr->net_plane != msg_net_plane(msg))
2089                 if (tipc_own_addr > msg_prevnode(msg))
2090                         l_ptr->b_ptr->net_plane = msg_net_plane(msg);
2091
2092         l_ptr->owner->permit_changeover = msg_redundant_link(msg);
2093
2094         switch (msg_type(msg)) {
2095         
2096         case RESET_MSG:
2097                 if (!link_working_unknown(l_ptr) && l_ptr->peer_session) {
2098                         if (msg_session(msg) == l_ptr->peer_session) {
2099                                 dbg("Duplicate RESET: %u<->%u\n",
2100                                     msg_session(msg), l_ptr->peer_session);                                     
2101                                 break; /* duplicate: ignore */
2102                         }
2103                 }
2104                 /* fall thru' */
2105         case ACTIVATE_MSG:
2106                 /* Update link settings according to the other endpoint's values */
2107
2108                 strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));
2109
2110                 if ((msg_tol = msg_link_tolerance(msg)) &&
2111                     (msg_tol > l_ptr->tolerance))
2112                         link_set_supervision_props(l_ptr, msg_tol);
2113
2114                 if (msg_linkprio(msg) > l_ptr->priority)
2115                         l_ptr->priority = msg_linkprio(msg);
2116
2117                 max_pkt_info = msg_max_pkt(msg);
2118                 if (max_pkt_info) {
2119                         if (max_pkt_info < l_ptr->max_pkt_target)
2120                                 l_ptr->max_pkt_target = max_pkt_info;
2121                         if (l_ptr->max_pkt > l_ptr->max_pkt_target)
2122                                 l_ptr->max_pkt = l_ptr->max_pkt_target;
2123                 } else {
2124                         l_ptr->max_pkt = l_ptr->max_pkt_target;
2125                 }
2126                 l_ptr->owner->bclink.supported = (max_pkt_info != 0);
2127
2128                 link_state_event(l_ptr, msg_type(msg));
2129
2130                 l_ptr->peer_session = msg_session(msg);
2131                 l_ptr->peer_bearer_id = msg_bearer_id(msg);
2132
2133                 /* Synchronize broadcast sequence numbers */
2134                 if (!node_has_redundant_links(l_ptr->owner)) {
2135                         l_ptr->owner->bclink.last_in = mod(msg_last_bcast(msg));
2136                 }
2137                 break;
2138         case STATE_MSG:
2139
2140                 if ((msg_tol = msg_link_tolerance(msg)))
2141                         link_set_supervision_props(l_ptr, msg_tol);
2142                 
2143                 if (msg_linkprio(msg) && 
2144                     (msg_linkprio(msg) != l_ptr->priority)) {
2145                         warn("Changing prio <%s>: %u->%u\n",
2146                              l_ptr->name, l_ptr->priority, msg_linkprio(msg));
2147                         l_ptr->priority = msg_linkprio(msg);
2148                         link_reset(l_ptr); /* Enforce change to take effect */
2149                         break;
2150                 }
2151                 link_state_event(l_ptr, TRAFFIC_MSG_EVT);
2152                 l_ptr->stats.recv_states++;
2153                 if (link_reset_unknown(l_ptr))
2154                         break;
2155
2156                 if (less_eq(mod(l_ptr->next_in_no), msg_next_sent(msg))) {
2157                         rec_gap = mod(msg_next_sent(msg) - 
2158                                       mod(l_ptr->next_in_no));
2159                 }
2160
2161                 max_pkt_ack = msg_max_pkt(msg);
2162                 if (max_pkt_ack > l_ptr->max_pkt) {
2163                         dbg("Link <%s> updated MTU %u -> %u\n",
2164                             l_ptr->name, l_ptr->max_pkt, max_pkt_ack);
2165                         l_ptr->max_pkt = max_pkt_ack;
2166                         l_ptr->max_pkt_probes = 0;
2167                 }
2168
2169                 max_pkt_ack = 0;
2170                 if (msg_probe(msg)) {
2171                         l_ptr->stats.recv_probes++;
2172                         if (msg_size(msg) > sizeof(l_ptr->proto_msg)) {
2173                                 max_pkt_ack = msg_size(msg);
2174                         }
2175                 }
2176
2177                 /* Protocol message before retransmits, reduce loss risk */
2178
2179                 bclink_check_gap(l_ptr->owner, msg_last_bcast(msg));
2180
2181                 if (rec_gap || (msg_probe(msg))) {
2182                         link_send_proto_msg(l_ptr, STATE_MSG,
2183                                             0, rec_gap, 0, 0, max_pkt_ack);
2184                 }
2185                 if (msg_seq_gap(msg)) {
2186                         msg_dbg(msg, "With Gap:");
2187                         l_ptr->stats.recv_nacks++;
2188                         link_retransmit(l_ptr, l_ptr->first_out,
2189                                         msg_seq_gap(msg));
2190                 }
2191                 break;
2192         default:
2193                 msg_dbg(buf_msg(buf), "<DISCARDING UNKNOWN<");
2194         }
2195 exit:
2196         buf_discard(buf);
2197 }
2198
2199
2200 /*
2201  * link_tunnel(): Send one message via a link belonging to 
2202  * another bearer. Owner node is locked.
2203  */
2204 void link_tunnel(struct link *l_ptr, 
2205             struct tipc_msg *tunnel_hdr, 
2206             struct tipc_msg  *msg,
2207             u32 selector)
2208 {
2209         struct link *tunnel;
2210         struct sk_buff *buf;
2211         u32 length = msg_size(msg);
2212
2213         tunnel = l_ptr->owner->active_links[selector & 1];
2214         if (!link_is_up(tunnel))
2215                 return;
2216         msg_set_size(tunnel_hdr, length + INT_H_SIZE);
2217         buf = buf_acquire(length + INT_H_SIZE);
2218         if (!buf)
2219                 return;
2220         memcpy(buf->data, (unchar *)tunnel_hdr, INT_H_SIZE);
2221         memcpy(buf->data + INT_H_SIZE, (unchar *)msg, length);
2222         dbg("%c->%c:", l_ptr->b_ptr->net_plane, tunnel->b_ptr->net_plane);
2223         msg_dbg(buf_msg(buf), ">SEND>");
2224         assert(tunnel);
2225         link_send_buf(tunnel, buf);
2226 }
2227
2228
2229
2230 /*
2231  * link_changeover(): Send whole message queue via the remaining link.
2232  *               Owner node is locked.
2233  */
2234
2235 void link_changeover(struct link *l_ptr)
2236 {
2237         u32 msgcount = l_ptr->out_queue_size;
2238         struct sk_buff *crs = l_ptr->first_out;
2239         struct link *tunnel = l_ptr->owner->active_links[0];
2240         int split_bundles = node_has_redundant_links(l_ptr->owner);
2241         struct tipc_msg tunnel_hdr;
2242
2243         if (!tunnel)
2244                 return;
2245
2246         if (!l_ptr->owner->permit_changeover)
2247                 return;
2248
2249         msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
2250                  ORIGINAL_MSG, TIPC_OK, INT_H_SIZE, l_ptr->addr);
2251         msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
2252         msg_set_msgcnt(&tunnel_hdr, msgcount);
2253         if (!l_ptr->first_out) {
2254                 struct sk_buff *buf;
2255
2256                 assert(!msgcount);
2257                 buf = buf_acquire(INT_H_SIZE);
2258                 if (buf) {
2259                         memcpy(buf->data, (unchar *)&tunnel_hdr, INT_H_SIZE);
2260                         msg_set_size(&tunnel_hdr, INT_H_SIZE);
2261                         dbg("%c->%c:", l_ptr->b_ptr->net_plane,
2262                             tunnel->b_ptr->net_plane);
2263                         msg_dbg(&tunnel_hdr, "EMPTY>SEND>");
2264                         link_send_buf(tunnel, buf);
2265                 } else {
2266                         warn("Memory squeeze; link changeover failed\n");
2267                 }
2268                 return;
2269         }
2270         while (crs) {
2271                 struct tipc_msg *msg = buf_msg(crs);
2272
2273                 if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
2274                         u32 msgcount = msg_msgcnt(msg);
2275                         struct tipc_msg *m = msg_get_wrapped(msg);
2276                         unchar* pos = (unchar*)m;
2277
2278                         while (msgcount--) {
2279                                 msg_set_seqno(m,msg_seqno(msg));
2280                                 link_tunnel(l_ptr, &tunnel_hdr, m,
2281                                             msg_link_selector(m));
2282                                 pos += align(msg_size(m));
2283                                 m = (struct tipc_msg *)pos;
2284                         }
2285                 } else {
2286                         link_tunnel(l_ptr, &tunnel_hdr, msg,
2287                                     msg_link_selector(msg));
2288                 }
2289                 crs = crs->next;
2290         }
2291 }
2292
2293 void link_send_duplicate(struct link *l_ptr, struct link *tunnel)
2294 {
2295         struct sk_buff *iter;
2296         struct tipc_msg tunnel_hdr;
2297
2298         msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
2299                  DUPLICATE_MSG, TIPC_OK, INT_H_SIZE, l_ptr->addr);
2300         msg_set_msgcnt(&tunnel_hdr, l_ptr->out_queue_size);
2301         msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
2302         iter = l_ptr->first_out;
2303         while (iter) {
2304                 struct sk_buff *outbuf;
2305                 struct tipc_msg *msg = buf_msg(iter);
2306                 u32 length = msg_size(msg);
2307
2308                 if (msg_user(msg) == MSG_BUNDLER)
2309                         msg_set_type(msg, CLOSED_MSG);
2310                 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));   /* Update */
2311                 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 
2312                 msg_set_size(&tunnel_hdr, length + INT_H_SIZE);
2313                 outbuf = buf_acquire(length + INT_H_SIZE);
2314                 if (outbuf == NULL) {
2315                         warn("Memory squeeze; buffer duplication failed\n");
2316                         return;
2317                 }
2318                 memcpy(outbuf->data, (unchar *)&tunnel_hdr, INT_H_SIZE);
2319                 memcpy(outbuf->data + INT_H_SIZE, iter->data, length);
2320                 dbg("%c->%c:", l_ptr->b_ptr->net_plane,
2321                     tunnel->b_ptr->net_plane);
2322                 msg_dbg(buf_msg(outbuf), ">SEND>");
2323                 link_send_buf(tunnel, outbuf);
2324                 if (!link_is_up(l_ptr))
2325                         return;
2326                 iter = iter->next;
2327         }
2328 }
2329
2330
2331
2332 /**
2333  * buf_extract - extracts embedded TIPC message from another message
2334  * @skb: encapsulating message buffer
2335  * @from_pos: offset to extract from
2336  *
2337  * Returns a new message buffer containing an embedded message.  The 
2338  * encapsulating message itself is left unchanged.
2339  */
2340
2341 static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
2342 {
2343         struct tipc_msg *msg = (struct tipc_msg *)(skb->data + from_pos);
2344         u32 size = msg_size(msg);
2345         struct sk_buff *eb;
2346
2347         eb = buf_acquire(size);
2348         if (eb)
2349                 memcpy(eb->data, (unchar *)msg, size);
2350         return eb;
2351 }
2352
2353 /* 
2354  *  link_recv_changeover_msg(): Receive tunneled packet sent via other
2355  *  link. Node is locked. Returns 1 with *buf set to extracted buffer, else 0.
2356  */
2357
2358 static int link_recv_changeover_msg(struct link **l_ptr,
2359                                     struct sk_buff **buf)
2360 {
2361         struct sk_buff *tunnel_buf = *buf;
2362         struct link *dest_link;
2363         struct tipc_msg *msg;
2364         struct tipc_msg *tunnel_msg = buf_msg(tunnel_buf);
2365         u32 msg_typ = msg_type(tunnel_msg);
2366         u32 msg_count = msg_msgcnt(tunnel_msg);
2367
2368         dest_link = (*l_ptr)->owner->links[msg_bearer_id(tunnel_msg)];
2369         assert(dest_link != *l_ptr);
2370         if (!dest_link) {
2371                 msg_dbg(tunnel_msg, "NOLINK/<REC<");
2372                 goto exit;
2373         }
2374         dbg("%c<-%c:", dest_link->b_ptr->net_plane,
2375             (*l_ptr)->b_ptr->net_plane);
2376         *l_ptr = dest_link;
2377         msg = msg_get_wrapped(tunnel_msg);
2378
2379         if (msg_typ == DUPLICATE_MSG) {
2380                 if (less(msg_seqno(msg), mod(dest_link->next_in_no))) {
2381                         msg_dbg(tunnel_msg, "DROP/<REC<");
2382                         goto exit;
2383                 }
2384                 *buf = buf_extract(tunnel_buf,INT_H_SIZE);
2385                 if (*buf == NULL) {
2386                         warn("Memory squeeze; failed to extract msg\n");
2387                         goto exit;
2388                 }
2389                 msg_dbg(tunnel_msg, "TNL<REC<");
2390                 buf_discard(tunnel_buf);
2391                 return 1;
2392         }
2393
2394         /* First original message? */
2395
2396         if (link_is_up(dest_link)) {
2397                 msg_dbg(tunnel_msg, "UP/FIRST/<REC<");
2398                 link_reset(dest_link);
2399                 dest_link->exp_msg_count = msg_count;
2400                 if (!msg_count)
2401                         goto exit;
2402         } else if (dest_link->exp_msg_count == START_CHANGEOVER) {
2403                 msg_dbg(tunnel_msg, "BLK/FIRST/<REC<");
2404                 dest_link->exp_msg_count = msg_count;
2405                 if (!msg_count)
2406                         goto exit;
2407         }
2408
2409         /* Receive original message */
2410
2411         if (dest_link->exp_msg_count == 0) {
2412                 msg_dbg(tunnel_msg, "OVERDUE/DROP/<REC<");
2413                 dbg_print_link(dest_link, "LINK:");
2414                 goto exit;
2415         }
2416         dest_link->exp_msg_count--;
2417         if (less(msg_seqno(msg), dest_link->reset_checkpoint)) {
2418                 msg_dbg(tunnel_msg, "DROP/DUPL/<REC<");
2419                 goto exit;
2420         } else {
2421                 *buf = buf_extract(tunnel_buf, INT_H_SIZE);
2422                 if (*buf != NULL) {
2423                         msg_dbg(tunnel_msg, "TNL<REC<");
2424                         buf_discard(tunnel_buf);
2425                         return 1;
2426                 } else {
2427                         warn("Memory squeeze; dropped incoming msg\n");
2428                 }
2429         }
2430 exit:
2431         *buf = 0;
2432         buf_discard(tunnel_buf);
2433         return 0;
2434 }
2435
2436 /*
2437  *  Bundler functionality:
2438  */
2439 void link_recv_bundle(struct sk_buff *buf)
2440 {
2441         u32 msgcount = msg_msgcnt(buf_msg(buf));
2442         u32 pos = INT_H_SIZE;
2443         struct sk_buff *obuf;
2444
2445         msg_dbg(buf_msg(buf), "<BNDL<: ");
2446         while (msgcount--) {
2447                 obuf = buf_extract(buf, pos);
2448                 if (obuf == NULL) {
2449                         char addr_string[16];
2450
2451                         warn("Buffer allocation failure;\n");
2452                         warn("  incoming message(s) from %s lost\n",
2453                              addr_string_fill(addr_string, 
2454                                               msg_orignode(buf_msg(buf))));
2455                         return;
2456                 }
2457                 pos += align(msg_size(buf_msg(obuf)));
2458                 msg_dbg(buf_msg(obuf), "     /");
2459                 net_route_msg(obuf);
2460         }
2461         buf_discard(buf);
2462 }
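
/*
 * Illustrative sketch (guarded out, not compiled): bundled messages sit back
 * to back after the bundle header, each padded to a 4-byte boundary, which
 * is why the extraction offset advances by align(msg_size()).  The helper
 * and the assumed 40-byte INT_H_SIZE below are for illustration only.
 */
#if 0
static u32 bundle_align(u32 sz)
{
        return (sz + 3) & ~3u;
}

/*
 * With a 40-byte bundle header and bundled message sizes { 100, 61, 24 }:
 *
 *      msg 1 at pos 40                         (right after the header)
 *      msg 2 at pos 40 + bundle_align(100) = 140
 *      msg 3 at pos 140 + bundle_align(61) = 204
 */
#endif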
2463
2464 /*
2465  *  Fragmentation/defragmentation:
2466  */
2467
2468
2469 /* 
2470  * link_send_long_buf: Entry for buffers needing fragmentation.
2471  * The buffer is complete, including the total message length.
2472  * Returns user data length.
2473  */
2474 int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
2475 {
2476         struct tipc_msg *inmsg = buf_msg(buf);
2477         struct tipc_msg fragm_hdr;
2478         u32 insize = msg_size(inmsg);
2479         u32 dsz = msg_data_sz(inmsg);
2480         unchar *crs = buf->data;
2481         u32 rest = insize;
2482         u32 pack_sz = link_max_pkt(l_ptr);
2483         u32 fragm_sz = pack_sz - INT_H_SIZE;
2484         u32 fragm_no = 1;
2485         u32 destaddr = msg_destnode(inmsg);
2486
2487         if (msg_short(inmsg))
2488                 destaddr = l_ptr->addr;
2489
2490         if (msg_routed(inmsg))
2491                 msg_set_prevnode(inmsg, tipc_own_addr);
2492
2493         /* Prepare reusable fragment header: */
2494
2495         msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
2496                  TIPC_OK, INT_H_SIZE, destaddr);
2497         msg_set_link_selector(&fragm_hdr, msg_link_selector(inmsg));
2498         msg_set_long_msgno(&fragm_hdr, mod(l_ptr->long_msg_seq_no++));
2499         msg_set_fragm_no(&fragm_hdr, fragm_no);
2500         l_ptr->stats.sent_fragmented++;
2501
2502         /* Chop up message: */
2503
2504         while (rest > 0) {
2505                 struct sk_buff *fragm;
2506
2507                 if (rest <= fragm_sz) {
2508                         fragm_sz = rest;
2509                         msg_set_type(&fragm_hdr, LAST_FRAGMENT);
2510                 }
2511                 fragm = buf_acquire(fragm_sz + INT_H_SIZE);
2512                 if (fragm == NULL) {
2513                         warn("Memory squeeze; failed to fragment msg\n");
2514                         dsz = -ENOMEM;
2515                         goto exit;
2516                 }
2517                 msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
2518                 memcpy(fragm->data, (unchar *)&fragm_hdr, INT_H_SIZE);
2519                 memcpy(fragm->data + INT_H_SIZE, crs, fragm_sz);
2520
2521                 /*  Send queued messages first, if any: */
2522
2523                 l_ptr->stats.sent_fragments++;
2524                 link_send_buf(l_ptr, fragm);
2525                 if (!link_is_up(l_ptr))
2526                         return dsz;
2527                 msg_set_fragm_no(&fragm_hdr, ++fragm_no);
2528                 rest -= fragm_sz;
2529                 crs += fragm_sz;
2530                 msg_set_type(&fragm_hdr, FRAGMENT);
2531         }
2532 exit:
2533         buf_discard(buf);
2534         return dsz;
2535 }
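
/*
 * Illustrative sketch (guarded out, not compiled): each fragment emitted by
 * link_send_long_buf() carries an INT_H_SIZE fragment header plus up to
 * (link_max_pkt - INT_H_SIZE) bytes of the original buffer, so the number
 * of fragments is a ceiling division.  Names below are editorial stand-ins.
 */
#if 0
static u32 fragment_count(u32 buf_size, u32 max_pkt, u32 hdr_size)
{
        u32 payload = max_pkt - hdr_size;

        return (buf_size + payload - 1) / payload;
}

/*
 * fragment_count(60000, 1500, 40) -> 42    (41 full fragments + 1 tail)
 * fragment_count(1460, 1500, 40)  -> 1     (fits in a single fragment)
 */
#endif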
2536
2537 /* 
2538  * A pending message being re-assembled must store certain values 
2539  * to handle subsequent fragments correctly. The following functions 
2540  * help store these values in unused, available fields in the
2541  * pending message. This makes dynamic memory allocation unnecessary.
2542  */
2543
2544 static inline u32 get_long_msg_seqno(struct sk_buff *buf)
2545 {
2546         return msg_seqno(buf_msg(buf));
2547 }
2548
2549 static inline void set_long_msg_seqno(struct sk_buff *buf, u32 seqno)
2550 {
2551         msg_set_seqno(buf_msg(buf), seqno);
2552 }
2553
2554 static inline u32 get_fragm_size(struct sk_buff *buf)
2555 {
2556         return msg_ack(buf_msg(buf));
2557 }
2558
2559 static inline void set_fragm_size(struct sk_buff *buf, u32 sz)
2560 {
2561         msg_set_ack(buf_msg(buf), sz);
2562 }
2563
2564 static inline u32 get_expected_frags(struct sk_buff *buf)
2565 {
2566         return msg_bcast_ack(buf_msg(buf));
2567 }
2568
2569 static inline void set_expected_frags(struct sk_buff *buf, u32 exp)
2570 {
2571         msg_set_bcast_ack(buf_msg(buf), exp);
2572 }
2573
2574 static inline u32 get_timer_cnt(struct sk_buff *buf)
2575 {
2576         return msg_reroute_cnt(buf_msg(buf));
2577 }
2578
2579 static inline void incr_timer_cnt(struct sk_buff *buf)
2580 {
2581         msg_incr_reroute_cnt(buf_msg(buf));
2582 }
2583
2584 /* 
2585  * link_recv_fragment(): Called with node lock on. Returns 1, with *fb
2586  * pointing to the reassembled buffer, once the message is complete.
2587  */
2588 int link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb, 
2589                        struct tipc_msg **m)
2590 {
2591         struct sk_buff *prev = 0;
2592         struct sk_buff *fbuf = *fb;
2593         struct tipc_msg *fragm = buf_msg(fbuf);
2594         struct sk_buff *pbuf = *pending;
2595         u32 long_msg_seq_no = msg_long_msgno(fragm);
2596
2597         *fb = 0;
2598         msg_dbg(fragm,"FRG<REC<");
2599
2600         /* Is there an incomplete message waiting for this fragment? */
2601
2602         while (pbuf && ((msg_seqno(buf_msg(pbuf)) != long_msg_seq_no)
2603                         || (msg_orignode(fragm) != msg_orignode(buf_msg(pbuf))))) {
2604                 prev = pbuf;
2605                 pbuf = pbuf->next;
2606         }
2607
2608         if (!pbuf && (msg_type(fragm) == FIRST_FRAGMENT)) {
2609                 struct tipc_msg *imsg = (struct tipc_msg *)msg_data(fragm);
2610                 u32 msg_sz = msg_size(imsg);
2611                 u32 fragm_sz = msg_data_sz(fragm);
2612                 u32 exp_fragm_cnt = msg_sz/fragm_sz + !!(msg_sz % fragm_sz);
2613                 u32 max =  TIPC_MAX_USER_MSG_SIZE + LONG_H_SIZE;
2614                 if (msg_type(imsg) == TIPC_MCAST_MSG)
2615                         max = TIPC_MAX_USER_MSG_SIZE + MCAST_H_SIZE;
2616                 if (msg_size(imsg) > max) {
2617                         msg_dbg(fragm,"<REC<Oversized: ");
2618                         buf_discard(fbuf);
2619                         return 0;
2620                 }
2621                 pbuf = buf_acquire(msg_size(imsg));
2622                 if (pbuf != NULL) {
2623                         pbuf->next = *pending;
2624                         *pending = pbuf;
2625                         memcpy(pbuf->data, (unchar *)imsg, msg_data_sz(fragm));
2626
2627                         /*  Prepare buffer for subsequent fragments. */
2628
2629                         set_long_msg_seqno(pbuf, long_msg_seq_no); 
2630                         set_fragm_size(pbuf,fragm_sz); 
2631                         set_expected_frags(pbuf,exp_fragm_cnt - 1); 
2632                 } else {
2633                         warn("Memory squeeze; got no defragmenting buffer\n");
2634                 }
2635                 buf_discard(fbuf);
2636                 return 0;
2637         } else if (pbuf && (msg_type(fragm) != FIRST_FRAGMENT)) {
2638                 u32 dsz = msg_data_sz(fragm);
2639                 u32 fsz = get_fragm_size(pbuf);
2640                 u32 crs = ((msg_fragm_no(fragm) - 1) * fsz);
2641                 u32 exp_frags = get_expected_frags(pbuf) - 1;
2642                 memcpy(pbuf->data + crs, msg_data(fragm), dsz);
2643                 buf_discard(fbuf);
2644
2645                 /* Is message complete? */
2646
2647                 if (exp_frags == 0) {
2648                         if (prev)
2649                                 prev->next = pbuf->next;
2650                         else
2651                                 *pending = pbuf->next;
2652                         msg_reset_reroute_cnt(buf_msg(pbuf));
2653                         *fb = pbuf;
2654                         *m = buf_msg(pbuf);
2655                         return 1;
2656                 }
2657                 set_expected_frags(pbuf,exp_frags);     
2658                 return 0;
2659         }
2660         dbg(" Discarding orphan fragment %x\n",fbuf);
2661         msg_dbg(fragm,"ORPHAN:");
2662         dbg("Pending long buffers:\n");
2663         dbg_print_buf_chain(*pending);
2664         buf_discard(fbuf);
2665         return 0;
2666 }
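
/*
 * Illustrative sketch (guarded out, not compiled): the reassembly arithmetic
 * used above.  The expected fragment count is derived from the first
 * fragment, and each later fragment is copied at a fixed offset in the
 * pending buffer.  Names below are editorial stand-ins.
 */
#if 0
static u32 expected_fragments(u32 msg_sz, u32 fragm_sz)
{
        return msg_sz / fragm_sz + !!(msg_sz % fragm_sz);
}

static u32 fragment_offset(u32 fragm_no, u32 fragm_sz)
{
        return (fragm_no - 1) * fragm_sz;
}

/*
 * A 3000-byte message carried in 1400-byte fragments:
 *      expected_fragments(3000, 1400) -> 3
 *      fragment 2 is copied at offset fragment_offset(2, 1400) = 1400,
 *      fragment 3 at offset 2800; when the expected count hits zero the
 *      pending buffer is handed back as the complete message.
 */
#endif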
2667
2668 /**
2669  * link_check_defragm_bufs - flush stale incoming message fragments
2670  * @l_ptr: pointer to link
2671  */
2672
2673 static void link_check_defragm_bufs(struct link *l_ptr)
2674 {
2675         struct sk_buff *prev = 0;
2676         struct sk_buff *next = 0;
2677         struct sk_buff *buf = l_ptr->defragm_buf;
2678
2679         if (!buf)
2680                 return;
2681         if (!link_working_working(l_ptr))
2682                 return;
2683         while (buf) {
2684                 u32 cnt = get_timer_cnt(buf);
2685
2686                 next = buf->next;
2687                 if (cnt < 4) {
2688                         incr_timer_cnt(buf);
2689                         prev = buf;
2690                 } else {
2691                         dbg(" Discarding incomplete long buffer\n");
2692                         msg_dbg(buf_msg(buf), "LONG:");
2693                         dbg_print_link(l_ptr, "curr:");
2694                         dbg("Pending long buffers:\n");
2695                         dbg_print_buf_chain(l_ptr->defragm_buf);
2696                         if (prev)
2697                                 prev->next = buf->next;
2698                         else
2699                                 l_ptr->defragm_buf = buf->next;
2700                         buf_discard(buf);
2701                 }
2702                 buf = next;
2703         }
2704 }
2705
2706
2707
2708 static void link_set_supervision_props(struct link *l_ptr, u32 tolerance)
2709 {
2710         l_ptr->tolerance = tolerance;
2711         l_ptr->continuity_interval =
2712                 ((tolerance / 4) > 500) ? 500 : tolerance / 4;
2713         l_ptr->abort_limit = tolerance / (l_ptr->continuity_interval / 4);
2714 }
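
/*
 * Illustrative sketch (guarded out, not compiled): worked values for the
 * mapping above.  The continuity interval is a quarter of the tolerance,
 * capped at 500, and the abort limit counts how many quarter-intervals fit
 * into the tolerance.  Helper names are editorial stand-ins.
 */
#if 0
static u32 continuity_interval(u32 tolerance)
{
        u32 quarter = tolerance / 4;

        return (quarter > 500) ? 500 : quarter;
}

static u32 abort_limit(u32 tolerance)
{
        return tolerance / (continuity_interval(tolerance) / 4);
}

/*
 * tolerance 3000: interval = 500 (750 capped), abort_limit = 3000 / 125 = 24
 * tolerance 1500: interval = 375,              abort_limit = 1500 / 93  = 16
 */
#endif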
2715
2716
2717 void link_set_queue_limits(struct link *l_ptr, u32 window)
2718 {
2719         /* Data messages from this node, including FIRST_FRAGMENT */
2720         l_ptr->queue_limit[DATA_LOW] = window;
2721         l_ptr->queue_limit[DATA_MEDIUM] = (window / 3) * 4;
2722         l_ptr->queue_limit[DATA_HIGH] = (window / 3) * 5;
2723         l_ptr->queue_limit[DATA_CRITICAL] = (window / 3) * 6;
2724         /* Transiting data messages, including FIRST_FRAGMENT */
2725         l_ptr->queue_limit[DATA_LOW + 4] = 300;
2726         l_ptr->queue_limit[DATA_MEDIUM + 4] = 600;
2727         l_ptr->queue_limit[DATA_HIGH + 4] = 900;
2728         l_ptr->queue_limit[DATA_CRITICAL + 4] = 1200;
2729         l_ptr->queue_limit[CONN_MANAGER] = 1200;
2730         l_ptr->queue_limit[ROUTE_DISTRIBUTOR] = 1200;
2731         l_ptr->queue_limit[CHANGEOVER_PROTOCOL] = 2500;
2732         l_ptr->queue_limit[NAME_DISTRIBUTOR] = 3000;
2733         /* FRAGMENT and LAST_FRAGMENT packets */
2734         l_ptr->queue_limit[MSG_FRAGMENTER] = 4000;
2735 }
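
/*
 * Illustrative sketch (guarded out, not compiled): the limits for locally
 * originated data scale with the configured window, using an integer
 * division by 3 first.  The importance encoding (0 = DATA_LOW ...
 * 3 = DATA_CRITICAL) in this stand-in helper is for illustration only.
 */
#if 0
static u32 data_queue_limit(u32 window, u32 importance)
{
        return importance ? (window / 3) * (importance + 3) : window;
}

/*
 * For window == 50:
 *      DATA_LOW      -> 50
 *      DATA_MEDIUM   -> (50 / 3) * 4 = 64
 *      DATA_HIGH     -> (50 / 3) * 5 = 80
 *      DATA_CRITICAL -> (50 / 3) * 6 = 96
 * Transit traffic and internal users keep the fixed limits set above.
 */
#endif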
2736
2737 /**
2738  * link_find_link - locate link by name
2739  * @name: ptr to link name string
2740  * @node: ptr to area to be filled with ptr to associated node
2741  * 
2742  * Caller must hold 'net_lock' to ensure node and bearer are not deleted;
2743  * this also prevents link deletion.
2744  * 
2745  * Returns pointer to link (or 0 if invalid link name).
2746  */
2747
2748 static struct link *link_find_link(const char *name, struct node **node)
2749 {
2750         struct link_name link_name_parts;
2751         struct bearer *b_ptr;
2752         struct link *l_ptr; 
2753
2754         if (!link_name_validate(name, &link_name_parts))
2755                 return 0;
2756
2757         b_ptr = bearer_find_interface(link_name_parts.if_local);
2758         if (!b_ptr)
2759                 return 0;
2760
2761         *node = node_find(link_name_parts.addr_peer); 
2762         if (!*node)
2763                 return 0;
2764
2765         l_ptr = (*node)->links[b_ptr->identity];
2766         if (!l_ptr || strcmp(l_ptr->name, name))
2767                 return 0;
2768
2769         return l_ptr;
2770 }
2771
2772 struct sk_buff *link_cmd_config(const void *req_tlv_area, int req_tlv_space, 
2773                                 u16 cmd)
2774 {
2775         struct tipc_link_config *args;
2776         u32 new_value;
2777         struct link *l_ptr;
2778         struct node *node;
2779         int res;
2780
2781         if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_CONFIG))
2782                 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
2783
2784         args = (struct tipc_link_config *)TLV_DATA(req_tlv_area);
2785         new_value = ntohl(args->value);
2786
2787         if (!strcmp(args->name, bc_link_name)) {
2788                 if ((cmd == TIPC_CMD_SET_LINK_WINDOW) &&
2789                     (bclink_set_queue_limits(new_value) == 0))
2790                         return cfg_reply_none();
2791                 return cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
2792                                               " (cannot change setting on broadcast link)");
2793         }
2794
2795         read_lock_bh(&net_lock);
2796         l_ptr = link_find_link(args->name, &node); 
2797         if (!l_ptr) {
2798                 read_unlock_bh(&net_lock);
2799                 return cfg_reply_error_string("link not found");
2800         }
2801
2802         node_lock(node);
2803         res = -EINVAL;
2804         switch (cmd) {
2805         case TIPC_CMD_SET_LINK_TOL: 
2806                 if ((new_value >= TIPC_MIN_LINK_TOL) && 
2807                     (new_value <= TIPC_MAX_LINK_TOL)) {
2808                         link_set_supervision_props(l_ptr, new_value);
2809                         link_send_proto_msg(l_ptr, STATE_MSG, 
2810                                             0, 0, new_value, 0, 0);
2811                         res = TIPC_OK;
2812                 }
2813                 break;
2814         case TIPC_CMD_SET_LINK_PRI: 
2815                 if (new_value < TIPC_NUM_LINK_PRI) {
2816                         l_ptr->priority = new_value;
2817                         link_send_proto_msg(l_ptr, STATE_MSG, 
2818                                             0, 0, 0, new_value, 0);
2819                         res = TIPC_OK;
2820                 }
2821                 break;
2822         case TIPC_CMD_SET_LINK_WINDOW: 
2823                 if ((new_value >= TIPC_MIN_LINK_WIN) && 
2824                     (new_value <= TIPC_MAX_LINK_WIN)) {
2825                         link_set_queue_limits(l_ptr, new_value);
2826                         res = TIPC_OK;
2827                 }
2828                 break;
2829         }
2830         node_unlock(node);
2831
2832         read_unlock_bh(&net_lock);
2833         if (res)
2834                 return cfg_reply_error_string("cannot change link setting");
2835
2836         return cfg_reply_none();
2837 }
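
/*
 * Illustrative sketch (not part of the original source): building the
 * request TLV that link_cmd_config() parses.  Only the layout follows from
 * the code above (a TIPC_TLV_LINK_CONFIG TLV wrapping a struct
 * tipc_link_config whose 'value' is in network byte order); how the buffer
 * is allocated and dispatched is left out.
 */
#if 0
static void fill_link_config_tlv(void *req_tlv_area, const char *link_name,
                                 u32 new_value)
{
        struct tipc_link_config *args;

        TLV_SET(req_tlv_area, TIPC_TLV_LINK_CONFIG, NULL,
                sizeof(struct tipc_link_config));
        args = (struct tipc_link_config *)TLV_DATA(req_tlv_area);
        args->value = htonl(new_value);
        strcpy(args->name, link_name);
}
#endif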
2838
2839 /**
2840  * link_reset_statistics - reset link statistics
2841  * @l_ptr: pointer to link
2842  */
2843
2844 static void link_reset_statistics(struct link *l_ptr)
2845 {
2846         memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
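        /* Reseed the info counters so that the RX/TX packet counts shown by
         * link_stats() (next_*_no minus *_info) restart from zero.
         */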
2847         l_ptr->stats.sent_info = l_ptr->next_out_no;
2848         l_ptr->stats.recv_info = l_ptr->next_in_no;
2849 }
2850
2851 struct sk_buff *link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space)
2852 {
2853         char *link_name;
2854         struct link *l_ptr; 
2855         struct node *node;
2856
2857         if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
2858                 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
2859
2860         link_name = (char *)TLV_DATA(req_tlv_area);
2861         if (!strcmp(link_name, bc_link_name)) {
2862                 if (bclink_reset_stats())
2863                         return cfg_reply_error_string("link not found");
2864                 return cfg_reply_none();
2865         }
2866
2867         read_lock_bh(&net_lock);
2868         l_ptr = link_find_link(link_name, &node); 
2869         if (!l_ptr) {
2870                 read_unlock_bh(&net_lock);
2871                 return cfg_reply_error_string("link not found");
2872         }
2873
2874         node_lock(node);
2875         link_reset_statistics(l_ptr);
2876         node_unlock(node);
2877         read_unlock_bh(&net_lock);
2878         return cfg_reply_none();
2879 }
2880
2881 /**
2882  * percent - convert count to a percentage of total (rounding to nearest)
2883  */
2884
2885 static u32 percent(u32 count, u32 total)
2886 {
2887         return (count * 100 + (total / 2)) / total;
2888 }
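
/*
 * Illustrative note (not part of the original source): adding total/2 before
 * dividing rounds to the nearest whole percent, e.g. percent(1, 3) =
 * (100 + 1) / 3 = 33 and percent(2, 3) = (200 + 1) / 3 = 67.  link_stats()
 * below forces 'total' to at least 1, avoiding a division by zero.
 */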
2889
2890 /**
2891  * link_stats - print link statistics
2892  * @name: link name
2893  * @buf: print buffer area
2894  * @buf_size: size of print buffer area
2895  * 
2896  * Returns length of print buffer data string (or 0 if error)
2897  */
2898
2899 static int link_stats(const char *name, char *buf, const u32 buf_size)
2900 {
2901         struct print_buf pb;
2902         struct link *l_ptr; 
2903         struct node *node;
2904         char *status;
2905         u32 profile_total = 0;
2906
2907         if (!strcmp(name, bc_link_name))
2908                 return bclink_stats(buf, buf_size);
2909
2910         printbuf_init(&pb, buf, buf_size);
2911
2912         read_lock_bh(&net_lock);
2913         l_ptr = link_find_link(name, &node); 
2914         if (!l_ptr) {
2915                 read_unlock_bh(&net_lock);
2916                 return 0;
2917         }
2918         node_lock(node);
2919
2920         if (link_is_active(l_ptr))
2921                 status = "ACTIVE";
2922         else if (link_is_up(l_ptr))
2923                 status = "STANDBY";
2924         else
2925                 status = "DEFUNCT";
2926         tipc_printf(&pb, "Link <%s>\n"
2927                          "  %s  MTU:%u  Priority:%u  Tolerance:%u ms"
2928                          "  Window:%u packets\n", 
2929                     l_ptr->name, status, link_max_pkt(l_ptr), 
2930                     l_ptr->priority, l_ptr->tolerance, l_ptr->queue_limit[0]);
2931         tipc_printf(&pb, "  RX packets:%u fragments:%u/%u bundles:%u/%u\n", 
2932                     l_ptr->next_in_no - l_ptr->stats.recv_info,
2933                     l_ptr->stats.recv_fragments,
2934                     l_ptr->stats.recv_fragmented,
2935                     l_ptr->stats.recv_bundles,
2936                     l_ptr->stats.recv_bundled);
2937         tipc_printf(&pb, "  TX packets:%u fragments:%u/%u bundles:%u/%u\n", 
2938                     l_ptr->next_out_no - l_ptr->stats.sent_info,
2939                     l_ptr->stats.sent_fragments,
2940                     l_ptr->stats.sent_fragmented, 
2941                     l_ptr->stats.sent_bundles,
2942                     l_ptr->stats.sent_bundled);
2943         profile_total = l_ptr->stats.msg_length_counts;
2944         if (!profile_total)
2945                 profile_total = 1;
2946         tipc_printf(&pb, "  TX profile sample:%u packets  average:%u octets\n"
2947                          "  0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% "
2948                          "-16384:%u%% -32768:%u%% -66000:%u%%\n",
2949                     l_ptr->stats.msg_length_counts,
2950                     l_ptr->stats.msg_lengths_total / profile_total,
2951                     percent(l_ptr->stats.msg_length_profile[0], profile_total),
2952                     percent(l_ptr->stats.msg_length_profile[1], profile_total),
2953                     percent(l_ptr->stats.msg_length_profile[2], profile_total),
2954                     percent(l_ptr->stats.msg_length_profile[3], profile_total),
2955                     percent(l_ptr->stats.msg_length_profile[4], profile_total),
2956                     percent(l_ptr->stats.msg_length_profile[5], profile_total),
2957                     percent(l_ptr->stats.msg_length_profile[6], profile_total));
2958         tipc_printf(&pb, "  RX states:%u probes:%u naks:%u defs:%u dups:%u\n", 
2959                     l_ptr->stats.recv_states,
2960                     l_ptr->stats.recv_probes,
2961                     l_ptr->stats.recv_nacks,
2962                     l_ptr->stats.deferred_recv, 
2963                     l_ptr->stats.duplicates);
2964         tipc_printf(&pb, "  TX states:%u probes:%u naks:%u acks:%u dups:%u\n", 
2965                     l_ptr->stats.sent_states, 
2966                     l_ptr->stats.sent_probes, 
2967                     l_ptr->stats.sent_nacks, 
2968                     l_ptr->stats.sent_acks, 
2969                     l_ptr->stats.retransmitted);
2970         tipc_printf(&pb, "  Congestion bearer:%u link:%u  Send queue max:%u avg:%u\n",
2971                     l_ptr->stats.bearer_congs,
2972                     l_ptr->stats.link_congs, 
2973                     l_ptr->stats.max_queue_sz,
2974                     l_ptr->stats.queue_sz_counts
2975                     ? (l_ptr->stats.accu_queue_sz / l_ptr->stats.queue_sz_counts)
2976                     : 0);
2977
2978         node_unlock(node);
2979         read_unlock_bh(&net_lock);
2980         return printbuf_validate(&pb);
2981 }
2982
2983 #define MAX_LINK_STATS_INFO 2000
2984
2985 struct sk_buff *link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space)
2986 {
2987         struct sk_buff *buf;
2988         struct tlv_desc *rep_tlv;
2989         int str_len;
2990
2991         if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
2992                 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
2993
2994         buf = cfg_reply_alloc(TLV_SPACE(MAX_LINK_STATS_INFO));
2995         if (!buf)
2996                 return NULL;
2997
2998         rep_tlv = (struct tlv_desc *)buf->data;
2999
3000         str_len = link_stats((char *)TLV_DATA(req_tlv_area),
3001                              (char *)TLV_DATA(rep_tlv), MAX_LINK_STATS_INFO);
3002         if (!str_len) {
3003                 buf_discard(buf);
3004                 return cfg_reply_error_string("link not found");
3005         }
3006
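
        /* Statistics string length is now known: reserve its TLV space in
         * the reply skb and fill in the TLV header around it.
         */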
3007         skb_put(buf, TLV_SPACE(str_len));
3008         TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
3009
3010         return buf;
3011 }
3012
3013 #if 0
3014 int link_control(const char *name, u32 op, u32 val)
3015 {
3016         int res = -EINVAL;
3017         struct link *l_ptr;
3018         u32 bearer_id;
3019         struct node * node;
3020         u32 a;
3021
3022         a = link_name2addr(name, &bearer_id);
3023         read_lock_bh(&net_lock);
3024         node = node_find(a);
3025         if (node) {
3026                 node_lock(node);
3027                 l_ptr = node->links[bearer_id];
3028                 if (l_ptr) {
3029                         if (op == TIPC_REMOVE_LINK) {
3030                                 struct bearer *b_ptr = l_ptr->b_ptr;
3031                                 spin_lock_bh(&b_ptr->publ.lock);
3032                                 link_delete(l_ptr);
3033                                 spin_unlock_bh(&b_ptr->publ.lock);
3034                         }
3035                         if (op == TIPC_CMD_BLOCK_LINK) {
3036                                 link_reset(l_ptr);
3037                                 l_ptr->blocked = 1;
3038                         }
3039                         if (op == TIPC_CMD_UNBLOCK_LINK) {
3040                                 l_ptr->blocked = 0;
3041                         }
3042                         res = TIPC_OK;
3043                 }
3044                 node_unlock(node);
3045         }
3046         read_unlock_bh(&net_lock);
3047         return res;
3048 }
3049 #endif
3050
3051 /**
3052  * link_get_max_pkt - get maximum packet size to use when sending to destination
3053  * @dest: network address of destination node
3054  * @selector: used to select from set of active links
3055  * 
3056  * If no active link can be found, uses default maximum packet size.
3057  */
3058
3059 u32 link_get_max_pkt(u32 dest, u32 selector)
3060 {
3061         struct node *n_ptr;
3062         struct link *l_ptr;
3063         u32 res = MAX_PKT_DEFAULT;
3064         
3065         if (dest == tipc_own_addr)
3066                 return MAX_MSG_SIZE;
3067
3068         read_lock_bh(&net_lock);        
3069         n_ptr = node_select(dest, selector);
3070         if (n_ptr) {
3071                 node_lock(n_ptr);
3072                 l_ptr = n_ptr->active_links[selector & 1];
3073                 if (l_ptr)
3074                         res = link_max_pkt(l_ptr);
3075                 node_unlock(n_ptr);
3076         }
3077         read_unlock_bh(&net_lock);       
3078         return res;
3079 }
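
/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * sender could use link_get_max_pkt() to decide whether a message needs
 * fragmentation.  Note that only the low bit of 'selector' matters here,
 * since the lookup uses active_links[selector & 1].
 */
#if 0
static int msg_needs_fragmentation(u32 destnode, u32 selector, u32 msg_size)
{
        return msg_size > link_get_max_pkt(destnode, selector);
}
#endif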
3080
3081 #if 0
3082 static void link_dump_rec_queue(struct link *l_ptr)
3083 {
3084         struct sk_buff *crs;
3085
3086         if (!l_ptr->oldest_deferred_in) {
3087                 info("Reception queue empty\n");
3088                 return;
3089         }
3090         info("Contents of Reception queue:\n");
3091         crs = l_ptr->oldest_deferred_in;
3092         while (crs) {
3093                 if (crs->data == (void *)0x0000a3a3) {
3094                         info("buffer %p invalid\n", crs);
3095                         return;
3096                 }
3097                 msg_dbg(buf_msg(crs), "In rec queue: \n");
3098                 crs = crs->next;
3099         }
3100 }
3101 #endif
3102
3103 static void link_dump_send_queue(struct link *l_ptr)
3104 {
3105         if (l_ptr->next_out) {
3106                 info("\nContents of unsent queue:\n");
3107                 dbg_print_buf_chain(l_ptr->next_out);
3108         }
3109         if (l_ptr->first_out) {
3110                 info("\nContents of send queue:\n");
3111                 dbg_print_buf_chain(l_ptr->first_out);
3112         } else
3113                 info("Empty send queue\n");
3114 }
3115
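/**
 * link_print - append link state summary to a print buffer
 * @l_ptr: pointer to link
 * @buf: print buffer area
 * @str: prefix string to print first
 * 
 * Prints @str, then (unless the link is in a reset state) the next outbound
 * and next expected inbound sequence numbers, the sequence number ranges of
 * the send and deferred-reception queues, and the link FSM state flags.
 */
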
3116 static void link_print(struct link *l_ptr, struct print_buf *buf,
3117                        const char *str)
3118 {
3119         tipc_printf(buf, "%s", str);
3120         if (link_reset_reset(l_ptr) || link_reset_unknown(l_ptr))
3121                 return;
3122         tipc_printf(buf, "Link %x<%s>:",
3123                     l_ptr->addr, l_ptr->b_ptr->publ.name);
3124         tipc_printf(buf, ": NXO(%u):", mod(l_ptr->next_out_no));
3125         tipc_printf(buf, "NXI(%u):", mod(l_ptr->next_in_no));
3126         tipc_printf(buf, "SQUE");
3127         if (l_ptr->first_out) {
3128                 tipc_printf(buf, "[%u..", msg_seqno(buf_msg(l_ptr->first_out)));
3129                 if (l_ptr->next_out)
3130                         tipc_printf(buf, "%u..",
3131                                     msg_seqno(buf_msg(l_ptr->next_out)));
3132                 tipc_printf(buf, "%u]",
3133                             msg_seqno(buf_msg(l_ptr->last_out)));
3135                 if ((mod(msg_seqno(buf_msg(l_ptr->last_out)) - 
3136                          msg_seqno(buf_msg(l_ptr->first_out))) 
3137                      != (l_ptr->out_queue_size - 1))
3138                     || (l_ptr->last_out->next != 0)) {
3139                         tipc_printf(buf, "\nSend queue inconsistency\n");
3140                         tipc_printf(buf, "first_out= %p ", l_ptr->first_out);
3141                         tipc_printf(buf, "next_out= %p ", l_ptr->next_out);
3142                         tipc_printf(buf, "last_out= %p ", l_ptr->last_out);
3143                         link_dump_send_queue(l_ptr);
3144                 }
3145         } else
3146                 tipc_printf(buf, "[]");
3147         tipc_printf(buf, "SQSIZ(%u)", l_ptr->out_queue_size);
3148         if (l_ptr->oldest_deferred_in) {
3149                 u32 o = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
3150                 u32 n = msg_seqno(buf_msg(l_ptr->newest_deferred_in));
3151                 tipc_printf(buf, ":RQUE[%u..%u]", o, n);
3152                 if (l_ptr->deferred_inqueue_sz != mod((n + 1) - o)) {
3153                         tipc_printf(buf, ":RQSIZ(%u)",
3154                                     l_ptr->deferred_inqueue_sz);
3155                 }
3156         }
3157         if (link_working_unknown(l_ptr))
3158                 tipc_printf(buf, ":WU");
3159         if (link_reset_reset(l_ptr))
3160                 tipc_printf(buf, ":RR");
3161         if (link_reset_unknown(l_ptr))
3162                 tipc_printf(buf, ":RU");
3163         if (link_working_working(l_ptr))
3164                 tipc_printf(buf, ":WW");
3165         tipc_printf(buf, "\n");
3166 }
3167