[TIPC] Initial merge
Per Liden [Mon, 2 Jan 2006 18:04:38 +0000 (19:04 +0100)]
TIPC (Transparent Inter Process Communication) is a protocol designed for
intra-cluster communication. For more information, see
http://tipc.sourceforge.net

Signed-off-by: Per Liden <per.liden@nospam.ericsson.com>

54 files changed:
include/linux/socket.h
include/linux/tipc.h [new file with mode: 0644]
include/net/tipc/tipc.h [new file with mode: 0644]
include/net/tipc/tipc_bearer.h [new file with mode: 0644]
include/net/tipc/tipc_msg.h [new file with mode: 0644]
include/net/tipc/tipc_port.h [new file with mode: 0644]
net/Kconfig
net/Makefile
net/tipc/Kconfig [new file with mode: 0644]
net/tipc/Makefile [new file with mode: 0644]
net/tipc/addr.c [new file with mode: 0644]
net/tipc/addr.h [new file with mode: 0644]
net/tipc/bcast.c [new file with mode: 0644]
net/tipc/bcast.h [new file with mode: 0644]
net/tipc/bearer.c [new file with mode: 0644]
net/tipc/bearer.h [new file with mode: 0644]
net/tipc/cluster.c [new file with mode: 0644]
net/tipc/cluster.h [new file with mode: 0644]
net/tipc/config.c [new file with mode: 0644]
net/tipc/config.h [new file with mode: 0644]
net/tipc/core.c [new file with mode: 0644]
net/tipc/core.h [new file with mode: 0644]
net/tipc/dbg.c [new file with mode: 0644]
net/tipc/dbg.h [new file with mode: 0644]
net/tipc/discover.c [new file with mode: 0644]
net/tipc/discover.h [new file with mode: 0644]
net/tipc/eth_media.c [new file with mode: 0644]
net/tipc/handler.c [new file with mode: 0644]
net/tipc/link.c [new file with mode: 0644]
net/tipc/link.h [new file with mode: 0644]
net/tipc/msg.c [new file with mode: 0644]
net/tipc/msg.h [new file with mode: 0644]
net/tipc/name_distr.c [new file with mode: 0644]
net/tipc/name_distr.h [new file with mode: 0644]
net/tipc/name_table.c [new file with mode: 0644]
net/tipc/name_table.h [new file with mode: 0644]
net/tipc/net.c [new file with mode: 0644]
net/tipc/net.h [new file with mode: 0644]
net/tipc/netlink.c [new file with mode: 0644]
net/tipc/node.c [new file with mode: 0644]
net/tipc/node.h [new file with mode: 0644]
net/tipc/node_subscr.c [new file with mode: 0644]
net/tipc/node_subscr.h [new file with mode: 0644]
net/tipc/port.c [new file with mode: 0644]
net/tipc/port.h [new file with mode: 0644]
net/tipc/ref.c [new file with mode: 0644]
net/tipc/ref.h [new file with mode: 0644]
net/tipc/socket.c [new file with mode: 0644]
net/tipc/subscr.c [new file with mode: 0644]
net/tipc/subscr.h [new file with mode: 0644]
net/tipc/user_reg.c [new file with mode: 0644]
net/tipc/user_reg.h [new file with mode: 0644]
net/tipc/zone.c [new file with mode: 0644]
net/tipc/zone.h [new file with mode: 0644]

index 9f40191..b02dda4 100644 (file)
@@ -186,6 +186,7 @@ struct ucred {
 #define AF_PPPOX       24      /* PPPoX sockets                */
 #define AF_WANPIPE     25      /* Wanpipe API Sockets */
 #define AF_LLC         26      /* Linux LLC                    */
+#define AF_TIPC                30      /* TIPC sockets                 */
 #define AF_BLUETOOTH   31      /* Bluetooth sockets            */
 #define AF_MAX         32      /* For now.. */
 
@@ -218,6 +219,7 @@ struct ucred {
 #define PF_PPPOX       AF_PPPOX
 #define PF_WANPIPE     AF_WANPIPE
 #define PF_LLC         AF_LLC
+#define PF_TIPC                AF_TIPC
 #define PF_BLUETOOTH   AF_BLUETOOTH
 #define PF_MAX         AF_MAX
 
@@ -279,6 +281,7 @@ struct ucred {
 #define SOL_LLC                268
 #define SOL_DCCP       269
 #define SOL_NETLINK    270
+#define SOL_TIPC       271
 
 /* IPX options */
 #define IPX_TYPE       1
diff --git a/include/linux/tipc.h b/include/linux/tipc.h
new file mode 100644 (file)
index 0000000..ca754f3
--- /dev/null
@@ -0,0 +1,598 @@
+/*
+ * include/linux/tipc.h: Include file for TIPC users
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUX_TIPC_H_
+#define _LINUX_TIPC_H_
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <asm/byteorder.h>
+
+/*
+ * TIPC addressing primitives
+ */
+struct tipc_portid {
+       __u32 ref;
+       __u32 node;
+};
+
+struct tipc_name {
+       __u32 type;
+       __u32 instance;
+};
+
+struct tipc_name_seq {
+       __u32 type;
+       __u32 lower;
+       __u32 upper;
+};
+
+static inline __u32 tipc_addr(unsigned int zone,
+                             unsigned int cluster,
+                             unsigned int node)
+{
+       return (zone << 24) | (cluster << 12) | node;
+}
+
+static inline unsigned int tipc_zone(__u32 addr)
+{
+       return addr >> 24;
+}
+
+static inline unsigned int tipc_cluster(__u32 addr)
+{
+       return (addr >> 12) & 0xfff;
+}
+
+static inline unsigned int tipc_node(__u32 addr)
+{
+       return addr & 0xfff;
+}
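
As an illustration (a user-space sketch, assuming <linux/tipc.h> is installed and usable outside the kernel), the address <1.1.10> packs into 0x0100100a and decomposes back into its zone, cluster, and node fields:

#include <assert.h>
#include <linux/tipc.h>

int main(void)
{
        __u32 a = tipc_addr(1, 1, 10);  /* the TIPC network address <1.1.10> */

        assert(a == 0x0100100a);
        assert(tipc_zone(a) == 1);
        assert(tipc_cluster(a) == 1);
        assert(tipc_node(a) == 10);
        return 0;
}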
+
+/*
+ * Application-accessible port name types
+ */
+
+#define TIPC_NET_EVENTS                0       /* network event subscription name type */
+#define TIPC_TOP_SRV           1       /* topology service name type */
+#define TIPC_RESERVED_TYPES    64      /* lowest user-publishable name type */
+
+/* 
+ * Publication scopes when binding port names and port name sequences
+ */
+
+#define TIPC_ZONE_SCOPE         1
+#define TIPC_CLUSTER_SCOPE      2     
+#define TIPC_NODE_SCOPE         3
+
+/*
+ * Limiting values for messages
+ */
+
+#define TIPC_MAX_USER_MSG_SIZE 66000
+
+/* 
+ * Message importance levels
+ */
+
+#define TIPC_LOW_IMPORTANCE            0  /* default */
+#define TIPC_MEDIUM_IMPORTANCE         1
+#define TIPC_HIGH_IMPORTANCE           2
+#define TIPC_CRITICAL_IMPORTANCE       3
+
+/* 
+ * Msg rejection/connection shutdown reasons
+ */
+
+#define TIPC_OK                        0
+#define TIPC_ERR_NO_NAME       1
+#define TIPC_ERR_NO_PORT       2
+#define TIPC_ERR_NO_NODE       3
+#define TIPC_ERR_OVERLOAD      4
+#define TIPC_CONN_SHUTDOWN     5
+
+/*
+ * TIPC topology subscription service definitions
+ */
+
+#define TIPC_SUB_PORTS         0x01    /* filter for port availability */
+#define TIPC_SUB_SERVICE       0x02    /* filter for service availability */
+#if 0
+/* The following filter options are not currently implemented */
+#define TIPC_SUB_NO_BIND_EVTS  0x04    /* filter out "publish" events */
+#define TIPC_SUB_NO_UNBIND_EVTS        0x08    /* filter out "withdraw" events */
+#define TIPC_SUB_SINGLE_EVT    0x10    /* expire after first event */
+#endif
+
+#define TIPC_WAIT_FOREVER      (~0)    /* timeout for permanent subscription */
+
+struct tipc_subscr {
+       struct tipc_name_seq seq;       /* name sequence of interest */
+       __u32 timeout;                  /* subscription duration (in ms) */
+       __u32 filter;                   /* bitmask of filter options */
+       char usr_handle[8];             /* available for subscriber use */
+};
+
+
+#define TIPC_PUBLISHED          1      /* publication event */
+#define TIPC_WITHDRAWN          2      /* withdraw event */
+#define TIPC_SUBSCR_TIMEOUT     3      /* subscription timeout event */
+
+struct tipc_event {
+       __u32 event;                    /* event type */
+       __u32 found_lower;              /* lowest matching name seq instance */
+       __u32 found_upper;              /* highest matching name seq instance */
+       struct tipc_portid port;        /* associated port */
+       struct tipc_subscr s;           /* associated subscription */
+};
+
+/*
+ * Socket API
+ */
+
+#ifndef AF_TIPC
+#define AF_TIPC                30
+#endif
+
+#ifndef PF_TIPC
+#define PF_TIPC                AF_TIPC
+#endif
+
+#ifndef SOL_TIPC
+#define SOL_TIPC       271
+#endif
+
+#define TIPC_ADDR_NAMESEQ      1
+#define TIPC_ADDR_MCAST         1
+#define TIPC_ADDR_NAME         2
+#define TIPC_ADDR_ID           3
+
+struct sockaddr_tipc {
+       unsigned short family;
+       unsigned char  addrtype;
+       signed   char  scope;
+       union {
+               struct tipc_portid id;
+               struct tipc_name_seq nameseq;
+               struct {
+                       struct tipc_name name;
+                       __u32 domain; /* 0: own zone */
+               } name;
+       } addr;
+};
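
As a user-space sketch of how these address forms are used (the service type 1000, the instance values, and the SOCK_RDM socket are illustrative assumptions, not part of this patch): a server binds to a name sequence, and a client sends a datagram to one instance of that name.

#include <string.h>
#include <sys/socket.h>
#include <linux/tipc.h>

/* bind 'sd' (an AF_TIPC, SOCK_RDM socket) to name sequence {1000, 0..99},
 * visible throughout the zone */
int bind_server(int sd)
{
        struct sockaddr_tipc addr;

        memset(&addr, 0, sizeof(addr));
        addr.family = AF_TIPC;
        addr.addrtype = TIPC_ADDR_NAMESEQ;
        addr.scope = TIPC_ZONE_SCOPE;
        addr.addr.nameseq.type = 1000;          /* hypothetical service type */
        addr.addr.nameseq.lower = 0;
        addr.addr.nameseq.upper = 99;
        return bind(sd, (struct sockaddr *)&addr, sizeof(addr));
}

/* send a datagram to whichever port has published name {1000, 17} */
int send_to_instance(int sd, const void *msg, size_t len)
{
        struct sockaddr_tipc dest;

        memset(&dest, 0, sizeof(dest));
        dest.family = AF_TIPC;
        dest.addrtype = TIPC_ADDR_NAME;
        dest.addr.name.name.type = 1000;
        dest.addr.name.name.instance = 17;
        dest.addr.name.domain = 0;              /* 0: look up in own zone */
        return sendto(sd, msg, len, 0,
                      (struct sockaddr *)&dest, sizeof(dest));
}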
+
+/*
+ * Ancillary data objects supported by recvmsg()
+ */
+
+#define TIPC_ERRINFO   1       /* error info */
+#define TIPC_RETDATA   2       /* returned data */
+#define TIPC_DESTNAME  3       /* destination name */
+
+/*
+ * TIPC-specific socket option values
+ */
+
+#define TIPC_IMPORTANCE                127     /* Default: TIPC_LOW_IMPORTANCE */
+#define TIPC_SRC_DROPPABLE     128     /* Default: 0 (resend congested msg) */
+#define TIPC_DEST_DROPPABLE    129     /* Default: based on socket type */
+#define TIPC_CONN_TIMEOUT      130     /* Default: 8000 (ms)  */
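
A brief user-space sketch of setting these options (the particular values are illustrative; 'sd' is assumed to be an existing AF_TIPC socket):

#include <sys/socket.h>
#include <linux/tipc.h>

/* raise the sending importance, let the sender's own messages be dropped
 * under congestion, and shorten the connect timeout to 2 seconds */
int tune_tipc_socket(int sd)
{
        int imp = TIPC_CRITICAL_IMPORTANCE;
        int droppable = 1;
        int timeout_ms = 2000;

        if (setsockopt(sd, SOL_TIPC, TIPC_IMPORTANCE, &imp, sizeof(imp)))
                return -1;
        if (setsockopt(sd, SOL_TIPC, TIPC_SRC_DROPPABLE,
                       &droppable, sizeof(droppable)))
                return -1;
        return setsockopt(sd, SOL_TIPC, TIPC_CONN_TIMEOUT,
                          &timeout_ms, sizeof(timeout_ms));
}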
+
+/*
+ * Bearer
+ */
+
+/* Identifiers of supported TIPC media types */
+
+#define TIPC_MEDIA_TYPE_ETH    1
+
+/* Maximum sizes of TIPC bearer-related names (including terminating NUL) */ 
+
+#define TIPC_MAX_MEDIA_NAME    16      /* format = media */
+#define TIPC_MAX_IF_NAME       16      /* format = interface */
+#define TIPC_MAX_BEARER_NAME   32      /* format = media:interface */
+#define TIPC_MAX_LINK_NAME     60      /* format = Z.C.N:interface-Z.C.N:interface */
+
+struct tipc_media_addr {
+       __u32  type;
+       union {
+               __u8   eth_addr[6];     /* Ethernet bearer */ 
+#if 0
+               /* Prototypes for other possible bearer types */
+
+               struct {
+                       __u16 sin_family;
+                       __u16 sin_port;
+                       struct {
+                               __u32 s_addr;
+                       } sin_addr;
+                       char pad[4];
+               } addr_in;              /* IP-based bearer */
+               __u16  sock_descr;      /* generic socket bearer */
+#endif
+       } dev_addr;
+};
+
+
+/* Link priority limits (range from 0 to # priorities - 1) */
+
+#define TIPC_NUM_LINK_PRI 32
+
+/* Link tolerance limits (min, default, max), in ms */
+
+#define TIPC_MIN_LINK_TOL 50
+#define TIPC_DEF_LINK_TOL 1500
+#define TIPC_MAX_LINK_TOL 30000
+
+/* Link window limits (min, default, max), in packets */
+
+#define TIPC_MIN_LINK_WIN 16
+#define TIPC_DEF_LINK_WIN 50
+#define TIPC_MAX_LINK_WIN 150
+
+/*
+ * Configuration
+ *
+ * All configuration management messaging involves sending a request message
+ * to the TIPC configuration service on a node, which sends a reply message
+ * back.  (In the future multi-message replies may be supported.)
+ *
+ * Both request and reply messages consist of a transport header and payload.
+ * The transport header contains info about the desired operation;
+ * the payload consists of zero or more type/length/value (TLV) items
+ * which specify parameters or results for the operation.
+ *
+ * For many operations, the request and reply messages have a fixed number
+ * of TLVs (usually zero or one); however, some reply messages may return 
+ * a variable number of TLVs.  A failed request is denoted by the presence
+ * of an "error string" TLV in the reply message instead of the TLV(s) the
+ * reply should contain if the request succeeds.
+ */
+#define TIPC_CFG_SRV   0               /* configuration service name type */
+
+/* 
+ * Public commands:
+ * May be issued by any process.
+ * Accepted by own node, or by remote node only if remote management enabled.                       
+ */
+#define  TIPC_CMD_NOOP             0x0000    /* tx none, rx none */
+#define  TIPC_CMD_GET_NODES         0x0001    /* tx net_addr, rx node_info(s) */
+#define  TIPC_CMD_GET_MEDIA_NAMES   0x0002    /* tx none, rx media_name(s) */
+#define  TIPC_CMD_GET_BEARER_NAMES  0x0003    /* tx none, rx bearer_name(s) */
+#define  TIPC_CMD_GET_LINKS         0x0004    /* tx net_addr, rx link_info(s) */
+#define  TIPC_CMD_SHOW_NAME_TABLE   0x0005    /* tx name_tbl_query, rx ultra_string */
+#define  TIPC_CMD_SHOW_PORTS        0x0006    /* tx none, rx ultra_string */
+#define  TIPC_CMD_SHOW_LINK_STATS   0x000B    /* tx link_name, rx ultra_string */
+
+#if 0
+#define  TIPC_CMD_SHOW_PORT_STATS   0x0008    /* tx port_ref, rx ultra_string */
+#define  TIPC_CMD_RESET_PORT_STATS  0x0009    /* tx port_ref, rx none */
+#define  TIPC_CMD_GET_ROUTES        0x000A    /* tx ?, rx ? */
+#define  TIPC_CMD_GET_LINK_PEER     0x000D    /* tx link_name, rx ? */
+#endif
+
+/* 
+ * Protected commands:
+ * May only be issued by "network administration capable" process.
+ * Accepted by own node, or by remote node only if remote management enabled
+ * and this node is zone manager.                       
+ */
+
+#define  TIPC_CMD_GET_REMOTE_MNG    0x4003    /* tx none, rx unsigned */
+#define  TIPC_CMD_GET_MAX_PORTS     0x4004    /* tx none, rx unsigned */
+#define  TIPC_CMD_GET_MAX_PUBL      0x4005    /* tx none, rx unsigned */
+#define  TIPC_CMD_GET_MAX_SUBSCR    0x4006    /* tx none, rx unsigned */
+#define  TIPC_CMD_GET_MAX_ZONES     0x4007    /* tx none, rx unsigned */
+#define  TIPC_CMD_GET_MAX_CLUSTERS  0x4008    /* tx none, rx unsigned */
+#define  TIPC_CMD_GET_MAX_NODES     0x4009    /* tx none, rx unsigned */
+#define  TIPC_CMD_GET_MAX_SLAVES    0x400A    /* tx none, rx unsigned */
+#define  TIPC_CMD_GET_NETID         0x400B    /* tx none, rx unsigned */
+
+#define  TIPC_CMD_ENABLE_BEARER     0x4101    /* tx bearer_config, rx none */
+#define  TIPC_CMD_DISABLE_BEARER    0x4102    /* tx bearer_name, rx none */
+#define  TIPC_CMD_SET_LINK_TOL      0x4107    /* tx link_config, rx none */
+#define  TIPC_CMD_SET_LINK_PRI      0x4108    /* tx link_config, rx none */
+#define  TIPC_CMD_SET_LINK_WINDOW   0x4109    /* tx link_config, rx none */
+#define  TIPC_CMD_SET_LOG_SIZE      0x410A    /* tx unsigned, rx none */
+#define  TIPC_CMD_DUMP_LOG          0x410B    /* tx none, rx ultra_string */
+#define  TIPC_CMD_RESET_LINK_STATS  0x410C    /* tx link_name, rx none */
+
+#if 0
+#define  TIPC_CMD_CREATE_LINK       0x4103    /* tx link_create, rx none */
+#define  TIPC_CMD_REMOVE_LINK       0x4104    /* tx link_name, rx none */
+#define  TIPC_CMD_BLOCK_LINK        0x4105    /* tx link_name, rx none */
+#define  TIPC_CMD_UNBLOCK_LINK      0x4106    /* tx link_name, rx none */
+#endif
+
+/* 
+ * Private commands:
+ * May only be issued by "network administration capable" process.
+ * Accepted by own node only; cannot be used on a remote node.                       
+ */
+
+#define  TIPC_CMD_SET_NODE_ADDR     0x8001    /* tx net_addr, rx none */
+#if 0
+#define  TIPC_CMD_SET_ZONE_MASTER   0x8002    /* tx none, rx none */
+#endif
+#define  TIPC_CMD_SET_REMOTE_MNG    0x8003    /* tx unsigned, rx none */
+#define  TIPC_CMD_SET_MAX_PORTS     0x8004    /* tx unsigned, rx none */
+#define  TIPC_CMD_SET_MAX_PUBL      0x8005    /* tx unsigned, rx none */
+#define  TIPC_CMD_SET_MAX_SUBSCR    0x8006    /* tx unsigned, rx none */
+#define  TIPC_CMD_SET_MAX_ZONES     0x8007    /* tx unsigned, rx none */
+#define  TIPC_CMD_SET_MAX_CLUSTERS  0x8008    /* tx unsigned, rx none */
+#define  TIPC_CMD_SET_MAX_NODES     0x8009    /* tx unsigned, rx none */
+#define  TIPC_CMD_SET_MAX_SLAVES    0x800A    /* tx unsigned, rx none */
+#define  TIPC_CMD_SET_NETID         0x800B    /* tx unsigned, rx none */
+
+/*
+ * TLV types defined for TIPC
+ */
+
+#define TIPC_TLV_NONE          0       /* no TLV present */
+#define TIPC_TLV_VOID          1       /* empty TLV (0 data bytes)*/
+#define TIPC_TLV_UNSIGNED      2       /* 32-bit integer */
+#define TIPC_TLV_STRING                3       /* char[128] (max) */
+#define TIPC_TLV_LARGE_STRING  4       /* char[2048] (max) */
+#define TIPC_TLV_ULTRA_STRING  5       /* char[32768] (max) */
+
+#define TIPC_TLV_ERROR_STRING  16      /* char[128] containing "error code" */
+#define TIPC_TLV_NET_ADDR      17      /* 32-bit integer denoting <Z.C.N> */
+#define TIPC_TLV_MEDIA_NAME    18      /* char[MAX_MEDIA_NAME] */
+#define TIPC_TLV_BEARER_NAME   19      /* char[MAX_BEARER_NAME] */
+#define TIPC_TLV_LINK_NAME     20      /* char[MAX_LINK_NAME] */
+#define TIPC_TLV_NODE_INFO     21      /* struct tipc_node_info */
+#define TIPC_TLV_LINK_INFO     22      /* struct tipc_link_info */
+#define TIPC_TLV_BEARER_CONFIG  23     /* struct tipc_bearer_config */
+#define TIPC_TLV_LINK_CONFIG    24     /* struct tipc_link_config */
+#define TIPC_TLV_NAME_TBL_QUERY        25      /* struct tipc_name_table_query */
+#define TIPC_TLV_PORT_REF      26      /* 32-bit port reference */
+
+struct tipc_node_info {
+       __u32 addr;                     /* network address of node */
+       __u32 up;                       /* 0=down, 1= up */
+};
+
+struct tipc_link_info {
+       __u32 dest;                     /* network address of peer node */
+       __u32 up;                       /* 0=down, 1=up */
+       char str[TIPC_MAX_LINK_NAME];   /* link name */
+};
+
+struct tipc_bearer_config {
+       __u32 priority;                 /* Range [1,31]. Override per link  */
+       __u32 detect_scope;     
+       char name[TIPC_MAX_BEARER_NAME];
+};
+
+struct tipc_link_config {
+       __u32 value;
+       char name[TIPC_MAX_LINK_NAME];
+};
+
+#define TIPC_NTQ_ALLTYPES 0x80000000
+
+struct tipc_name_table_query {
+       __u32 depth;    /* 1:type, 2:+name info, 3:+port info, 4+:+debug info */
+       __u32 type;     /* {t,l,u} info ignored if high bit of "depth" is set */
+       __u32 lowbound; /* (i.e. displays all entries of name table) */
+       __u32 upbound;
+};
+
+/*
+ * The error string TLV is a null-terminated string describing the cause 
+ * of the request failure.  To simplify error processing (and to save space)
+ * the first character of the string can be a special error code character
+ * (lying within the range 0x80 to 0xFF) which represents a pre-defined reason.
+ */
+
+#define TIPC_CFG_TLV_ERROR     "\x80"  /* request contains incorrect TLV(s) */
+#define TIPC_CFG_NOT_NET_ADMIN "\x81"  /* must be network administrator */
+#define TIPC_CFG_NOT_ZONE_MSTR "\x82"  /* must be zone master */
+#define TIPC_CFG_NO_REMOTE     "\x83"  /* remote management not enabled */
+#define TIPC_CFG_NOT_SUPPORTED "\x84"  /* request is not supported by TIPC */
+#define TIPC_CFG_INVALID_VALUE "\x85"  /* request has invalid argument value */
+
+#if 0
+/* prototypes TLV structures for proposed commands */
+struct tipc_link_create {
+       __u32   domain;
+       struct tipc_media_addr peer_addr;
+       char bearer_name[MAX_BEARER_NAME];
+};
+
+struct tipc_route_info {
+       __u32 dest;
+       __u32 router;
+};
+#endif
+
+/*
+ * A TLV consists of a descriptor, followed by the TLV value.
+ * TLV descriptor fields are stored in network byte order; 
+ * TLV values must also be stored in network byte order (where applicable).
+ * TLV descriptors must be aligned to addresses which are multiple of 4,
+ * so up to 3 bytes of padding may exist at the end of the TLV value area.
+ * There must not be any padding between the TLV descriptor and its value.
+ */
+
+struct tlv_desc {
+       __u16 tlv_len;          /* TLV length (descriptor + value) */
+       __u16 tlv_type;         /* TLV identifier */
+};
+
+#define TLV_ALIGNTO 4
+
+#define TLV_ALIGN(datalen) (((datalen)+(TLV_ALIGNTO-1)) & ~(TLV_ALIGNTO-1))
+#define TLV_LENGTH(datalen) (sizeof(struct tlv_desc) + (datalen))
+#define TLV_SPACE(datalen) (TLV_ALIGN(TLV_LENGTH(datalen)))
+#define TLV_DATA(tlv) ((void *)((char *)(tlv) + TLV_LENGTH(0)))
+
+static inline int TLV_OK(const void *tlv, __u16 space)
+{
+       /*
+        * Would also like to check that "tlv" is a multiple of 4,
+        * but don't know how to do this in a portable way.
+        * - Tried doing (!(tlv & (TLV_ALIGNTO-1))), but GCC compiler
+        *   won't allow binary "&" with a pointer.
+        * - Tried casting "tlv" to integer type, but causes warning about size
+        *   mismatch when pointer is bigger than chosen type (int, long, ...).
+        */
+
+       return (space >= TLV_SPACE(0)) &&
+               (ntohs(((struct tlv_desc *)tlv)->tlv_len) <= space);
+}
+
+static inline int TLV_CHECK(const void *tlv, __u16 space, __u16 exp_type)
+{
+       return TLV_OK(tlv, space) && 
+               (ntohs(((struct tlv_desc *)tlv)->tlv_type) == exp_type);
+}
+
+static inline int TLV_SET(void *tlv, __u16 type, void *data, __u16 len)
+{
+       struct tlv_desc *tlv_ptr;
+       int tlv_len;
+
+       tlv_len = TLV_LENGTH(len);
+       tlv_ptr = (struct tlv_desc *)tlv;
+       tlv_ptr->tlv_type = htons(type);
+       tlv_ptr->tlv_len  = htons(tlv_len);
+       if (len && data)
+               memcpy(TLV_DATA(tlv_ptr), data, len);
+       return TLV_SPACE(len);
+}
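
A user-space sketch of the TLV helpers above (it assumes <linux/tipc.h> can be included outside the kernel together with <arpa/inet.h> and <string.h>): pack a 32-bit value into a TLV, then validate and read it back.

#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>
#include <linux/tipc.h>

int main(void)
{
        __u32 buf[TLV_SPACE(sizeof(__u32)) / sizeof(__u32)]; /* keeps TLV aligned */
        __u32 netid = htonl(4711);      /* TLV values travel in network byte order */
        __u32 out;
        int space;

        space = TLV_SET(buf, TIPC_TLV_UNSIGNED, &netid, sizeof(netid));

        if (TLV_CHECK(buf, space, TIPC_TLV_UNSIGNED)) {
                memcpy(&out, TLV_DATA(buf), sizeof(out));
                printf("value = %u, TLV space = %d\n", ntohl(out), space);
        }
        return 0;
}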
+
+/*
+ * A TLV list descriptor simplifies processing of messages 
+ * containing multiple TLVs.
+ */
+
+struct tlv_list_desc {
+       struct tlv_desc *tlv_ptr;       /* ptr to current TLV */
+       __u32 tlv_space;                /* # bytes from curr TLV to list end */
+};
+
+static inline void TLV_LIST_INIT(struct tlv_list_desc *list, 
+                                void *data, __u32 space)
+{
+       list->tlv_ptr = (struct tlv_desc *)data;
+       list->tlv_space = space;
+}
+
+static inline int TLV_LIST_EMPTY(struct tlv_list_desc *list)
+{ 
+       return (list->tlv_space == 0);
+}
+
+static inline int TLV_LIST_CHECK(struct tlv_list_desc *list, __u16 exp_type)
+{
+       return TLV_CHECK(list->tlv_ptr, list->tlv_space, exp_type);
+}
+
+static inline void *TLV_LIST_DATA(struct tlv_list_desc *list)
+{
+       return TLV_DATA(list->tlv_ptr);
+}
+
+static inline void TLV_LIST_STEP(struct tlv_list_desc *list)
+{
+       __u16 tlv_space = TLV_ALIGN(ntohs(list->tlv_ptr->tlv_len));
+
+       list->tlv_ptr = (struct tlv_desc *)((char *)list->tlv_ptr + tlv_space);
+       list->tlv_space -= tlv_space;
+}
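
A sketch of walking a multi-TLV reply with the list helpers above (for instance the media names returned by TIPC_CMD_GET_MEDIA_NAMES); 'rep' and 'len' stand for an already received reply payload:

#include <arpa/inet.h>
#include <stdio.h>
#include <linux/tipc.h>

/* print every media name TLV found in a reply payload of 'len' bytes */
void print_media_names(void *rep, __u32 len)
{
        struct tlv_list_desc list;

        TLV_LIST_INIT(&list, rep, len);
        while (!TLV_LIST_EMPTY(&list)) {
                if (!TLV_OK(list.tlv_ptr, list.tlv_space))
                        break;  /* malformed or truncated TLV: stop walking */
                if (TLV_LIST_CHECK(&list, TIPC_TLV_MEDIA_NAME))
                        printf("media: %s\n", (char *)TLV_LIST_DATA(&list));
                TLV_LIST_STEP(&list);
        }
}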
+
+/*
+ * Configuration messages exchanged via NETLINK_GENERIC use the following
+ * family id, name, version and command.
+ */
+#define TIPC_GENL_FAMILY       0x222
+#define TIPC_GENL_NAME         "TIPC"
+#define TIPC_GENL_VERSION      0x1
+#define TIPC_GENL_CMD          0x1
+
+/*
+ * TIPC specific header used in NETLINK_GENERIC requests.
+ */
+struct tipc_genlmsghdr {
+       __u32 dest;             /* Destination address */
+       __u16 cmd;              /* Command */
+       __u16 reserved;         /* Unused */
+};
+
+#define TIPC_GENL_HDRLEN       NLMSG_ALIGN(sizeof(struct tipc_genlmsghdr))
+
+/*
+ * Configuration messages exchanged via TIPC sockets use the TIPC configuration 
+ * message header, which is defined below.  This structure is analogous 
+ * to the Netlink message header, but fields are stored in network byte order 
+ * and no padding is permitted between the header and the message data 
+ * that follows.
+ */
+
+struct tipc_cfg_msg_hdr {
+       __u32 tcm_len;          /* Message length (including header) */
+       __u16 tcm_type;         /* Command type */
+       __u16 tcm_flags;        /* Additional flags */
+       char  tcm_reserved[8];  /* Unused */
+};
+
+#define TCM_F_REQUEST  0x1     /* Flag: Request message */
+#define TCM_F_MORE     0x2     /* Flag: Message to be continued */
+
+#define TCM_ALIGN(datalen)  (((datalen)+3) & ~3)
+#define TCM_LENGTH(datalen) (sizeof(struct tipc_cfg_msg_hdr) + (datalen))
+#define TCM_SPACE(datalen)  (TCM_ALIGN(TCM_LENGTH(datalen)))
+#define TCM_DATA(tcm_hdr)   ((void *)((char *)(tcm_hdr) + TCM_LENGTH(0)))
+
+static inline int TCM_SET(void *msg, __u16 cmd, __u16 flags,
+                         void *data, __u16 data_len)
+{
+       struct tipc_cfg_msg_hdr *tcm_hdr;
+       int msg_len;
+
+       msg_len = TCM_LENGTH(data_len);
+       tcm_hdr = (struct tipc_cfg_msg_hdr *)msg;
+       tcm_hdr->tcm_len   = htonl(msg_len);
+       tcm_hdr->tcm_type  = htons(cmd);
+       tcm_hdr->tcm_flags = htons(flags);
+       if (data_len && data)
+               memcpy(TCM_DATA(msg), data, data_len);
+       return TCM_SPACE(data_len);
+}
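
Putting the pieces together, a user-space sketch of building one complete configuration request, a TIPC_CMD_GET_NODES query for zone <1.0.0>; the finished buffer would then be sent to the configuration service name {TIPC_CFG_SRV, <node>} over an AF_TIPC socket (the buffer handling shown here is an illustration, not code from this patch):

#include <arpa/inet.h>
#include <string.h>
#include <linux/tipc.h>

/* build the request into 'req', which must be 4-byte aligned and hold at
 * least TCM_SPACE(TLV_SPACE(sizeof(__u32))) bytes; returns the request size */
int build_get_nodes_request(void *req)
{
        __u32 tlv[TLV_SPACE(sizeof(__u32)) / sizeof(__u32)];
        __u32 domain = htonl(tipc_addr(1, 0, 0));       /* zone <1.0.0> */
        int tlv_space;

        tlv_space = TLV_SET(tlv, TIPC_TLV_NET_ADDR, &domain, sizeof(domain));
        return TCM_SET(req, TIPC_CMD_GET_NODES, TCM_F_REQUEST, tlv, tlv_space);
}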
+
+#endif
diff --git a/include/net/tipc/tipc.h b/include/net/tipc/tipc.h
new file mode 100644 (file)
index 0000000..1d4d8d0
--- /dev/null
@@ -0,0 +1,255 @@
+/*
+ * include/net/tipc/tipc.h: Main include file for TIPC users
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _NET_TIPC_H_
+#define _NET_TIPC_H_
+
+#ifdef __KERNEL__
+
+#include <linux/tipc.h>
+#include <linux/skbuff.h>
+
+/* 
+ * Native API
+ * ----------
+ */
+
+/*
+ * TIPC operating mode routines
+ */
+
+u32 tipc_get_addr(void);
+
+#define TIPC_NOT_RUNNING  0
+#define TIPC_NODE_MODE    1
+#define TIPC_NET_MODE     2
+
+typedef void (*tipc_mode_event)(void *usr_handle, int mode, u32 addr);
+
+int tipc_attach(unsigned int *userref, tipc_mode_event, void *usr_handle);
+
+void tipc_detach(unsigned int userref);
+
+int tipc_get_mode(void);
+
+/*
+ * TIPC port manipulation routines
+ */
+
+typedef void (*tipc_msg_err_event) (void *usr_handle,
+                                   u32 portref,
+                                   struct sk_buff **buf,
+                                   unsigned char const *data,
+                                   unsigned int size,
+                                   int reason, 
+                                   struct tipc_portid const *attmpt_destid);
+
+typedef void (*tipc_named_msg_err_event) (void *usr_handle,
+                                         u32 portref,
+                                         struct sk_buff **buf,
+                                         unsigned char const *data,
+                                         unsigned int size,
+                                         int reason, 
+                                         struct tipc_name_seq const *attmpt_dest);
+
+typedef void (*tipc_conn_shutdown_event) (void *usr_handle,
+                                         u32 portref,
+                                         struct sk_buff **buf,
+                                         unsigned char const *data,
+                                         unsigned int size,
+                                         int reason);
+
+typedef void (*tipc_msg_event) (void *usr_handle,
+                               u32 portref,
+                               struct sk_buff **buf,
+                               unsigned char const *data,
+                               unsigned int size,
+                               unsigned int importance, 
+                               struct tipc_portid const *origin);
+
+typedef void (*tipc_named_msg_event) (void *usr_handle,
+                                     u32 portref,
+                                     struct sk_buff **buf,
+                                     unsigned char const *data,
+                                     unsigned int size,
+                                     unsigned int importance, 
+                                     struct tipc_portid const *orig,
+                                     struct tipc_name_seq const *dest);
+
+typedef void (*tipc_conn_msg_event) (void *usr_handle,
+                                    u32 portref,
+                                    struct sk_buff **buf,
+                                    unsigned char const *data,
+                                    unsigned int size);
+
+typedef void (*tipc_continue_event) (void *usr_handle, 
+                                    u32 portref);
+
+int tipc_createport(unsigned int tipc_user, 
+                   void *usr_handle, 
+                   unsigned int importance, 
+                   tipc_msg_err_event error_cb, 
+                   tipc_named_msg_err_event named_error_cb, 
+                   tipc_conn_shutdown_event conn_error_cb, 
+                   tipc_msg_event message_cb, 
+                   tipc_named_msg_event named_message_cb, 
+                   tipc_conn_msg_event conn_message_cb, 
+                   tipc_continue_event continue_event_cb, /* may be NULL */
+                   u32 *portref);
+
+int tipc_deleteport(u32 portref);
+
+int tipc_ownidentity(u32 portref, struct tipc_portid *port);
+
+int tipc_portimportance(u32 portref, unsigned int *importance);
+int tipc_set_portimportance(u32 portref, unsigned int importance);
+
+int tipc_portunreliable(u32 portref, unsigned int *isunreliable);
+int tipc_set_portunreliable(u32 portref, unsigned int isunreliable);
+
+int tipc_portunreturnable(u32 portref, unsigned int *isunreturnable);
+int tipc_set_portunreturnable(u32 portref, unsigned int isunreturnable);
+
+int tipc_publish(u32 portref, unsigned int scope, 
+                struct tipc_name_seq const *name_seq);
+int tipc_withdraw(u32 portref, unsigned int scope,
+                 struct tipc_name_seq const *name_seq); /* 0: all */
+
+int tipc_connect2port(u32 portref, struct tipc_portid const *port);
+
+int tipc_disconnect(u32 portref);
+
+int tipc_shutdown(u32 ref); /* Sends SHUTDOWN msg */
+
+int tipc_isconnected(u32 portref, int *isconnected);
+
+int tipc_peer(u32 portref, struct tipc_portid *peer);
+
+int tipc_ref_valid(u32 portref); 
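
To make the call flow concrete, a hedged kernel-space sketch (not part of this patch) of a module-style user that attaches to TIPC, creates a port with only a named-message callback, and publishes name {1000, 1}; the type value 1000 and the NULL callbacks are illustrative assumptions:

#include <linux/kernel.h>
#include <net/tipc/tipc.h>

static unsigned int demo_userref;
static u32 demo_portref;

/* invoked for each message arriving on the published name */
static void demo_named_msg(void *usr_handle, u32 portref, struct sk_buff **buf,
                           unsigned char const *data, unsigned int size,
                           unsigned int importance,
                           struct tipc_portid const *orig,
                           struct tipc_name_seq const *dest)
{
        printk(KERN_INFO "demo: %u bytes from <%u.%u.%u:%u>\n", size,
               tipc_zone(orig->node), tipc_cluster(orig->node),
               tipc_node(orig->node), orig->ref);
        /* the demo only inspects the message; it does not take over *buf */
}

static int demo_start(void)
{
        struct tipc_name_seq seq = { .type = 1000, .lower = 1, .upper = 1 };
        int res;

        res = tipc_attach(&demo_userref, NULL, NULL);   /* no mode-event callback */
        if (res)
                return res;

        res = tipc_createport(demo_userref, NULL, TIPC_LOW_IMPORTANCE,
                              NULL, NULL, NULL,           /* error callbacks */
                              NULL, demo_named_msg, NULL, /* message callbacks */
                              NULL,                       /* continue callback */
                              &demo_portref);
        if (res) {
                tipc_detach(demo_userref);
                return res;
        }

        return tipc_publish(demo_portref, TIPC_NODE_SCOPE, &seq);
}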
+
+/*
+ * TIPC messaging routines
+ */
+
+#define TIPC_PORT_IMPORTANCE 100       /* send using current port setting */
+
+
+int tipc_send(u32 portref,
+             unsigned int num_sect,
+             struct iovec const *msg_sect);
+
+int tipc_send_buf(u32 portref,
+                 struct sk_buff *buf,
+                 unsigned int dsz);
+
+int tipc_send2name(u32 portref, 
+                  struct tipc_name const *name, 
+                  u32 domain,  /* 0:own zone */
+                  unsigned int num_sect,
+                  struct iovec const *msg_sect);
+
+int tipc_send_buf2name(u32 portref,
+                      struct tipc_name const *name,
+                      u32 domain,
+                      struct sk_buff *buf,
+                      unsigned int dsz);
+
+int tipc_forward2name(u32 portref, 
+                     struct tipc_name const *name, 
+                     u32 domain,   /*0: own zone */
+                     unsigned int section_count,
+                     struct iovec const *msg_sect,
+                     struct tipc_portid const *origin,
+                     unsigned int importance);
+
+int tipc_forward_buf2name(u32 portref,
+                         struct tipc_name const *name,
+                         u32 domain,
+                         struct sk_buff *buf,
+                         unsigned int dsz,
+                         struct tipc_portid const *orig,
+                         unsigned int importance);
+
+int tipc_send2port(u32 portref,
+                  struct tipc_portid const *dest,
+                  unsigned int num_sect,
+                  struct iovec const *msg_sect);
+
+int tipc_send_buf2port(u32 portref,
+                      struct tipc_portid const *dest,
+                      struct sk_buff *buf,
+                      unsigned int dsz);
+
+int tipc_forward2port(u32 portref,
+                     struct tipc_portid const *dest,
+                     unsigned int num_sect,
+                     struct iovec const *msg_sect,
+                     struct tipc_portid const *origin,
+                     unsigned int importance);
+
+int tipc_forward_buf2port(u32 portref,
+                         struct tipc_portid const *dest,
+                         struct sk_buff *buf,
+                         unsigned int dsz,
+                         struct tipc_portid const *orig,
+                         unsigned int importance);
+
+int tipc_multicast(u32 portref, 
+                  struct tipc_name_seq const *seq, 
+                  u32 domain,  /* 0:own zone */
+                  unsigned int section_count,
+                  struct iovec const *msg);
+
+#if 0
+int tipc_multicast_buf(u32 portref, 
+                      struct tipc_name_seq const *seq, 
+                      u32 domain,      /* 0:own zone */
+                      void *buf,
+                      unsigned int size);
+#endif
+
+/*
+ * TIPC subscription routines
+ */
+
+int tipc_ispublished(struct tipc_name const *name);
+
+/*
+ * Get number of available nodes within specified domain (excluding own node)
+ */
+
+unsigned int tipc_available_nodes(const u32 domain);
+
+#endif
+
+#endif
diff --git a/include/net/tipc/tipc_bearer.h b/include/net/tipc/tipc_bearer.h
new file mode 100644 (file)
index 0000000..a3daf69
--- /dev/null
@@ -0,0 +1,92 @@
+/*
+ * include/net/tipc/tipc_bearer.h: Include file for privileged access to TIPC bearers
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _NET_TIPC_BEARER_H_
+#define _NET_TIPC_BEARER_H_
+
+#ifdef __KERNEL__
+
+#include <linux/tipc.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+
+/**
+ * struct tipc_bearer - TIPC bearer info available to privileged users
+ * @usr_handle: pointer to additional user-defined information about bearer
+ * @mtu: max packet size bearer can support
+ * @blocked: non-zero if bearer is blocked
+ * @lock: spinlock for controlling access to bearer
+ * @addr: media-specific address associated with bearer
+ * @name: bearer name (format = media:interface)
+ * 
+ * Note: TIPC initializes the "name" and "lock" fields; the user is responsible
+ * for initializing all other fields when a bearer is enabled.
+ */
+
+struct tipc_bearer {
+       void *usr_handle;
+       u32 mtu;
+       int blocked;
+       spinlock_t lock;
+       struct tipc_media_addr addr;
+       char name[TIPC_MAX_BEARER_NAME];
+};
+
+
+int  tipc_register_media(u32 media_type,
+                        char *media_name, 
+                        int (*enable)(struct tipc_bearer *), 
+                        void (*disable)(struct tipc_bearer *), 
+                        int (*send_msg)(struct sk_buff *, 
+                                        struct tipc_bearer *,
+                                        struct tipc_media_addr *), 
+                        char *(*addr2str)(struct tipc_media_addr *a,
+                                          char *str_buf,
+                                          int str_size),
+                        struct tipc_media_addr *bcast_addr,
+                        const u32 bearer_priority,
+                        const u32 link_tolerance,  /* [ms] */
+                        const u32 send_window_limit); 
+
+void tipc_recv_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr);
+
+int  tipc_block_bearer(const char *name);
+void tipc_continue(struct tipc_bearer *tb_ptr); 
+
+int tipc_enable_bearer(const char *bearer_name, u32 bcast_scope, u32 priority);
+int tipc_disable_bearer(const char *name);
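
A hedged sketch of what a media driver built on this registration API might look like; it reuses TIPC_MEDIA_TYPE_ETH (the only media type defined so far), and the MTU, priority, and do-nothing send routine are illustrative assumptions rather than a working medium:

#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/tipc/tipc_bearer.h>

/* hypothetical broadcast address for the demo medium */
static struct tipc_media_addr demo_bcast = {
        .type = TIPC_MEDIA_TYPE_ETH,
        .dev_addr = { .eth_addr = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff } },
};

static int demo_enable(struct tipc_bearer *tb_ptr)
{
        /* TIPC has already set up tb_ptr->name and tb_ptr->lock; the medium
         * fills in the rest */
        tb_ptr->usr_handle = NULL;
        tb_ptr->mtu = 1500;
        tb_ptr->blocked = 0;
        tb_ptr->addr = demo_bcast;      /* would normally be the own address */
        return 0;
}

static void demo_disable(struct tipc_bearer *tb_ptr)
{
}

static int demo_send(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
                     struct tipc_media_addr *dest)
{
        kfree_skb(buf);                 /* a real medium would transmit 'buf' */
        return 0;
}

static char *demo_addr2str(struct tipc_media_addr *a, char *buf, int sz)
{
        snprintf(buf, sz, "%02x:%02x:%02x:%02x:%02x:%02x",
                 a->dev_addr.eth_addr[0], a->dev_addr.eth_addr[1],
                 a->dev_addr.eth_addr[2], a->dev_addr.eth_addr[3],
                 a->dev_addr.eth_addr[4], a->dev_addr.eth_addr[5]);
        return buf;
}

static int demo_media_register(void)
{
        /* priority, tolerance, and window values are illustrative only */
        return tipc_register_media(TIPC_MEDIA_TYPE_ETH, "demo",
                                   demo_enable, demo_disable, demo_send,
                                   demo_addr2str, &demo_bcast,
                                   10, TIPC_DEF_LINK_TOL, TIPC_DEF_LINK_WIN);
}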
+
+
+#endif
+
+#endif
diff --git a/include/net/tipc/tipc_msg.h b/include/net/tipc/tipc_msg.h
new file mode 100644 (file)
index 0000000..78513f5
--- /dev/null
@@ -0,0 +1,220 @@
+/*
+ * include/net/tipc/tipc_msg.h: Include file for privileged access to TIPC message headers
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _NET_TIPC_MSG_H_
+#define _NET_TIPC_MSG_H_
+
+#ifdef __KERNEL__
+
+struct tipc_msg {
+       u32 hdr[15];
+};
+
+
+/*
+               TIPC user data message header format, version 2:
+
+
+       1 0 9 8 7 6 5 4|3 2 1 0 9 8 7 6|5 4 3 2 1 0 9 8|7 6 5 4 3 2 1 0 
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   w0:|vers | user  |hdr sz |n|d|s|-|          message size           |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   w1:|mstyp| error |rer cnt|lsc|opt p|      broadcast ack no         |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   w2:|        link level ack no      |   broadcast/link level seq no |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   w3:|                       previous node                           |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   w4:|                      originating port                         |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   w5:|                      destination port                         |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+    
+   w6:|                      originating node                         |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   w7:|                      destination node                         |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   w8:|            name type / transport sequence number              |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   w9:|              name instance/multicast lower bound              |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+    
+   wA:|                    multicast upper bound                      |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+    
+      /                                                               /
+      \                           options                             \
+      /                                                               /
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+*/
+
+#define TIPC_CONN_MSG  0
+#define TIPC_MCAST_MSG 1
+#define TIPC_NAMED_MSG 2
+#define TIPC_DIRECT_MSG        3
+
+
+static inline u32 msg_word(struct tipc_msg *m, u32 pos)
+{
+       return ntohl(m->hdr[pos]);
+}
+
+static inline u32 msg_bits(struct tipc_msg *m, u32 w, u32 pos, u32 mask)
+{
+       return (msg_word(m, w) >> pos) & mask;
+}
+
+static inline u32 msg_importance(struct tipc_msg *m)
+{
+       return msg_bits(m, 0, 25, 0xf);
+}
+
+static inline u32 msg_hdr_sz(struct tipc_msg *m)
+{
+       return msg_bits(m, 0, 21, 0xf) << 2;
+}
+
+static inline int msg_short(struct tipc_msg *m)
+{
+       return (msg_hdr_sz(m) == 24);
+}
+
+static inline u32 msg_size(struct tipc_msg *m)
+{
+       return msg_bits(m, 0, 0, 0x1ffff);
+}
+
+static inline u32 msg_data_sz(struct tipc_msg *m)
+{
+       return (msg_size(m) - msg_hdr_sz(m));
+}
+
+static inline unchar *msg_data(struct tipc_msg *m)
+{
+       return ((unchar *)m) + msg_hdr_sz(m);
+}
+
+static inline u32 msg_type(struct tipc_msg *m)
+{
+       return msg_bits(m, 1, 29, 0x7);
+}
+
+static inline u32 msg_direct(struct tipc_msg *m)
+{
+       return (msg_type(m) == TIPC_DIRECT_MSG);
+}
+
+static inline u32 msg_named(struct tipc_msg *m)
+{
+       return (msg_type(m) == TIPC_NAMED_MSG);
+}
+
+static inline u32 msg_mcast(struct tipc_msg *m)
+{
+       return (msg_type(m) == TIPC_MCAST_MSG);
+}
+
+static inline u32 msg_connected(struct tipc_msg *m)
+{
+       return (msg_type(m) == TIPC_CONN_MSG);
+}
+
+static inline u32 msg_errcode(struct tipc_msg *m)
+{
+       return msg_bits(m, 1, 25, 0xf);
+}
+
+static inline u32 msg_prevnode(struct tipc_msg *m)
+{
+       return msg_word(m, 3);
+}
+
+static inline u32 msg_origport(struct tipc_msg *m)
+{
+       return msg_word(m, 4);
+}
+
+static inline u32 msg_destport(struct tipc_msg *m)
+{
+       return msg_word(m, 5);
+}
+
+static inline u32 msg_mc_netid(struct tipc_msg *m)
+{
+       return msg_word(m, 5);
+}
+
+static inline u32 msg_orignode(struct tipc_msg *m)
+{
+       if (likely(msg_short(m)))
+               return msg_prevnode(m);
+       return msg_word(m, 6);
+}
+
+static inline u32 msg_destnode(struct tipc_msg *m)
+{
+       return msg_word(m, 7);
+}
+
+static inline u32 msg_nametype(struct tipc_msg *m)
+{
+       return msg_word(m, 8);
+}
+
+static inline u32 msg_nameinst(struct tipc_msg *m)
+{
+       return msg_word(m, 9);
+}
+
+static inline u32 msg_namelower(struct tipc_msg *m)
+{
+       return msg_nameinst(m);
+}
+
+static inline u32 msg_nameupper(struct tipc_msg *m)
+{
+       return msg_word(m, 10);
+}
+
+static inline char *msg_options(struct tipc_msg *m, u32 *len)
+{
+       u32 pos = msg_bits(m, 1, 16, 0x7);
+
+       if (!pos)
+               return 0;
+       pos = (pos * 4) + 28;
+       *len = msg_hdr_sz(m) - pos;
+       return (char *)&m->hdr[pos/4];
+}
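
For illustration, a small kernel-style sketch (assumptions: only words 0 and 1 are filled in, and the values are made up) that builds a header by hand and reads it back through the accessors above:

#include <linux/kernel.h>
#include <linux/tipc.h>
#include <net/tipc/tipc_msg.h>

static void demo_decode(void)
{
        struct tipc_msg m = { { 0 } };

        /* w0: version 2, high importance, 24-byte header, 124-byte message */
        m.hdr[0] = htonl((2u << 29) | (TIPC_HIGH_IMPORTANCE << 25) |
                         ((24u >> 2) << 21) | 124u);
        /* w1: message type = named message */
        m.hdr[1] = htonl((u32)TIPC_NAMED_MSG << 29);

        printk(KERN_INFO "hdr %u, data %u, named %u, importance %u\n",
               msg_hdr_sz(&m), msg_data_sz(&m), msg_named(&m),
               msg_importance(&m));    /* prints 24, 100, 1, 2 */
}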
+
+#endif
+
+#endif
diff --git a/include/net/tipc/tipc_port.h b/include/net/tipc/tipc_port.h
new file mode 100644 (file)
index 0000000..ec0f0de
--- /dev/null
@@ -0,0 +1,105 @@
+/*
+ * include/net/tipc/tipc_port.h: Include file for privileged access to TIPC ports
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _NET_TIPC_PORT_H_
+#define _NET_TIPC_PORT_H_
+
+#ifdef __KERNEL__
+
+#include <linux/tipc.h>
+#include <linux/skbuff.h>
+#include <net/tipc/tipc_msg.h>
+
+#define TIPC_FLOW_CONTROL_WIN 512
+
+/**
+ * struct tipc_port - native TIPC port info available to privileged users
+ * @usr_handle: pointer to additional user-defined information about port
+ * @lock: pointer to spinlock for controlling access to port
+ * @connected: non-zero if port is currently connected to a peer port
+ * @conn_type: TIPC type used when connection was established
+ * @conn_instance: TIPC instance used when connection was established
+ * @conn_unacked: number of unacknowledged messages received from peer port
+ * @published: non-zero if port has one or more associated names
+ * @congested: non-zero if cannot send because of link or port congestion
+ * @ref: unique reference to port in TIPC object registry
+ * @phdr: preformatted message header used when sending messages
+ */
+
+struct tipc_port {
+       void *usr_handle;
+       spinlock_t *lock;
+       int connected;
+       u32 conn_type;
+       u32 conn_instance;
+       u32 conn_unacked;
+       int published;
+       u32 congested;
+       u32 ref;
+       struct tipc_msg phdr;
+};
+
+
+/**
+ * tipc_createport_raw - create a native TIPC port and return its reference
+ *
+ * Note: 'dispatcher' and 'wakeup' deliver a locked port.
+ */
+
+u32 tipc_createport_raw(void *usr_handle,
+                       u32 (*dispatcher)(struct tipc_port *, struct sk_buff *),
+                       void (*wakeup)(struct tipc_port *),
+                       const u32 importance);
+
+/*
+ * tipc_set_msg_option(): port must be locked.
+ */
+int tipc_set_msg_option(struct tipc_port *tp_ptr,
+                       const char *opt,
+                       const u32 len);
+
+int tipc_reject_msg(struct sk_buff *buf, u32 err);
+
+int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode);
+
+void tipc_acknowledge(u32 port_ref, u32 ack);
+
+struct tipc_port *tipc_get_port(const u32 ref);
+
+void *tipc_get_handle(const u32 ref);
+
+
+#endif
+
+#endif
+
index 60f6f32..9296b26 100644 (file)
@@ -159,6 +159,7 @@ source "net/ipx/Kconfig"
 source "drivers/net/appletalk/Kconfig"
 source "net/x25/Kconfig"
 source "net/lapb/Kconfig"
+source "net/tipc/Kconfig"
 
 config NET_DIVERT
        bool "Frame Diverter (EXPERIMENTAL)"
index f5141b9..065796f 100644 (file)
@@ -45,6 +45,7 @@ obj-$(CONFIG_VLAN_8021Q)      += 8021q/
 obj-$(CONFIG_IP_DCCP)          += dccp/
 obj-$(CONFIG_IP_SCTP)          += sctp/
 obj-$(CONFIG_IEEE80211)                += ieee80211/
+obj-$(CONFIG_TIPC)             += tipc/
 
 ifeq ($(CONFIG_NET),y)
 obj-$(CONFIG_SYSCTL)           += sysctl_net.o
diff --git a/net/tipc/Kconfig b/net/tipc/Kconfig
new file mode 100644 (file)
index 0000000..05ab18e
--- /dev/null
@@ -0,0 +1,112 @@
+#
+# TIPC configuration
+#
+
+menu "TIPC Configuration (EXPERIMENTAL)"
+       depends on INET && EXPERIMENTAL
+
+config TIPC
+       tristate "The TIPC Protocol (EXPERIMENTAL)"
+       ---help---
+         TBD.
+
+         This protocol support is also available as a module ( = code which
+         can be inserted in and removed from the running kernel whenever you
+         want). The module will be called tipc. If you want to compile it
+         as a module, say M here and read <file:Documentation/modules.txt>.
+
+         If in doubt, say N.
+
+config TIPC_ADVANCED
+       bool "TIPC: Advanced configuration"
+       depends on TIPC
+       default n
+       help
+         Saying Y here will open some advanced configuration options
+         for TIPC. Most users do not need to bother; if unsure, just say N.
+
+config TIPC_ZONES
+       int "Maximum number of zones in network"
+       depends on TIPC && TIPC_ADVANCED
+       default "3"
+       help
+         Maximum number of zones inside a TIPC network. Maximum supported
+         value is 255 zones, minimum is 1.
+
+         Default is 3 zones; setting this higher allows more zones but
+         might use more memory.
+
+config TIPC_CLUSTERS
+       int "Maximum number of clusters in a zone"
+       depends on TIPC && TIPC_ADVANCED
+       default "1"
+       help
+         ***Only 1 (one cluster in a zone) is supported by the current code.
+         Any value set here will be overridden.***
+
+         (Maximum number of clusters inside a TIPC zone. Maximum supported
+         value is 4095 clusters, minimum is 1.
+
+         Default is 1; setting this to a smaller value might save some memory,
+         setting it higher allows more clusters and might consume more memory.)
+
+config TIPC_NODES
+       int "Maximum number of nodes in cluster"
+       depends on TIPC && TIPC_ADVANCED
+       default "255"
+       help
+         Maximum number of nodes inside a TIPC cluster. Maximum supported
+         value is 2047 nodes, minimum is 8.
+
+         Setting this to a smaller value saves some memory; setting it
+         higher allows more nodes.
+
+config TIPC_SLAVE_NODES
+       int "Maximum number of slave nodes in cluster"
+       depends on TIPC && TIPC_ADVANCED
+       default "0"
+       help
+         ***This capability is not supported by the current code.***
+
+         Maximum number of slave nodes inside a TIPC cluster. Maximum
+         supported value is 2047 nodes, minimum is 0.
+
+         Setting this to a smaller value saves some memory; setting it
+         higher allows more nodes.
+
+config TIPC_PORTS
+       int "Maximum number of ports in a node"
+       depends on TIPC && TIPC_ADVANCED
+       default "8191"
+       help
+         Maximum number of ports within a node. Maximum supported value
+         is 65535 ports, minimum is 127.
+
+         Setting this to a smaller value saves some memory; setting it
+         higher allows more ports.
+
+config TIPC_LOG
+       int "Size of log buffer"
+       depends on TIPC && TIPC_ADVANCED
+       default 0
+       help
+         Size (in bytes) of TIPC's internal log buffer, which records the
+         occurrence of significant events.  Maximum supported value
+         is 32768 bytes, minimum is 0.
+
+         There is no need to enable the log buffer unless the node will be
+         managed remotely via TIPC.
+
+config TIPC_DEBUG
+       bool "Enable debugging support"
+       depends on TIPC
+       default n
+       help
+         This will enable debugging of TIPC.
+
+         Only say Y here if you are having trouble with TIPC.  It will
+         enable the display of detailed information about what is going on.
+
+endmenu
diff --git a/net/tipc/Makefile b/net/tipc/Makefile
new file mode 100644 (file)
index 0000000..dceb702
--- /dev/null
@@ -0,0 +1,13 @@
+#
+# Makefile for the Linux TIPC layer
+#
+
+obj-$(CONFIG_TIPC) := tipc.o
+
+tipc-y += addr.o bcast.o bearer.o config.o cluster.o \
+          core.o handler.o link.o discover.o msg.o  \
+          name_distr.o  subscr.o name_table.o net.o  \
+          netlink.o node.o node_subscr.o port.o ref.o  \
+          socket.o user_reg.o zone.o dbg.o eth_media.o
+
+# End of file
diff --git a/net/tipc/addr.c b/net/tipc/addr.c
new file mode 100644 (file)
index 0000000..bc35363
--- /dev/null
@@ -0,0 +1,91 @@
+/*
+ * net/tipc/addr.c: TIPC address utility routines
+ *     
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2004-2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "dbg.h"
+#include "addr.h"
+#include "zone.h"
+#include "cluster.h"
+#include "net.h"
+
+u32 tipc_get_addr(void)
+{
+       return tipc_own_addr;
+}
+
+/**
+ * addr_domain_valid - validates a network domain address
+ * 
+ * Accepts <Z.C.N>, <Z.C.0>, <Z.0.0>, and <0.0.0>, 
+ * where Z, C, and N are non-zero and do not exceed the configured limits.
+ * 
+ * Returns 1 if domain address is valid, otherwise 0
+ */
+
+int addr_domain_valid(u32 addr)
+{
+       u32 n = tipc_node(addr);
+       u32 c = tipc_cluster(addr);
+       u32 z = tipc_zone(addr);
+       u32 max_nodes = tipc_max_nodes;
+
+       if (is_slave(addr))
+               max_nodes = LOWEST_SLAVE + tipc_max_slaves;
+       if (n > max_nodes)
+               return 0;
+       if (c > tipc_max_clusters)
+               return 0;
+       if (z > tipc_max_zones)
+               return 0;
+
+       if (n && (!z || !c))
+               return 0;
+       if (c && !z)
+               return 0;
+       return 1;
+}
+
+/**
+ * addr_node_valid - validates a proposed network address for this node
+ * 
+ * Accepts <Z.C.N>, where Z, C, and N are non-zero and do not exceed 
+ * the configured limits.
+ * 
+ * Returns 1 if address can be used, otherwise 0
+ */
+
+int addr_node_valid(u32 addr)
+{
+       return (addr_domain_valid(addr) && tipc_node(addr));
+}
+
diff --git a/net/tipc/addr.h b/net/tipc/addr.h
new file mode 100644 (file)
index 0000000..9dabebc
--- /dev/null
@@ -0,0 +1,125 @@
+/*
+ * net/tipc/addr.h: Include file for TIPC address utility routines
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2004-2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_ADDR_H
+#define _TIPC_ADDR_H
+
+static inline u32 own_node(void)
+{
+       return tipc_node(tipc_own_addr);
+}
+
+static inline u32 own_cluster(void)
+{
+       return tipc_cluster(tipc_own_addr);
+}
+
+static inline u32 own_zone(void)
+{
+       return tipc_zone(tipc_own_addr);
+}
+
+static inline int in_own_cluster(u32 addr)
+{
+       return !((addr ^ tipc_own_addr) >> 12);
+}
+
+static inline int in_own_zone(u32 addr)
+{
+       return !((addr ^ tipc_own_addr) >> 24);
+}
+
+static inline int is_slave(u32 addr)
+{
+       return addr & 0x800;
+}
+
+static inline int may_route(u32 addr)
+{
+       return (addr ^ tipc_own_addr) >> 11;
+}
+
+static inline int in_scope(u32 domain, u32 addr)
+{
+       if (!domain || (domain == addr))
+               return 1;
+       if (domain == (addr & 0xfffff000u)) /* domain <Z.C.0> */
+               return 1;
+       if (domain == (addr & 0xff000000u)) /* domain <Z.0.0> */
+               return 1;
+       return 0;
+}
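+
+/*
+ * Informal note on the address layout implied by the shifts and masks
+ * above: a 32-bit TIPC network address packs the zone into bits 31-24,
+ * the cluster into bits 23-12 and the node into bits 11-0, so <Z.C.0>
+ * corresponds to the mask 0xfffff000, <Z.0.0> to 0xff000000, and bit
+ * 0x800 of the node field marks a slave node.
+ */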
+
+/**
+ * addr_scope - convert message lookup domain to equivalent 2-bit scope value
+ */
+
+static inline int addr_scope(u32 domain)
+{
+       if (likely(!domain))
+               return TIPC_ZONE_SCOPE;
+       if (tipc_node(domain))
+               return TIPC_NODE_SCOPE;
+       if (tipc_cluster(domain))
+               return TIPC_CLUSTER_SCOPE;
+       return TIPC_ZONE_SCOPE;
+}
+
+/**
+ * addr_domain - convert 2-bit scope value to equivalent message lookup domain
+ *  
+ * Needed when address of a named message must be looked up a second time 
+ * after a network hop.
+ */
+
+static inline int addr_domain(int sc)
+{
+       if (likely(sc == TIPC_NODE_SCOPE))
+               return tipc_own_addr;
+       if (sc == TIPC_CLUSTER_SCOPE)
+               return tipc_addr(tipc_zone(tipc_own_addr),
+                                tipc_cluster(tipc_own_addr), 0);
+       return tipc_addr(tipc_zone(tipc_own_addr), 0, 0);
+}
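+
+/*
+ * Example (illustrative): on a node whose own address is <1.2.3>,
+ * addr_scope(tipc_addr(1, 2, 0)) yields TIPC_CLUSTER_SCOPE, and
+ * addr_domain(TIPC_CLUSTER_SCOPE) rebuilds the lookup domain <1.2.0>.
+ */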
+
+static inline char *addr_string_fill(char *string, u32 addr)
+{
+       snprintf(string, 16, "<%u.%u.%u>",
+                tipc_zone(addr), tipc_cluster(addr), tipc_node(addr));
+       return string;
+}
+
+int addr_domain_valid(u32 addr);
+int addr_node_valid(u32 addr);
+
+#endif
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
new file mode 100644 (file)
index 0000000..35ca906
--- /dev/null
@@ -0,0 +1,803 @@
+/*
+ * net/tipc/bcast.c: TIPC broadcast code
+ *     
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2004, Intel Corporation.
+ * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "msg.h"
+#include "dbg.h"
+#include "link.h"
+#include "net.h"
+#include "node.h"
+#include "port.h"
+#include "addr.h"
+#include "node_subscr.h"
+#include "name_distr.h"
+#include "bearer.h"
+#include "name_table.h"
+#include "bcast.h"
+
+
+#define MAX_PKT_DEFAULT_MCAST 1500     /* bcast link max packet size (fixed) */
+
+#define BCLINK_WIN_DEFAULT 20          /* bcast link window size (default) */
+
+#define BCLINK_LOG_BUF_SIZE 0
+
+/**
+ * struct bcbearer_pair - a pair of bearers used by broadcast link
+ * @primary: pointer to primary bearer
+ * @secondary: pointer to secondary bearer
+ * 
+ * Bearers must have same priority and same set of reachable destinations 
+ * to be paired.
+ */
+
+struct bcbearer_pair {
+       struct bearer *primary;
+       struct bearer *secondary;
+};
+
+/**
+ * struct bcbearer - bearer used by broadcast link
+ * @bearer: (non-standard) broadcast bearer structure
+ * @media: (non-standard) broadcast media structure
+ * @bpairs: array of bearer pairs
+ * @bpairs_temp: array of bearer pairs used during creation of "bpairs"
+ */
+
+struct bcbearer {
+       struct bearer bearer;
+       struct media media;
+       struct bcbearer_pair bpairs[MAX_BEARERS];
+       struct bcbearer_pair bpairs_temp[TIPC_NUM_LINK_PRI];
+};
+
+/**
+ * struct bclink - link used for broadcast messages
+ * @link: (non-standard) broadcast link structure
+ * @node: (non-standard) node structure representing broadcast link's peer node
+ * 
+ * Handles sequence numbering, fragmentation, bundling, etc.
+ */
+
+struct bclink {
+       struct link link;
+       struct node node;
+};
+
+
+static struct bcbearer *bcbearer = NULL;
+static struct bclink *bclink = NULL;
+static struct link *bcl = NULL;
+static spinlock_t bc_lock = SPIN_LOCK_UNLOCKED;
+
+char bc_link_name[] = "multicast-link";
+
+
+static inline u32 buf_seqno(struct sk_buff *buf)
+{
+       return msg_seqno(buf_msg(buf));
+} 
+
+static inline u32 bcbuf_acks(struct sk_buff *buf)
+{
+       return (u32)TIPC_SKB_CB(buf)->handle;
+}
+
+static inline void bcbuf_set_acks(struct sk_buff *buf, u32 acks)
+{
+       TIPC_SKB_CB(buf)->handle = (void *)acks;
+}
+
+static inline void bcbuf_decr_acks(struct sk_buff *buf)
+{
+       bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
+}
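+
+/*
+ * The "acks" value stored in the buffer's control block above is the
+ * number of nodes that still have to acknowledge the packet: it is set
+ * when the packet is first broadcast, decremented once per acknowledging
+ * node, and the packet is released by bclink_acknowledge() once it
+ * reaches zero.
+ */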
+
+
+/** 
+ * bclink_set_gap - set gap according to contents of current deferred pkt queue
+ * 
+ * Called with 'node' locked, bc_lock unlocked
+ */
+
+static inline void bclink_set_gap(struct node *n_ptr)
+{
+       struct sk_buff *buf = n_ptr->bclink.deferred_head;
+
+       n_ptr->bclink.gap_after = n_ptr->bclink.gap_to =
+               mod(n_ptr->bclink.last_in);
+       if (unlikely(buf != NULL))
+               n_ptr->bclink.gap_to = mod(buf_seqno(buf) - 1);
+}
+
+/** 
+ * bclink_ack_allowed - test if ACK or NACK message can be sent at this moment
+ * 
+ * This mechanism endeavours to prevent all nodes in the network from trying
+ * to ACK or NACK at the same time.
+ * 
+ * Note: TIPC uses a different trigger to distribute ACKs than it does to
+ *       distribute NACKs, but tries to use the same spacing (divide by 16). 
+ */
+
+static inline int bclink_ack_allowed(u32 n)
+{
+       return((n % TIPC_MIN_LINK_WIN) == tipc_own_tag);
+}
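+
+/*
+ * Example (assuming the 16-packet spacing mentioned above, i.e.
+ * TIPC_MIN_LINK_WIN == 16): a node whose tipc_own_tag is 5 may send an
+ * ACK/NACK only when the relevant count n satisfies n % 16 == 5, so
+ * different nodes tend to react to different packets.
+ */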
+
+
+/** 
+ * bclink_retransmit_pkt - retransmit broadcast packets
+ * @after: sequence number of last packet to *not* retransmit
+ * @to: sequence number of last packet to retransmit
+ * 
+ * Called with 'node' locked, bc_lock unlocked
+ */
+
+static void bclink_retransmit_pkt(u32 after, u32 to)
+{
+       struct sk_buff *buf;
+
+       spin_lock_bh(&bc_lock);
+       buf = bcl->first_out;
+       while (buf && less_eq(buf_seqno(buf), after)) {
+               buf = buf->next;                
+       }
+       if (buf != NULL)
+               link_retransmit(bcl, buf, mod(to - after));
+       spin_unlock_bh(&bc_lock);              
+}
+
+/** 
+ * bclink_acknowledge - handle acknowledgement of broadcast packets
+ * @n_ptr: node that sent acknowledgement info
+ * @acked: broadcast sequence # that has been acknowledged
+ * 
+ * Node is locked, bc_lock unlocked.
+ */
+
+void bclink_acknowledge(struct node *n_ptr, u32 acked)
+{
+       struct sk_buff *crs;
+       struct sk_buff *next;
+       unsigned int released = 0;
+
+       if (less_eq(acked, n_ptr->bclink.acked))
+               return;
+
+       spin_lock_bh(&bc_lock);
+
+       /* Skip over packets that node has previously acknowledged */
+
+       crs = bcl->first_out;
+       while (crs && less_eq(buf_seqno(crs), n_ptr->bclink.acked)) {
+               crs = crs->next;
+       }
+
+       /* Update packets that node is now acknowledging */
+
+       while (crs && less_eq(buf_seqno(crs), acked)) {
+               next = crs->next;
+               bcbuf_decr_acks(crs);
+               if (bcbuf_acks(crs) == 0) {
+                       bcl->first_out = next;
+                       bcl->out_queue_size--;
+                       buf_discard(crs);
+                       released = 1;
+               }
+               crs = next;
+       }
+       n_ptr->bclink.acked = acked;
+
+       /* Try resolving broadcast link congestion, if necessary */
+
+       if (unlikely(bcl->next_out))
+               link_push_queue(bcl);
+       if (unlikely(released && !list_empty(&bcl->waiting_ports)))
+               link_wakeup_ports(bcl, 0);
+       spin_unlock_bh(&bc_lock);
+}
+
+/** 
+ * bclink_send_ack - unicast an ACK msg
+ * 
+ * net_lock and node lock set
+ */
+
+static void bclink_send_ack(struct node *n_ptr)
+{
+       struct link *l_ptr = n_ptr->active_links[n_ptr->addr & 1];
+
+       if (l_ptr != NULL)
+               link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
+}
+
+/** 
+ * bclink_send_nack- broadcast a NACK msg
+ * 
+ * net_lock and node lock set
+ */
+
+static void bclink_send_nack(struct node *n_ptr)
+{
+       struct sk_buff *buf;
+       struct tipc_msg *msg;
+
+       if (!less(n_ptr->bclink.gap_after, n_ptr->bclink.gap_to))
+               return;
+
+       buf = buf_acquire(INT_H_SIZE);
+       if (buf) {
+               msg = buf_msg(buf);
+               msg_init(msg, BCAST_PROTOCOL, STATE_MSG,
+                        TIPC_OK, INT_H_SIZE, n_ptr->addr);
+               msg_set_mc_netid(msg, tipc_net_id);
+               msg_set_bcast_ack(msg, mod(n_ptr->bclink.last_in)); 
+               msg_set_bcgap_after(msg, n_ptr->bclink.gap_after);
+               msg_set_bcgap_to(msg, n_ptr->bclink.gap_to);
+               msg_set_bcast_tag(msg, tipc_own_tag);
+
+               if (bearer_send(&bcbearer->bearer, buf, 0)) {
+                       bcl->stats.sent_nacks++;
+                       buf_discard(buf);
+               } else {
+                       bearer_schedule(bcl->b_ptr, bcl);
+                       bcl->proto_msg_queue = buf;
+                       bcl->stats.bearer_congs++;
+               }
+
+               /* 
+                * Ensure we don't send another NACK msg to the node
+                * until 16 more deferred messages arrive from it
+                * (i.e. helps prevent all nodes from NACK'ing at the same time)
+                */
+               
+               n_ptr->bclink.nack_sync = tipc_own_tag;
+       }
+}
+
+/** 
+ * bclink_check_gap - send a NACK if a sequence gap exists
+ *
+ * net_lock and node lock set
+ */
+
+void bclink_check_gap(struct node *n_ptr, u32 last_sent)
+{
+       if (!n_ptr->bclink.supported ||
+           less_eq(last_sent, mod(n_ptr->bclink.last_in)))
+               return;
+
+       bclink_set_gap(n_ptr);
+       if (n_ptr->bclink.gap_after == n_ptr->bclink.gap_to)
+               n_ptr->bclink.gap_to = last_sent;
+       bclink_send_nack(n_ptr);
+}
+
+/** 
+ * bclink_peek_nack - process a NACK msg meant for another node
+ * 
+ * Only net_lock set.
+ */
+
+void bclink_peek_nack(u32 dest, u32 sender_tag, u32 gap_after, u32 gap_to)
+{
+       struct node *n_ptr = node_find(dest);
+       u32 my_after, my_to;
+
+       if (unlikely(!n_ptr || !node_is_up(n_ptr)))
+               return;
+       node_lock(n_ptr);
+       /*
+        * Modify gap to suppress unnecessary NACKs from this node
+        */
+       my_after = n_ptr->bclink.gap_after;
+       my_to = n_ptr->bclink.gap_to;
+
+       if (less_eq(gap_after, my_after)) {
+               if (less(my_after, gap_to) && less(gap_to, my_to))
+                       n_ptr->bclink.gap_after = gap_to;
+               else if (less_eq(my_to, gap_to))
+                       n_ptr->bclink.gap_to = n_ptr->bclink.gap_after;
+       } else if (less_eq(gap_after, my_to)) {
+               if (less_eq(my_to, gap_to))
+                       n_ptr->bclink.gap_to = gap_after;
+       } else {
+               /* 
+                * Expand gap if missing bufs not in deferred queue:
+                */
+               struct sk_buff *buf = n_ptr->bclink.deferred_head;
+               u32 prev = n_ptr->bclink.gap_to;
+
+               for (; buf; buf = buf->next) {
+                       u32 seqno = buf_seqno(buf);
+
+                       if (mod(seqno - prev) != 1)
+                               buf = NULL;
+                       if (seqno == gap_after)
+                               break;
+                       prev = seqno;
+               }
+               if (buf == NULL)
+                       n_ptr->bclink.gap_to = gap_after;
+       }
+       /*
+        * Some nodes may send a complementary NACK now:
+        */ 
+       if (bclink_ack_allowed(sender_tag + 1)) {
+               if (n_ptr->bclink.gap_to != n_ptr->bclink.gap_after) {
+                       bclink_send_nack(n_ptr);
+                       bclink_set_gap(n_ptr);
+               }
+       }
+       node_unlock(n_ptr);
+}
+
+/**
+ * bclink_send_msg - broadcast a packet to all nodes in the cluster
+ */
+
+int bclink_send_msg(struct sk_buff *buf)
+{
+       int res;
+
+       spin_lock_bh(&bc_lock);
+
+       res = link_send_buf(bcl, buf);
+       if (unlikely(res == -ELINKCONG))
+               buf_discard(buf);
+       else
+               bcl->stats.sent_info++;
+
+       if (bcl->out_queue_size > bcl->stats.max_queue_sz)
+               bcl->stats.max_queue_sz = bcl->out_queue_size;
+       bcl->stats.queue_sz_counts++;
+       bcl->stats.accu_queue_sz += bcl->out_queue_size;
+
+       spin_unlock_bh(&bc_lock);
+       return res;
+}
+
+/**
+ * bclink_recv_pkt - receive a broadcast packet, and deliver upwards
+ * 
+ * net_lock is read_locked, no other locks set
+ */
+
+void bclink_recv_pkt(struct sk_buff *buf)
+{        
+       struct tipc_msg *msg = buf_msg(buf);
+       struct node* node = node_find(msg_prevnode(msg));
+       u32 next_in;
+       u32 seqno;
+       struct sk_buff *deferred;
+
+       msg_dbg(msg, "<BC<<<");
+
+       if (unlikely(!node || !node_is_up(node) || !node->bclink.supported || 
+                    (msg_mc_netid(msg) != tipc_net_id))) {
+               buf_discard(buf);
+               return;
+       }
+
+       if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
+               msg_dbg(msg, "<BCNACK<<<");
+               if (msg_destnode(msg) == tipc_own_addr) {
+                       node_lock(node);
+                       bclink_acknowledge(node, msg_bcast_ack(msg));
+                       node_unlock(node);
+                       bcl->stats.recv_nacks++;
+                       bclink_retransmit_pkt(msg_bcgap_after(msg),
+                                             msg_bcgap_to(msg));
+               } else {
+                       bclink_peek_nack(msg_destnode(msg),
+                                        msg_bcast_tag(msg),
+                                        msg_bcgap_after(msg),
+                                        msg_bcgap_to(msg));
+               }
+               buf_discard(buf);
+               return;
+       }
+
+       node_lock(node);
+receive:
+       deferred = node->bclink.deferred_head;
+       next_in = mod(node->bclink.last_in + 1);
+       seqno = msg_seqno(msg);
+
+       if (likely(seqno == next_in)) {
+               bcl->stats.recv_info++;
+               node->bclink.last_in++;
+               bclink_set_gap(node);
+               if (unlikely(bclink_ack_allowed(seqno))) {
+                       bclink_send_ack(node);
+                       bcl->stats.sent_acks++;
+               }
+               if (likely(msg_isdata(msg))) {
+                       node_unlock(node);
+                       port_recv_mcast(buf, NULL);
+               } else if (msg_user(msg) == MSG_BUNDLER) {
+                       bcl->stats.recv_bundles++;
+                       bcl->stats.recv_bundled += msg_msgcnt(msg);
+                       node_unlock(node);
+                       link_recv_bundle(buf);
+               } else if (msg_user(msg) == MSG_FRAGMENTER) {
+                       bcl->stats.recv_fragments++;
+                       if (link_recv_fragment(&node->bclink.defragm,
+                                              &buf, &msg))
+                               bcl->stats.recv_fragmented++;
+                       node_unlock(node);
+                       net_route_msg(buf);
+               } else {
+                       node_unlock(node);
+                       net_route_msg(buf);
+               }
+               if (deferred && (buf_seqno(deferred) == mod(next_in + 1))) {
+                       node_lock(node);
+                       buf = deferred;
+                       msg = buf_msg(buf);
+                       node->bclink.deferred_head = deferred->next;
+                       goto receive;
+               }
+               return;
+       } else if (less(next_in, seqno)) {
+               u32 gap_after = node->bclink.gap_after;
+               u32 gap_to = node->bclink.gap_to;
+
+               if (link_defer_pkt(&node->bclink.deferred_head,
+                                  &node->bclink.deferred_tail,
+                                  buf)) {
+                       node->bclink.nack_sync++;
+                       bcl->stats.deferred_recv++;
+                       if (seqno == mod(gap_after + 1))
+                               node->bclink.gap_after = seqno;
+                       else if (less(gap_after, seqno) && less(seqno, gap_to))
+                               node->bclink.gap_to = seqno;
+               }
+               if (bclink_ack_allowed(node->bclink.nack_sync)) {
+                       if (gap_to != gap_after)
+                               bclink_send_nack(node);
+                       bclink_set_gap(node);
+               }
+       } else {
+               bcl->stats.duplicates++;
+               buf_discard(buf);
+       }
+       node_unlock(node);
+}
+
+u32 bclink_get_last_sent(void)
+{
+       u32 last_sent = mod(bcl->next_out_no - 1);
+
+       if (bcl->next_out)
+               last_sent = mod(buf_seqno(bcl->next_out) - 1);
+       return last_sent;
+}
+
+u32 bclink_acks_missing(struct node *n_ptr)
+{
+       return (n_ptr->bclink.supported &&
+               (bclink_get_last_sent() != n_ptr->bclink.acked));
+}
+
+
+/**
+ * bcbearer_send - send a packet through the broadcast pseudo-bearer
+ * 
+ * Send through as many bearers as necessary to reach all nodes
+ * that support TIPC multicasting.
+ * 
+ * Returns 0 if packet sent successfully, non-zero if not
+ */
+
+int bcbearer_send(struct sk_buff *buf,
+                 struct tipc_bearer *unused1,
+                 struct tipc_media_addr *unused2)
+{
+       static int send_count = 0;
+
+       struct node_map remains;
+       struct node_map remains_new;
+       int bp_index;
+       int swap_time;
+
+       /* Prepare buffer for broadcasting (if first time trying to send it) */
+
+       if (likely(!msg_non_seq(buf_msg(buf)))) {
+               struct tipc_msg *msg;
+
+               assert(cluster_bcast_nodes.count != 0);
+               bcbuf_set_acks(buf, cluster_bcast_nodes.count);
+               msg = buf_msg(buf);
+               msg_set_non_seq(msg);
+               msg_set_mc_netid(msg, tipc_net_id);
+       }
+
+       /* Determine if bearer pairs should be swapped following this attempt */
+
+       if ((swap_time = (++send_count >= 10)))
+               send_count = 0;
+
+       /* Send buffer over bearers until all targets reached */
+       
+       remains = cluster_bcast_nodes;
+
+       for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
+               struct bearer *p = bcbearer->bpairs[bp_index].primary;
+               struct bearer *s = bcbearer->bpairs[bp_index].secondary;
+
+               if (!p)
+                       break;  /* no more bearers to try */
+
+               nmap_diff(&remains, &p->nodes, &remains_new);
+               if (remains_new.count == remains.count)
+                       continue;       /* bearer pair doesn't add anything */
+
+               if (!p->publ.blocked &&
+                   !p->media->send_msg(buf, &p->publ, &p->media->bcast_addr)) {
+                       if (swap_time && s && !s->publ.blocked)
+                               goto swap;
+                       else
+                               goto update;
+               }
+
+               if (!s || s->publ.blocked ||
+                   s->media->send_msg(buf, &s->publ, &s->media->bcast_addr))
+                       continue;       /* unable to send using bearer pair */
+swap:
+               bcbearer->bpairs[bp_index].primary = s;
+               bcbearer->bpairs[bp_index].secondary = p;
+update:
+               if (remains_new.count == 0)
+                       return TIPC_OK;
+
+               remains = remains_new;
+       }
+       
+       /* Unable to reach all targets */
+
+       bcbearer->bearer.publ.blocked = 1;
+       bcl->stats.bearer_congs++;
+       return ~TIPC_OK;
+}
+
+/**
+ * bcbearer_sort - create sets of bearer pairs used by broadcast bearer
+ */
+
+void bcbearer_sort(void)
+{
+       struct bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
+       struct bcbearer_pair *bp_curr;
+       int b_index;
+       int pri;
+
+       spin_lock_bh(&bc_lock);
+
+       /* Group bearers by priority (can assume max of two per priority) */
+
+       memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));
+
+       for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
+               struct bearer *b = &bearers[b_index];
+
+               if (!b->active || !b->nodes.count)
+                       continue;
+
+               if (!bp_temp[b->priority].primary)
+                       bp_temp[b->priority].primary = b;
+               else
+                       bp_temp[b->priority].secondary = b;
+       }
+
+       /* Create array of bearer pairs for broadcasting */
+
+       bp_curr = bcbearer->bpairs;
+       memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs));
+
+       for (pri = (TIPC_NUM_LINK_PRI - 1); pri >= 0; pri--) {
+
+               if (!bp_temp[pri].primary)
+                       continue;
+
+               bp_curr->primary = bp_temp[pri].primary;
+
+               if (bp_temp[pri].secondary) {
+                       if (nmap_equal(&bp_temp[pri].primary->nodes,
+                                      &bp_temp[pri].secondary->nodes)) {
+                               bp_curr->secondary = bp_temp[pri].secondary;
+                       } else {
+                               bp_curr++;
+                               bp_curr->primary = bp_temp[pri].secondary;
+                       }
+               }
+
+               bp_curr++;
+       }
+
+       spin_unlock_bh(&bc_lock);
+}
+
+/**
+ * bcbearer_push - resolve bearer congestion
+ * 
+ * Forces bclink to push out any unsent packets, until all packets are gone
+ * or congestion reoccurs.
+ * No locks set when function called
+ */
+
+void bcbearer_push(void)
+{
+       struct bearer *b_ptr;
+
+       spin_lock_bh(&bc_lock);
+       b_ptr = &bcbearer->bearer;
+       if (b_ptr->publ.blocked) {
+               b_ptr->publ.blocked = 0;
+               bearer_lock_push(b_ptr);
+       }
+       spin_unlock_bh(&bc_lock);
+}
+
+
+int bclink_stats(char *buf, const u32 buf_size)
+{
+       struct print_buf pb;
+
+       if (!bcl)
+               return 0;
+
+       printbuf_init(&pb, buf, buf_size);
+
+       spin_lock_bh(&bc_lock);
+
+       tipc_printf(&pb, "Link <%s>\n"
+                        "  Window:%u packets\n", 
+                   bcl->name, bcl->queue_limit[0]);
+       tipc_printf(&pb, "  RX packets:%u fragments:%u/%u bundles:%u/%u\n", 
+                   bcl->stats.recv_info,
+                   bcl->stats.recv_fragments,
+                   bcl->stats.recv_fragmented,
+                   bcl->stats.recv_bundles,
+                   bcl->stats.recv_bundled);
+       tipc_printf(&pb, "  TX packets:%u fragments:%u/%u bundles:%u/%u\n", 
+                   bcl->stats.sent_info,
+                   bcl->stats.sent_fragments,
+                   bcl->stats.sent_fragmented, 
+                   bcl->stats.sent_bundles,
+                   bcl->stats.sent_bundled);
+       tipc_printf(&pb, "  RX naks:%u defs:%u dups:%u\n", 
+                   bcl->stats.recv_nacks,
+                   bcl->stats.deferred_recv, 
+                   bcl->stats.duplicates);
+       tipc_printf(&pb, "  TX naks:%u acks:%u dups:%u\n", 
+                   bcl->stats.sent_nacks, 
+                   bcl->stats.sent_acks, 
+                   bcl->stats.retransmitted);
+       tipc_printf(&pb, "  Congestion bearer:%u link:%u  Send queue max:%u avg:%u\n",
+                   bcl->stats.bearer_congs,
+                   bcl->stats.link_congs,
+                   bcl->stats.max_queue_sz,
+                   bcl->stats.queue_sz_counts
+                   ? (bcl->stats.accu_queue_sz / bcl->stats.queue_sz_counts)
+                   : 0);
+
+       spin_unlock_bh(&bc_lock);
+       return printbuf_validate(&pb);
+}
+
+int bclink_reset_stats(void)
+{
+       if (!bcl)
+               return -ENOPROTOOPT;
+
+       spin_lock_bh(&bc_lock);
+       memset(&bcl->stats, 0, sizeof(bcl->stats));
+       spin_unlock_bh(&bc_lock);
+       return TIPC_OK;
+}
+
+int bclink_set_queue_limits(u32 limit)
+{
+       if (!bcl)
+               return -ENOPROTOOPT;
+       if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN))
+               return -EINVAL;
+
+       spin_lock_bh(&bc_lock);
+       link_set_queue_limits(bcl, limit);
+       spin_unlock_bh(&bc_lock);
+       return TIPC_OK;
+}
+
+int bclink_init(void)
+{
+       bcbearer = kmalloc(sizeof(*bcbearer), GFP_ATOMIC);
+       bclink = kmalloc(sizeof(*bclink), GFP_ATOMIC);
+       if (!bcbearer || !bclink) {
+ nomem:
+               warn("Memory squeeze; Failed to create multicast link\n");
+               kfree(bcbearer);
+               bcbearer = NULL;
+               kfree(bclink);
+               bclink = NULL;
+               return -ENOMEM;
+       }
+
+       memset(bcbearer, 0, sizeof(struct bcbearer));
+       INIT_LIST_HEAD(&bcbearer->bearer.cong_links);
+       bcbearer->bearer.media = &bcbearer->media;
+       bcbearer->media.send_msg = bcbearer_send;
+       sprintf(bcbearer->media.name, "tipc-multicast");
+
+       bcl = &bclink->link;
+       memset(bclink, 0, sizeof(struct bclink));
+       INIT_LIST_HEAD(&bcl->waiting_ports);
+       bcl->next_out_no = 1;
+       bclink->node.lock = SPIN_LOCK_UNLOCKED;
+       bcl->owner = &bclink->node;
+       bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
+       link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
+       bcl->b_ptr = &bcbearer->bearer;
+       bcl->state = WORKING_WORKING;
+       sprintf(bcl->name, bc_link_name);
+
+       if (BCLINK_LOG_BUF_SIZE) {
+               char *pb = kmalloc(BCLINK_LOG_BUF_SIZE, GFP_ATOMIC);
+
+               if (!pb)
+                       goto nomem;
+               printbuf_init(&bcl->print_buf, pb, BCLINK_LOG_BUF_SIZE);
+       }
+
+       return TIPC_OK;
+}
+
+void bclink_stop(void)
+{
+       spin_lock_bh(&bc_lock);
+       if (bcbearer) {
+               link_stop(bcl);
+               if (BCLINK_LOG_BUF_SIZE)
+                       kfree(bcl->print_buf.buf);
+               bcl = NULL;
+               kfree(bclink);
+               bclink = NULL;
+               kfree(bcbearer);
+               bcbearer = NULL;
+       }
+       spin_unlock_bh(&bc_lock);
+}
+
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
new file mode 100644 (file)
index 0000000..cc2ede1
--- /dev/null
@@ -0,0 +1,220 @@
+/*
+ * net/tipc/bcast.h: Include file for TIPC broadcast code
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_BCAST_H
+#define _TIPC_BCAST_H
+
+#define MAX_NODES 4096
+#define WSIZE 32
+
+/**
+ * struct node_map - set of node identifiers
+ * @count: # of nodes in set
+ * @map: bitmap of node identifiers that are in the set
+ */
+
+struct node_map {
+       u32 count;
+       u32 map[MAX_NODES / WSIZE];
+};
+
+
+#define PLSIZE 32
+
+/**
+ * struct port_list - set of node local destination ports
+ * @count: # of ports in set (only valid for first entry in list)
+ * @next: pointer to next entry in list
+ * @ports: array of port references
+ */
+
+struct port_list {
+       int count;
+       struct port_list *next;
+       u32 ports[PLSIZE];
+};
+
+
+struct node;
+
+extern char bc_link_name[];
+
+
+/**
+ * nmap_get - determine if node exists in a node map
+ */
+
+static inline int nmap_get(struct node_map *nm_ptr, u32 node)
+{
+       int n = tipc_node(node);
+       int w = n / WSIZE;
+       int b = n % WSIZE;
+
+       return nm_ptr->map[w] & (1 << b);
+}
+
+/**
+ * nmap_add - add a node to a node map
+ */
+
+static inline void nmap_add(struct node_map *nm_ptr, u32 node)
+{
+       int n = tipc_node(node);
+       int w = n / WSIZE;
+       u32 mask = (1 << (n % WSIZE));
+
+       if ((nm_ptr->map[w] & mask) == 0) {
+               nm_ptr->count++;
+               nm_ptr->map[w] |= mask;
+       }
+}
+
+/** 
+ * nmap_remove - remove a node from a node map
+ */
+
+static inline void nmap_remove(struct node_map *nm_ptr, u32 node)
+{
+       int n = tipc_node(node);
+       int w = n / WSIZE;
+       u32 mask = (1 << (n % WSIZE));
+
+       if ((nm_ptr->map[w] & mask) != 0) {
+               nm_ptr->map[w] &= ~mask;
+               nm_ptr->count--;
+       }
+}
+
+/**
+ * nmap_equal - test for equality of node maps
+ */
+
+static inline int nmap_equal(struct node_map *nm_a, struct node_map *nm_b)
+{
+       return !memcmp(nm_a, nm_b, sizeof(*nm_a));
+}
+
+/**
+ * nmap_diff - find differences between node maps
+ * @nm_a: input node map A
+ * @nm_b: input node map B
+ * @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
+ */
+
+static inline void nmap_diff(struct node_map *nm_a, struct node_map *nm_b,
+                            struct node_map *nm_diff)
+{
+       int stop = sizeof(nm_a->map) / sizeof(u32);
+       int w;
+       int b;
+       u32 map;
+
+       memset(nm_diff, 0, sizeof(*nm_diff));
+       for (w = 0; w < stop; w++) {
+               map = nm_a->map[w] ^ (nm_a->map[w] & nm_b->map[w]);
+               nm_diff->map[w] = map;
+               if (map != 0) {
+                       for (b = 0 ; b < WSIZE; b++) {
+                               if (map & (1 << b))
+                                       nm_diff->count++;
+                       }
+               }
+       }
+}
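+
+/*
+ * Example (illustrative): if map A contains nodes {1, 2, 3} and map B
+ * contains {2}, then nmap_diff(&A, &B, &D) leaves D containing {1, 3}
+ * with D.count == 2, i.e. D = A AND NOT B, with the population count
+ * recomputed from scratch.
+ */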
+
+/**
+ * port_list_add - add a port to a port list, ensuring no duplicates
+ */
+
+static inline void port_list_add(struct port_list *pl_ptr, u32 port)
+{
+       struct port_list *item = pl_ptr;
+       int i;
+       int item_sz = PLSIZE;
+       int cnt = pl_ptr->count;
+
+       for (; ; cnt -= item_sz, item = item->next) {
+               if (cnt < PLSIZE)
+                       item_sz = cnt;
+               for (i = 0; i < item_sz; i++)
+                       if (item->ports[i] == port)
+                               return;
+               if (i < PLSIZE) {
+                       item->ports[i] = port;
+                       pl_ptr->count++;
+                       return;
+               }
+               if (!item->next) {
+                       item->next = kmalloc(sizeof(*item), GFP_ATOMIC);
+                       if (!item->next) {
+                               warn("Memory squeeze: multicast destination port list is incomplete\n");
+                               return;
+                       }
+                       item->next->next = NULL;
+               }
+       }
+}
+
+/**
+ * port_list_free - free dynamically created entries in port_list chain
+ * 
+ * Note: First item is on stack, so it doesn't need to be released
+ */
+
+static inline void port_list_free(struct port_list *pl_ptr)
+{
+       struct port_list *item;
+       struct port_list *next;
+
+       for (item = pl_ptr->next; item; item = next) {
+               next = item->next;
+               kfree(item);
+       }
+}
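+
+/*
+ * Typical usage sketch (assumed; "ref" is just an illustrative name for
+ * a port reference):
+ *
+ *     struct port_list dports = { 0, NULL, { 0 } };
+ *
+ *     port_list_add(&dports, ref);    // once per destination port
+ *     ...deliver message to every port in the chain...
+ *     port_list_free(&dports);        // frees only kmalloc'd overflow entries
+ */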
+
+
+int  bclink_init(void);
+void bclink_stop(void);
+void bclink_acknowledge(struct node *n_ptr, u32 acked);
+int  bclink_send_msg(struct sk_buff *buf);
+void bclink_recv_pkt(struct sk_buff *buf);
+u32  bclink_get_last_sent(void);
+u32  bclink_acks_missing(struct node *n_ptr);
+void bclink_check_gap(struct node *n_ptr, u32 seqno);
+int  bclink_stats(char *stats_buf, const u32 buf_size);
+int  bclink_reset_stats(void);
+int  bclink_set_queue_limits(u32 limit);
+void bcbearer_sort(void);
+void bcbearer_push(void);
+
+#endif
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
new file mode 100644 (file)
index 0000000..c19465c
--- /dev/null
@@ -0,0 +1,689 @@
+/*
+ * net/tipc/bearer.c: TIPC bearer code
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2004-2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "config.h"
+#include "dbg.h"
+#include "bearer.h"
+#include "link.h"
+#include "port.h"
+#include "discover.h"
+#include "bcast.h"
+
+#define MAX_ADDR_STR 32
+
+static struct media *media_list = 0;
+static u32 media_count = 0;
+
+struct bearer *bearers = 0;
+
+/**
+ * media_name_valid - validate media name
+ * 
+ * Returns 1 if media name is valid, otherwise 0.
+ */
+
+static int media_name_valid(const char *name)
+{
+       u32 len;
+
+       len = strlen(name);
+       if ((len + 1) > TIPC_MAX_MEDIA_NAME)
+               return 0;
+       return (strspn(name, tipc_alphabet) == len);
+}
+
+/**
+ * media_find - locates specified media object by name
+ */
+
+static struct media *media_find(const char *name)
+{
+       struct media *m_ptr;
+       u32 i;
+
+       for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) {
+               if (!strcmp(m_ptr->name, name))
+                       return m_ptr;
+       }
+       return 0;
+}
+
+/**
+ * tipc_register_media - register a media type
+ * 
+ * Bearers for this media type must be activated separately at a later stage.
+ */
+
+int  tipc_register_media(u32 media_type,
+                        char *name, 
+                        int (*enable)(struct tipc_bearer *), 
+                        void (*disable)(struct tipc_bearer *), 
+                        int (*send_msg)(struct sk_buff *, 
+                                        struct tipc_bearer *,
+                                        struct tipc_media_addr *), 
+                        char *(*addr2str)(struct tipc_media_addr *a,
+                                          char *str_buf, int str_size),
+                        struct tipc_media_addr *bcast_addr,
+                        const u32 bearer_priority,
+                        const u32 link_tolerance,  /* [ms] */
+                        const u32 send_window_limit)
+{
+       struct media *m_ptr;
+       u32 media_id;
+       u32 i;
+       int res = -EINVAL;
+
+       write_lock_bh(&net_lock);
+       if (!media_list)
+               goto exit;
+
+       if (!media_name_valid(name)) {
+               warn("Media registration error: illegal name <%s>\n", name);
+               goto exit;
+       }
+       if (!bcast_addr) {
+               warn("Media registration error: no broadcast address supplied\n");
+               goto exit;
+       }
+       if (bearer_priority >= TIPC_NUM_LINK_PRI) {
+               warn("Media registration error: priority %u\n", bearer_priority);
+               goto exit;
+       }
+       if ((link_tolerance < TIPC_MIN_LINK_TOL) || 
+           (link_tolerance > TIPC_MAX_LINK_TOL)) {
+               warn("Media registration error: tolerance %u\n", link_tolerance);
+               goto exit;
+       }
+
+       media_id = media_count++;
+       if (media_id >= MAX_MEDIA) {
+               warn("Attempt to register more than %u media\n", MAX_MEDIA);
+               media_count--;
+               goto exit;
+       }
+       for (i = 0; i < media_id; i++) {
+               if (media_list[i].type_id == media_type) {
+                       warn("Attempt to register second media with type %u\n", 
+                            media_type);
+                       media_count--;
+                       goto exit;
+               }
+               if (!strcmp(name, media_list[i].name)) {
+                       warn("Attempt to re-register media name <%s>\n", name);
+                       media_count--;
+                       goto exit;
+               }
+       }
+
+       m_ptr = &media_list[media_id];
+       m_ptr->type_id = media_type;
+       m_ptr->send_msg = send_msg;
+       m_ptr->enable_bearer = enable;
+       m_ptr->disable_bearer = disable;
+       m_ptr->addr2str = addr2str;
+       memcpy(&m_ptr->bcast_addr, bcast_addr, sizeof(*bcast_addr));
+       m_ptr->bcast = 1;
+       strcpy(m_ptr->name, name);
+       m_ptr->priority = bearer_priority;
+       m_ptr->tolerance = link_tolerance;
+       m_ptr->window = send_window_limit;
+       dbg("Media <%s> registered\n", name);
+       res = 0;
+exit:
+       write_unlock_bh(&net_lock);
+       return res;
+}
+
+/**
+ * media_addr_printf - record media address in print buffer
+ */
+
+void media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a)
+{
+       struct media *m_ptr;
+       u32 media_type;
+       u32 i;
+
+       media_type = ntohl(a->type);
+       for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) {
+               if (m_ptr->type_id == media_type)
+                       break;
+       }
+
+       if ((i < media_count) && (m_ptr->addr2str != NULL)) {
+               char addr_str[MAX_ADDR_STR];
+
+               tipc_printf(pb, "%s(%s) ", m_ptr->name, 
+                           m_ptr->addr2str(a, addr_str, sizeof(addr_str)));
+       } else {
+               unchar *addr = (unchar *)&a->dev_addr;
+
+               tipc_printf(pb, "UNKNOWN(%u):", media_type);
+               for (i = 0; i < (sizeof(*a) - sizeof(a->type)); i++) {
+                       tipc_printf(pb, "%02x ", addr[i]);
+               }
+       }
+}
+
+/**
+ * media_get_names - record names of registered media in buffer
+ */
+
+struct sk_buff *media_get_names(void)
+{
+       struct sk_buff *buf;
+       struct media *m_ptr;
+       int i;
+
+       buf = cfg_reply_alloc(MAX_MEDIA * TLV_SPACE(TIPC_MAX_MEDIA_NAME));
+       if (!buf)
+               return NULL;
+
+       read_lock_bh(&net_lock);
+       for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) {
+               cfg_append_tlv(buf, TIPC_TLV_MEDIA_NAME, m_ptr->name, 
+                              strlen(m_ptr->name) + 1);
+       }
+       read_unlock_bh(&net_lock);
+       return buf;
+}
+
+/**
+ * bearer_name_validate - validate & (optionally) deconstruct bearer name
+ * @name: ptr to bearer name string
+ * @name_parts: ptr to area for bearer name components (or NULL if not needed)
+ * 
+ * Returns 1 if bearer name is valid, otherwise 0.
+ */
+
+static int bearer_name_validate(const char *name, 
+                               struct bearer_name *name_parts)
+{
+       char name_copy[TIPC_MAX_BEARER_NAME];
+       char *media_name;
+       char *if_name;
+       u32 media_len;
+       u32 if_len;
+
+       /* copy bearer name & ensure length is OK */
+
+       name_copy[TIPC_MAX_BEARER_NAME - 1] = 0;
+       /* need above in case non-Posix strncpy() doesn't pad with nulls */
+       strncpy(name_copy, name, TIPC_MAX_BEARER_NAME);
+       if (name_copy[TIPC_MAX_BEARER_NAME - 1] != 0)
+               return 0;
+
+       /* ensure all component parts of bearer name are present */
+
+       media_name = name_copy;
+       if ((if_name = strchr(media_name, ':')) == NULL)
+               return 0;
+       *(if_name++) = 0;
+       media_len = if_name - media_name;
+       if_len = strlen(if_name) + 1;
+
+       /* validate component parts of bearer name */
+
+       if ((media_len <= 1) || (media_len > TIPC_MAX_MEDIA_NAME) || 
+           (if_len <= 1) || (if_len > TIPC_MAX_IF_NAME) || 
+           (strspn(media_name, tipc_alphabet) != (media_len - 1)) ||
+           (strspn(if_name, tipc_alphabet) != (if_len - 1)))
+               return 0;
+
+       /* return bearer name components, if necessary */
+
+       if (name_parts) {
+               strcpy(name_parts->media_name, media_name);
+               strcpy(name_parts->if_name, if_name);
+       }
+       return 1;
+}
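+
+/*
+ * Example (illustrative): a bearer name has the form "media:interface",
+ * e.g. "eth:eth0" assuming the Ethernet media (eth_media.c) registers
+ * under the name "eth"; the validator above splits the string at the
+ * first ':' and checks both parts against tipc_alphabet and the
+ * TIPC_MAX_MEDIA_NAME / TIPC_MAX_IF_NAME limits.
+ */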
+
+/**
+ * bearer_find - locates bearer object with matching bearer name
+ */
+
+static struct bearer *bearer_find(const char *name)
+{
+       struct bearer *b_ptr;
+       u32 i;
+
+       for (i = 0, b_ptr = bearers; i < MAX_BEARERS; i++, b_ptr++) {
+               if (b_ptr->active && (!strcmp(b_ptr->publ.name, name)))
+                       return b_ptr;
+       }
+       return 0;
+}
+
+/**
+ * bearer_find_interface - locates bearer object with matching interface name
+ */
+
+struct bearer *bearer_find_interface(const char *if_name)
+{
+       struct bearer *b_ptr;
+       char *b_if_name;
+       u32 i;
+
+       for (i = 0, b_ptr = bearers; i < MAX_BEARERS; i++, b_ptr++) {
+               if (!b_ptr->active)
+                       continue;
+               b_if_name = strchr(b_ptr->publ.name, ':') + 1;
+               if (!strcmp(b_if_name, if_name))
+                       return b_ptr;
+       }
+       return 0;
+}
+
+/**
+ * bearer_get_names - record names of bearers in buffer
+ */
+
+struct sk_buff *bearer_get_names(void)
+{
+       struct sk_buff *buf;
+       struct media *m_ptr;
+       struct bearer *b_ptr;
+       int i, j;
+
+       buf = cfg_reply_alloc(MAX_BEARERS * TLV_SPACE(TIPC_MAX_BEARER_NAME));
+       if (!buf)
+               return NULL;
+
+       read_lock_bh(&net_lock);
+       for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) {
+               for (j = 0; j < MAX_BEARERS; j++) {
+                       b_ptr = &bearers[j];
+                       if (b_ptr->active && (b_ptr->media == m_ptr)) {
+                               cfg_append_tlv(buf, TIPC_TLV_BEARER_NAME, 
+                                              b_ptr->publ.name, 
+                                              strlen(b_ptr->publ.name) + 1);
+                       }
+               }
+       }
+       read_unlock_bh(&net_lock);
+       return buf;
+}
+
+void bearer_add_dest(struct bearer *b_ptr, u32 dest)
+{
+       nmap_add(&b_ptr->nodes, dest);
+       disc_update_link_req(b_ptr->link_req);
+       bcbearer_sort();
+}
+
+void bearer_remove_dest(struct bearer *b_ptr, u32 dest)
+{
+       nmap_remove(&b_ptr->nodes, dest);
+       disc_update_link_req(b_ptr->link_req);
+       bcbearer_sort();
+}
+
+/*
+ * bearer_push(): Resolve bearer congestion. Force the waiting
+ * links to push out their unsent packets, one packet per link
+ * per iteration, until all packets are gone or congestion reoccurs.
+ * 'net_lock' is read_locked when this function is called
+ * bearer.lock must be taken before calling
+ * Returns binary true (1) or false (0)
+ */
+static int bearer_push(struct bearer *b_ptr)
+{
+       u32 res = TIPC_OK;
+       struct link *ln, *tln;
+
+       if (b_ptr->publ.blocked)
+               return 0;
+
+       while (!list_empty(&b_ptr->cong_links) && (res != PUSH_FAILED)) {
+               list_for_each_entry_safe(ln, tln, &b_ptr->cong_links, link_list) {
+                       res = link_push_packet(ln);
+                       if (res == PUSH_FAILED)
+                               break;
+                       if (res == PUSH_FINISHED)
+                               list_move_tail(&ln->link_list, &b_ptr->links);
+               }
+       }
+       return list_empty(&b_ptr->cong_links);
+}
+
+void bearer_lock_push(struct bearer *b_ptr)
+{
+       int res;
+
+       spin_lock_bh(&b_ptr->publ.lock);
+       res = bearer_push(b_ptr);
+       spin_unlock_bh(&b_ptr->publ.lock);
+       if (res)
+               bcbearer_push();
+}
+
+
+/*
+ * Re-enables sending of new requests after bearer congestion or blocking:
+ * See bearer_send().
+ */
+void tipc_continue(struct tipc_bearer *tb_ptr)
+{
+       struct bearer *b_ptr = (struct bearer *)tb_ptr;
+
+       spin_lock_bh(&b_ptr->publ.lock);
+       b_ptr->continue_count++;
+       if (!list_empty(&b_ptr->cong_links))
+               k_signal((Handler)bearer_lock_push, (unsigned long)b_ptr);
+       b_ptr->publ.blocked = 0;
+       spin_unlock_bh(&b_ptr->publ.lock);
+}
+
+/*
+ * Schedule link for sending of messages after the bearer 
+ * has been deblocked by 'continue()'. This method is called 
+ * when somebody tries to send a message via this link while 
+ * the bearer is congested. 'net_lock' is in read_lock here
+ * bearer.lock is busy
+ */
+
+static void bearer_schedule_unlocked(struct bearer *b_ptr, struct link *l_ptr)
+{
+       list_move_tail(&l_ptr->link_list, &b_ptr->cong_links);
+}
+
+/*
+ * Schedule link for sending of messages after the bearer 
+ * has been deblocked by 'continue()'. This method is called 
+ * when somebody tries to send a message via this link while 
+ * the bearer is congested. 'net_lock' is in read_lock here,
+ * bearer.lock is free
+ */
+
+void bearer_schedule(struct bearer *b_ptr, struct link *l_ptr)
+{
+       spin_lock_bh(&b_ptr->publ.lock);
+       bearer_schedule_unlocked(b_ptr, l_ptr);
+       spin_unlock_bh(&b_ptr->publ.lock);
+}
+
+
+/*
+ * bearer_resolve_congestion(): Check if there is bearer congestion,
+ * and if there is, try to resolve it before returning.
+ * 'net_lock' is read_locked when this function is called
+ */
+int bearer_resolve_congestion(struct bearer *b_ptr, struct link *l_ptr)
+{
+       int res = 1;
+
+       if (list_empty(&b_ptr->cong_links))
+               return 1;
+       spin_lock_bh(&b_ptr->publ.lock);
+       if (!bearer_push(b_ptr)) {
+               bearer_schedule_unlocked(b_ptr, l_ptr);
+               res = 0;
+       }
+       spin_unlock_bh(&b_ptr->publ.lock);
+       return res;
+}
+
+
+/**
+ * tipc_enable_bearer - enable bearer with the given name
+ */              
+
+int tipc_enable_bearer(const char *name, u32 bcast_scope, u32 priority)
+{
+       struct bearer *b_ptr;
+       struct media *m_ptr;
+       struct bearer_name b_name;
+       char addr_string[16];
+       u32 bearer_id;
+       u32 with_this_prio;
+       u32 i;
+       int res = -EINVAL;
+
+       if (tipc_mode != TIPC_NET_MODE)
+               return -ENOPROTOOPT;
+       if (!bearer_name_validate(name, &b_name) ||
+           !addr_domain_valid(bcast_scope) ||
+           !in_scope(bcast_scope, tipc_own_addr) ||
+           (priority > TIPC_NUM_LINK_PRI))
+               return -EINVAL;
+
+       write_lock_bh(&net_lock);
+       if (!bearers)
+               goto failed;
+
+       m_ptr = media_find(b_name.media_name);
+       if (!m_ptr) {
+               warn("No media <%s>\n", b_name.media_name);
+               goto failed;
+       }
+       if (priority == TIPC_NUM_LINK_PRI)
+               priority = m_ptr->priority;
+
+restart:
+       bearer_id = MAX_BEARERS;
+       with_this_prio = 1;
+       for (i = MAX_BEARERS; i-- != 0; ) {
+               if (!bearers[i].active) {
+                       bearer_id = i;
+                       continue;
+               }
+               if (!strcmp(name, bearers[i].publ.name)) {
+                       warn("Bearer <%s> already enabled\n", name);
+                       goto failed;
+               }
+               if ((bearers[i].priority == priority) &&
+                   (++with_this_prio > 2)) {
+                       if (priority-- == 0) {
+                               warn("Third bearer <%s> with priority %u, unable to lower to %u\n",
+                                    name, priority + 1, priority);
+                               goto failed;
+                       }
+                       warn("Third bearer <%s> with priority %u, lowering to %u\n",
+                            name, priority + 1, priority);
+                       goto restart;
+               }
+       }
+       if (bearer_id >= MAX_BEARERS) {
+               warn("Attempt to enable more than %d bearers\n", MAX_BEARERS);
+               goto failed;
+       }
+
+       b_ptr = &bearers[bearer_id];
+       memset(b_ptr, 0, sizeof(struct bearer));
+
+       strcpy(b_ptr->publ.name, name);
+       res = m_ptr->enable_bearer(&b_ptr->publ);
+       if (res) {
+               warn("Failed to enable bearer <%s>\n", name);
+               goto failed;
+       }
+
+       b_ptr->identity = bearer_id;
+       b_ptr->media = m_ptr;
+       b_ptr->net_plane = bearer_id + 'A';
+       b_ptr->active = 1;
+       b_ptr->detect_scope = bcast_scope;
+       b_ptr->priority = priority;
+       INIT_LIST_HEAD(&b_ptr->cong_links);
+       INIT_LIST_HEAD(&b_ptr->links);
+       if (m_ptr->bcast) {
+               b_ptr->link_req = disc_init_link_req(b_ptr, &m_ptr->bcast_addr,
+                                                    bcast_scope, 2);
+       }
+       b_ptr->publ.lock = SPIN_LOCK_UNLOCKED;
+       write_unlock_bh(&net_lock);
+       info("Enabled bearer <%s>, discovery domain %s\n",
+            name, addr_string_fill(addr_string, bcast_scope));
+       return 0;
+failed:
+       write_unlock_bh(&net_lock);
+       return res;
+}
+
+/**
+ * tipc_block_bearer(): Block the bearer with the given name,
+ *                      and reset all its links
+ */
+
+int tipc_block_bearer(const char *name)
+{
+       struct bearer *b_ptr = 0;
+       struct link *l_ptr;
+       struct link *temp_l_ptr;
+
+       if (tipc_mode != TIPC_NET_MODE)
+               return -ENOPROTOOPT;
+
+       read_lock_bh(&net_lock);
+       b_ptr = bearer_find(name);
+       if (!b_ptr) {
+               warn("Attempt to block unknown bearer <%s>\n", name);
+               read_unlock_bh(&net_lock);
+               return -EINVAL;
+       }
+
+       spin_lock_bh(&b_ptr->publ.lock);
+       b_ptr->publ.blocked = 1;
+       list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
+               struct node *n_ptr = l_ptr->owner;
+
+               spin_lock_bh(&n_ptr->lock);
+               link_reset(l_ptr);
+               spin_unlock_bh(&n_ptr->lock);
+       }
+       spin_unlock_bh(&b_ptr->publ.lock);
+       read_unlock_bh(&net_lock);
+       info("Blocked bearer <%s>\n", name);
+       return TIPC_OK;
+}
+
+/**
+ * bearer_disable - disable the bearer with the given name and delete its links
+ *
+ * Note: This routine assumes caller holds net_lock.
+ */
+
+static int bearer_disable(const char *name)
+{
+       struct bearer *b_ptr;
+       struct link *l_ptr;
+       struct link *temp_l_ptr;
+
+       if (tipc_mode != TIPC_NET_MODE)
+               return -ENOPROTOOPT;
+
+       b_ptr = bearer_find(name);
+       if (!b_ptr) {
+               warn("Attempt to disable unknown bearer <%s>\n", name);
+               return -EINVAL;
+       }
+
+       disc_stop_link_req(b_ptr->link_req);
+       spin_lock_bh(&b_ptr->publ.lock);
+       b_ptr->link_req = NULL;
+       b_ptr->publ.blocked = 1;
+       if (b_ptr->media->disable_bearer) {
+               spin_unlock_bh(&b_ptr->publ.lock);
+               write_unlock_bh(&net_lock);
+               b_ptr->media->disable_bearer(&b_ptr->publ);
+               write_lock_bh(&net_lock);
+               spin_lock_bh(&b_ptr->publ.lock);
+       }
+       list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
+               link_delete(l_ptr);
+       }
+       spin_unlock_bh(&b_ptr->publ.lock);
+       info("Disabled bearer <%s>\n", name);
+       memset(b_ptr, 0, sizeof(struct bearer));
+       return TIPC_OK;
+}
+
+int tipc_disable_bearer(const char *name)
+{
+       int res;
+
+       write_lock_bh(&net_lock);
+       res = bearer_disable(name);
+       write_unlock_bh(&net_lock);
+       return res;
+}
+
+
+
+int bearer_init(void)
+{
+       int res;
+
+       write_lock_bh(&net_lock);
+       bearers = kmalloc(MAX_BEARERS * sizeof(struct bearer), GFP_ATOMIC);
+       media_list = kmalloc(MAX_MEDIA * sizeof(struct media), GFP_ATOMIC);
+       if (bearers && media_list) {
+               memset(bearers, 0, MAX_BEARERS * sizeof(struct bearer));
+               memset(media_list, 0, MAX_MEDIA * sizeof(struct media));
+               res = TIPC_OK;
+       } else {
+               kfree(bearers);
+               kfree(media_list);
+               bearers = 0;
+               media_list = 0;
+               res = -ENOMEM;
+       }
+       write_unlock_bh(&net_lock);
+       return res;
+}
+
+void bearer_stop(void)
+{
+       u32 i;
+
+       if (!bearers)
+               return;
+
+       for (i = 0; i < MAX_BEARERS; i++) {
+               if (bearers[i].active)
+                       bearers[i].publ.blocked = 1;
+       }
+       for (i = 0; i < MAX_BEARERS; i++) {
+               if (bearers[i].active)
+                       bearer_disable(bearers[i].publ.name);
+       }
+       kfree(bearers);
+       kfree(media_list);
+       bearers = 0;
+       media_list = 0;
+       media_count = 0;
+}
+
+
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
new file mode 100644 (file)
index 0000000..02a16f8
--- /dev/null
@@ -0,0 +1,169 @@
+/*
+ * net/tipc/bearer.h: Include file for TIPC bearer code
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_BEARER_H
+#define _TIPC_BEARER_H
+
+#include <net/tipc/tipc_bearer.h>
+#include "bcast.h"
+
+#define MAX_BEARERS 8
+#define MAX_MEDIA 4
+
+
+/**
+ * struct media - TIPC media information available to internal users
+ * @send_msg: routine which handles buffer transmission
+ * @enable_bearer: routine which enables a bearer
+ * @disable_bearer: routine which disables a bearer
+ * @addr2str: routine which converts bearer's address to string form
+ * @bcast_addr: media address used in broadcasting
+ * @bcast: non-zero if media supports broadcasting [currently mandatory]
+ * @priority: default link (and bearer) priority
+ * @tolerance: default time (in ms) before declaring link failure
+ * @window: default window (in packets) before declaring link congestion
+ * @type_id: TIPC media identifier [defined in tipc_bearer.h]
+ * @name: media name
+ */
+struct media {
+       int (*send_msg)(struct sk_buff *buf, 
+                       struct tipc_bearer *b_ptr,
+                       struct tipc_media_addr *dest);
+       int (*enable_bearer)(struct tipc_bearer *b_ptr);
+       void (*disable_bearer)(struct tipc_bearer *b_ptr);
+       char *(*addr2str)(struct tipc_media_addr *a, 
+                         char *str_buf, int str_size);
+       struct tipc_media_addr bcast_addr;
+       int bcast;
+       u32 priority;
+       u32 tolerance;
+       u32 window;
+       u32 type_id;
+       char name[TIPC_MAX_MEDIA_NAME];
+};
+
+/**
+ * struct bearer - TIPC bearer information available to internal users
+ * @publ: bearer information available to privileged users
+ * @media: ptr to media structure associated with bearer
+ * @priority: default link priority for bearer
+ * @detect_scope: network address mask used during automatic link creation
+ * @identity: array index of this bearer within TIPC bearer array
+ * @link_req: ptr to (optional) structure making periodic link setup requests
+ * @links: list of non-congested links associated with bearer
+ * @cong_links: list of congested links associated with bearer
+ * @continue_count: # of times bearer has resumed after congestion or blocking
+ * @active: non-zero if bearer structure is in use
+ * @net_plane: network plane ('A' through 'H') currently associated with bearer
+ * @nodes: indicates which nodes in cluster can be reached through bearer
+ */
+struct bearer {
+       struct tipc_bearer publ;
+       struct media *media;
+       u32 priority;
+       u32 detect_scope;
+       u32 identity;
+       struct link_req *link_req;
+       struct list_head links;
+       struct list_head cong_links;
+       u32 continue_count;
+       int active;
+       char net_plane;
+       struct node_map nodes;
+};
+
+struct bearer_name {
+       char media_name[TIPC_MAX_MEDIA_NAME];
+       char if_name[TIPC_MAX_IF_NAME];
+};
+
+struct link;
+
+extern struct bearer *bearers;
+
+void media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a);
+struct sk_buff *media_get_names(void);
+
+struct sk_buff *bearer_get_names(void);
+void bearer_add_dest(struct bearer *b_ptr, u32 dest);
+void bearer_remove_dest(struct bearer *b_ptr, u32 dest);
+void bearer_schedule(struct bearer *b_ptr, struct link *l_ptr);
+struct bearer *bearer_find_interface(const char *if_name);
+int bearer_resolve_congestion(struct bearer *b_ptr, struct link *l_ptr);
+int bearer_init(void);
+void bearer_stop(void);
+int bearer_broadcast(struct sk_buff *buf, struct tipc_bearer *b_ptr,
+                    struct tipc_media_addr *dest);
+void bearer_lock_push(struct bearer *b_ptr);
+
+
+/**
+ * bearer_send - sends buffer to destination over bearer
+ * 
+ * Returns true (1) if successful, or false (0) if unable to send
+ * 
+ * IMPORTANT:
+ * The media send routine must not alter the buffer being passed in
+ * as it may be needed for later retransmission!
+ * 
+ * If the media send routine returns a non-zero value (indicating that 
+ * it was unable to send the buffer), it must:
+ *   1) mark the bearer as blocked,
+ *   2) call tipc_continue() once the bearer is able to send again.
+ * Media types that are unable to meet these two criteria must ensure their
+ * send routine always returns success -- even if the buffer was not sent --
+ * and let TIPC's link code deal with the undelivered message. 
+ */
+
+static inline int bearer_send(struct bearer *b_ptr, struct sk_buff *buf,
+                             struct tipc_media_addr *dest)
+{
+       return !b_ptr->media->send_msg(buf, &b_ptr->publ, dest);
+}
+
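+/*
+ * Editorial sketch (illustration only, not part of this patch): skeleton of
+ * a media send routine that honours the rules above.  example_send_msg() and
+ * example_hw_xmit() are hypothetical names; a real media (e.g. eth_media.c)
+ * supplies its own transmit path.
+ */
+#if 0
+static int example_send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
+                           struct tipc_media_addr *dest)
+{
+       /* Must not modify 'buf': the link layer may need it for retransmission */
+       if (example_hw_xmit(buf, dest) == 0)
+               return 0;
+
+       /* Unable to send: mark the bearer blocked ... */
+       tb_ptr->blocked = 1;
+       /* ... and have the driver call tipc_continue(tb_ptr) once ready again */
+       return 1;
+}
+#endif
+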
+/**
+ * bearer_congested - determines if bearer is currently congested
+ */
+
+static inline int bearer_congested(struct bearer *b_ptr, struct link *l_ptr)
+{
+       if (unlikely(b_ptr->publ.blocked))
+               return 1;
+       if (likely(list_empty(&b_ptr->cong_links)))
+               return 0;
+       return !bearer_resolve_congestion(b_ptr, l_ptr);
+}
+
+#endif
diff --git a/net/tipc/cluster.c b/net/tipc/cluster.c
new file mode 100644 (file)
index 0000000..d7d0f5f
--- /dev/null
@@ -0,0 +1,573 @@
+/*
+ * net/tipc/cluster.c: TIPC cluster management routines
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "cluster.h"
+#include "addr.h"
+#include "node_subscr.h"
+#include "link.h"
+#include "node.h"
+#include "net.h"
+#include "msg.h"
+#include "bearer.h"
+
+void cluster_multicast(struct cluster *c_ptr, struct sk_buff *buf, 
+                      u32 lower, u32 upper);
+struct sk_buff *cluster_prepare_routing_msg(u32 data_size, u32 dest);
+
+struct node **local_nodes = 0;
+struct node_map cluster_bcast_nodes = {0,{0,}};
+u32 highest_allowed_slave = 0;
+
+struct cluster *cluster_create(u32 addr)
+{
+       struct _zone *z_ptr;
+       struct cluster *c_ptr;
+       int max_nodes; 
+       int alloc;
+
+       c_ptr = (struct cluster *)kmalloc(sizeof(*c_ptr), GFP_ATOMIC);
+       if (c_ptr == NULL)
+               return 0;
+       memset(c_ptr, 0, sizeof(*c_ptr));
+
+       c_ptr->addr = tipc_addr(tipc_zone(addr), tipc_cluster(addr), 0);
+       if (in_own_cluster(addr))
+               max_nodes = LOWEST_SLAVE + tipc_max_slaves;
+       else
+               max_nodes = tipc_max_nodes + 1;
+       alloc = sizeof(void *) * (max_nodes + 1);
+       c_ptr->nodes = (struct node **)kmalloc(alloc, GFP_ATOMIC);
+       if (c_ptr->nodes == NULL) {
+               kfree(c_ptr);
+               return 0;
+       }
+       memset(c_ptr->nodes, 0, alloc);  
+       if (in_own_cluster(addr))
+               local_nodes = c_ptr->nodes;
+       c_ptr->highest_slave = LOWEST_SLAVE - 1;
+       c_ptr->highest_node = 0;
+       
+       z_ptr = zone_find(tipc_zone(addr));
+       if (z_ptr == NULL) {
+               z_ptr = zone_create(addr);
+       }
+       if (z_ptr != NULL) {
+               zone_attach_cluster(z_ptr, c_ptr);
+               c_ptr->owner = z_ptr;
+       }
+       else {
+               kfree(c_ptr);
+               c_ptr = 0;
+       }
+
+       return c_ptr;
+}
+
+void cluster_delete(struct cluster *c_ptr)
+{
+       u32 n_num;
+
+       if (!c_ptr)
+               return;
+       for (n_num = 1; n_num <= c_ptr->highest_node; n_num++) {
+               node_delete(c_ptr->nodes[n_num]);
+       }
+       for (n_num = LOWEST_SLAVE; n_num <= c_ptr->highest_slave; n_num++) {
+               node_delete(c_ptr->nodes[n_num]);
+       }
+       kfree(c_ptr->nodes);
+       kfree(c_ptr);
+}
+
+u32 cluster_next_node(struct cluster *c_ptr, u32 addr)
+{
+       struct node *n_ptr;
+       u32 n_num = tipc_node(addr) + 1;
+
+       if (!c_ptr)
+               return addr;
+       for (; n_num <= c_ptr->highest_node; n_num++) {
+               n_ptr = c_ptr->nodes[n_num];
+               if (n_ptr && node_has_active_links(n_ptr))
+                       return n_ptr->addr;
+       }
+       for (n_num = 1; n_num < tipc_node(addr); n_num++) {
+               n_ptr = c_ptr->nodes[n_num];
+               if (n_ptr && node_has_active_links(n_ptr))
+                       return n_ptr->addr;
+       }
+       return 0;
+}
+
+void cluster_attach_node(struct cluster *c_ptr, struct node *n_ptr)
+{
+       u32 n_num = tipc_node(n_ptr->addr);
+       u32 max_n_num = tipc_max_nodes;
+
+       if (in_own_cluster(n_ptr->addr))
+               max_n_num = highest_allowed_slave;
+       assert(n_num > 0);
+       assert(n_num <= max_n_num);
+       assert(c_ptr->nodes[n_num] == 0);
+       c_ptr->nodes[n_num] = n_ptr;
+       if (n_num > c_ptr->highest_node)
+               c_ptr->highest_node = n_num;
+}
+
+/**
+ * cluster_select_router - select router to a cluster
+ * 
+ * Uses deterministic and fair algorithm.
+ */
+
+u32 cluster_select_router(struct cluster *c_ptr, u32 ref)
+{
+       u32 n_num;
+       u32 ulim = c_ptr->highest_node;
+       u32 mask;
+       u32 tstart;
+
+       assert(!in_own_cluster(c_ptr->addr));
+       if (!ulim)
+               return 0;
+
+       /* Start entry must be random */
+       mask = tipc_max_nodes;
+       while (mask > ulim)
+               mask >>= 1;
+       tstart = ref & mask;
+       n_num = tstart;
+
+       /* Lookup upwards with wrap-around */
+       do {
+               if (node_is_up(c_ptr->nodes[n_num]))
+                       break;
+       } while (++n_num <= ulim);
+       if (n_num > ulim) {
+               n_num = 1;
+               do {
+                       if (node_is_up(c_ptr->nodes[n_num]))
+                               break;
+               } while (++n_num < tstart);
+               if (n_num == tstart)
+                       return 0;
+       }
+       assert(n_num <= ulim);
+       return node_select_router(c_ptr->nodes[n_num], ref);
+}
+
+/**
+ * cluster_select_node - select destination node within a remote cluster
+ * 
+ * Uses deterministic and fair algorithm.
+ */
+
+struct node *cluster_select_node(struct cluster *c_ptr, u32 selector)
+{
+       u32 n_num;
+       u32 mask = tipc_max_nodes;
+       u32 start_entry;
+
+       assert(!in_own_cluster(c_ptr->addr));
+       if (!c_ptr->highest_node)
+               return 0;
+
+       /* Start entry must be random */
+       while (mask > c_ptr->highest_node) {
+               mask >>= 1;
+       }
+       start_entry = (selector & mask) ? selector & mask : 1u;
+       assert(start_entry <= c_ptr->highest_node);
+
+       /* Lookup upwards with wrap-around */
+       for (n_num = start_entry; n_num <= c_ptr->highest_node; n_num++) {
+               if (node_has_active_links(c_ptr->nodes[n_num]))
+                       return c_ptr->nodes[n_num];
+       }
+       for (n_num = 1; n_num < start_entry; n_num++) {
+               if (node_has_active_links(c_ptr->nodes[n_num]))
+                       return c_ptr->nodes[n_num];
+       }
+       return 0;
+}
+
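+/*
+ * Editorial sketch (illustration only, not part of this patch): since the
+ * selection above is deterministic, a caller can feed any stable value as
+ * the selector (a port reference is assumed here purely for illustration),
+ * so the same reference always maps to the same remote node while different
+ * references spread across the cluster.
+ */
+#if 0
+static u32 example_pick_dest(struct cluster *c_ptr, u32 port_ref)
+{
+       struct node *n_ptr = cluster_select_node(c_ptr, port_ref);
+
+       return n_ptr ? n_ptr->addr : 0;
+}
+#endif
+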
+/*
+ *    Routing table management: See description in node.c
+ */
+
+struct sk_buff *cluster_prepare_routing_msg(u32 data_size, u32 dest)
+{
+       u32 size = INT_H_SIZE + data_size;
+       struct sk_buff *buf = buf_acquire(size);
+       struct tipc_msg *msg;
+
+       if (buf) {
+               msg = buf_msg(buf);
+               memset((char *)msg, 0, size);
+               msg_init(msg, ROUTE_DISTRIBUTOR, 0, TIPC_OK, INT_H_SIZE, dest);
+       }
+       return buf;
+}
+
+void cluster_bcast_new_route(struct cluster *c_ptr, u32 dest,
+                            u32 lower, u32 upper)
+{
+       struct sk_buff *buf = cluster_prepare_routing_msg(0, c_ptr->addr);
+       struct tipc_msg *msg;
+
+       if (buf) {
+               msg = buf_msg(buf);
+               msg_set_remote_node(msg, dest);
+               msg_set_type(msg, ROUTE_ADDITION);
+               cluster_multicast(c_ptr, buf, lower, upper);
+       } else {
+               warn("Memory squeeze: broadcast of new route failed\n");
+       }
+}
+
+void cluster_bcast_lost_route(struct cluster *c_ptr, u32 dest,
+                             u32 lower, u32 upper)
+{
+       struct sk_buff *buf = cluster_prepare_routing_msg(0, c_ptr->addr);
+       struct tipc_msg *msg;
+
+       if (buf) {
+               msg = buf_msg(buf);
+               msg_set_remote_node(msg, dest);
+               msg_set_type(msg, ROUTE_REMOVAL);
+               cluster_multicast(c_ptr, buf, lower, upper);
+       } else {
+               warn("Memory squeeze: broadcast of lost route failed\n");
+       }
+}
+
+void cluster_send_slave_routes(struct cluster *c_ptr, u32 dest)
+{
+       struct sk_buff *buf;
+       struct tipc_msg *msg;
+       u32 highest = c_ptr->highest_slave;
+       u32 n_num;
+       int send = 0;
+
+       assert(!is_slave(dest));
+       assert(in_own_cluster(dest));
+       assert(in_own_cluster(c_ptr->addr));
+       if (highest <= LOWEST_SLAVE)
+               return;
+       buf = cluster_prepare_routing_msg(highest - LOWEST_SLAVE + 1,
+                                         c_ptr->addr);
+       if (buf) {
+               msg = buf_msg(buf);
+               msg_set_remote_node(msg, c_ptr->addr);
+               msg_set_type(msg, SLAVE_ROUTING_TABLE);
+               for (n_num = LOWEST_SLAVE; n_num <= highest; n_num++) {
+                       if (c_ptr->nodes[n_num] && 
+                           node_has_active_links(c_ptr->nodes[n_num])) {
+                               send = 1;
+                               msg_set_dataoctet(msg, n_num);
+                       }
+               }
+               if (send)
+                       link_send(buf, dest, dest);
+               else
+                       buf_discard(buf);
+       } else {
+               warn("Memory squeeze: broadcast of slave routes failed\n");
+       }
+}
+
+void cluster_send_ext_routes(struct cluster *c_ptr, u32 dest)
+{
+       struct sk_buff *buf;
+       struct tipc_msg *msg;
+       u32 highest = c_ptr->highest_node;
+       u32 n_num;
+       int send = 0;
+
+       if (in_own_cluster(c_ptr->addr))
+               return;
+       assert(!is_slave(dest));
+       assert(in_own_cluster(dest));
+       highest = c_ptr->highest_node;
+       buf = cluster_prepare_routing_msg(highest + 1, c_ptr->addr);
+       if (buf) {
+               msg = buf_msg(buf);
+               msg_set_remote_node(msg, c_ptr->addr);
+               msg_set_type(msg, EXT_ROUTING_TABLE);
+               for (n_num = 1; n_num <= highest; n_num++) {
+                       if (c_ptr->nodes[n_num] && 
+                           node_has_active_links(c_ptr->nodes[n_num])) {
+                               send = 1;
+                               msg_set_dataoctet(msg, n_num);
+                       }
+               }
+               if (send)
+                       link_send(buf, dest, dest);
+               else
+                       buf_discard(buf);
+       } else {
+               warn("Memory squeeze: broadcast of external route failed\n");
+       }
+}
+
+void cluster_send_local_routes(struct cluster *c_ptr, u32 dest)
+{
+       struct sk_buff *buf;
+       struct tipc_msg *msg;
+       u32 highest = c_ptr->highest_node;
+       u32 n_num;
+       int send = 0;
+
+       assert(is_slave(dest));
+       assert(in_own_cluster(c_ptr->addr));
+       buf = cluster_prepare_routing_msg(highest, c_ptr->addr);
+       if (buf) {
+               msg = buf_msg(buf);
+               msg_set_remote_node(msg, c_ptr->addr);
+               msg_set_type(msg, LOCAL_ROUTING_TABLE);
+               for (n_num = 1; n_num <= highest; n_num++) {
+                       if (c_ptr->nodes[n_num] && 
+                           node_has_active_links(c_ptr->nodes[n_num])) {
+                               send = 1;
+                               msg_set_dataoctet(msg, n_num);
+                       }
+               }
+               if (send)
+                       link_send(buf, dest, dest);
+               else
+                       buf_discard(buf);
+       } else {
+               warn("Memory squeeze: broadcast of local route failed\n");
+       }
+}
+
+void cluster_recv_routing_table(struct sk_buff *buf)
+{
+       struct tipc_msg *msg = buf_msg(buf);
+       struct cluster *c_ptr;
+       struct node *n_ptr;
+       unchar *node_table;
+       u32 table_size;
+       u32 router;
+       u32 rem_node = msg_remote_node(msg);
+       u32 z_num;
+       u32 c_num;
+       u32 n_num;
+
+       c_ptr = cluster_find(rem_node);
+       if (!c_ptr) {
+               c_ptr = cluster_create(rem_node);
+               if (!c_ptr) {
+                       buf_discard(buf);
+                       return;
+               }
+       }
+
+       node_table = buf->data + msg_hdr_sz(msg);
+       table_size = msg_size(msg) - msg_hdr_sz(msg);
+       router = msg_prevnode(msg);
+       z_num = tipc_zone(rem_node);
+       c_num = tipc_cluster(rem_node);
+
+       switch (msg_type(msg)) {
+       case LOCAL_ROUTING_TABLE:
+               assert(is_slave(tipc_own_addr));
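+               /* fall through */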
+       case EXT_ROUTING_TABLE:
+               for (n_num = 1; n_num < table_size; n_num++) {
+                       if (node_table[n_num]) {
+                               u32 addr = tipc_addr(z_num, c_num, n_num);
+                               n_ptr = c_ptr->nodes[n_num];
+                               if (!n_ptr) {
+                                       n_ptr = node_create(addr);
+                               }
+                               if (n_ptr)
+                                       node_add_router(n_ptr, router);
+                       }
+               }
+               break;
+       case SLAVE_ROUTING_TABLE:
+               assert(!is_slave(tipc_own_addr));
+               assert(in_own_cluster(c_ptr->addr));
+               for (n_num = 1; n_num < table_size; n_num++) {
+                       if (node_table[n_num]) {
+                               u32 slave_num = n_num + LOWEST_SLAVE;
+                               u32 addr = tipc_addr(z_num, c_num, slave_num);
+                               n_ptr = c_ptr->nodes[slave_num];
+                               if (!n_ptr) {
+                                       n_ptr = node_create(addr);
+                               }
+                               if (n_ptr)
+                                       node_add_router(n_ptr, router);
+                       }
+               }
+               break;
+       case ROUTE_ADDITION:
+               if (!is_slave(tipc_own_addr)) {
+                       assert(!in_own_cluster(c_ptr->addr)
+                              || is_slave(rem_node));
+               } else {
+                       assert(in_own_cluster(c_ptr->addr)
+                              && !is_slave(rem_node));
+               }
+               n_ptr = c_ptr->nodes[tipc_node(rem_node)];
+               if (!n_ptr)
+                       n_ptr = node_create(rem_node);
+               if (n_ptr)
+                       node_add_router(n_ptr, router);
+               break;
+       case ROUTE_REMOVAL:
+               if (!is_slave(tipc_own_addr)) {
+                       assert(!in_own_cluster(c_ptr->addr)
+                              || is_slave(rem_node));
+               } else {
+                       assert(in_own_cluster(c_ptr->addr)
+                              && !is_slave(rem_node));
+               }
+               n_ptr = c_ptr->nodes[tipc_node(rem_node)];
+               if (n_ptr)
+                       node_remove_router(n_ptr, router);
+               break;
+       default:
+               assert(!"Illegal routing manager message received\n");
+       }
+       buf_discard(buf);
+}
+
+void cluster_remove_as_router(struct cluster *c_ptr, u32 router)
+{
+       u32 start_entry;
+       u32 tstop;
+       u32 n_num;
+
+       if (is_slave(router))
+               return; /* Slave nodes can not be routers */
+
+       if (in_own_cluster(c_ptr->addr)) {
+               start_entry = LOWEST_SLAVE;
+               tstop = c_ptr->highest_slave;
+       } else {
+               start_entry = 1;
+               tstop = c_ptr->highest_node;
+       }
+
+       for (n_num = start_entry; n_num <= tstop; n_num++) {
+               if (c_ptr->nodes[n_num]) {
+                       node_remove_router(c_ptr->nodes[n_num], router);
+               }
+       }
+}
+
+/**
+ * cluster_multicast - multicast message to local nodes 
+ */
+
+void cluster_multicast(struct cluster *c_ptr, struct sk_buff *buf, 
+                      u32 lower, u32 upper)
+{
+       struct sk_buff *buf_copy;
+       struct node *n_ptr;
+       u32 n_num;
+       u32 tstop;
+
+       assert(lower <= upper);
+       assert(((lower >= 1) && (lower <= tipc_max_nodes)) ||
+              ((lower >= LOWEST_SLAVE) && (lower <= highest_allowed_slave)));
+       assert(((upper >= 1) && (upper <= tipc_max_nodes)) ||
+              ((upper >= LOWEST_SLAVE) && (upper <= highest_allowed_slave)));
+       assert(in_own_cluster(c_ptr->addr));
+
+       tstop = is_slave(upper) ? c_ptr->highest_slave : c_ptr->highest_node;
+       if (tstop > upper)
+               tstop = upper;
+       for (n_num = lower; n_num <= tstop; n_num++) {
+               n_ptr = c_ptr->nodes[n_num];
+               if (n_ptr && node_has_active_links(n_ptr)) {
+                       buf_copy = skb_copy(buf, GFP_ATOMIC);
+                       if (buf_copy == NULL)
+                               break;
+                       msg_set_destnode(buf_msg(buf_copy), n_ptr->addr);
+                       link_send(buf_copy, n_ptr->addr, n_ptr->addr);
+               }
+       }
+       buf_discard(buf);
+}
+
+/**
+ * cluster_broadcast - broadcast message to all nodes within cluster
+ */
+
+void cluster_broadcast(struct sk_buff *buf)
+{
+       struct sk_buff *buf_copy;
+       struct cluster *c_ptr;
+       struct node *n_ptr;
+       u32 n_num;
+       u32 tstart;
+       u32 tstop;
+       u32 node_type;
+
+       if (tipc_mode == TIPC_NET_MODE) {
+               c_ptr = cluster_find(tipc_own_addr);
+               assert(in_own_cluster(c_ptr->addr));    /* For now */
+
+               /* Send to standard nodes, then repeat loop sending to slaves */
+               tstart = 1;
+               tstop = c_ptr->highest_node;
+               for (node_type = 1; node_type <= 2; node_type++) {
+                       for (n_num = tstart; n_num <= tstop; n_num++) {
+                               n_ptr = c_ptr->nodes[n_num];
+                               if (n_ptr && node_has_active_links(n_ptr)) {
+                                       buf_copy = skb_copy(buf, GFP_ATOMIC);
+                                       if (buf_copy == NULL)
+                                               goto exit;
+                                       msg_set_destnode(buf_msg(buf_copy), 
+                                                        n_ptr->addr);
+                                       link_send(buf_copy, n_ptr->addr, 
+                                                 n_ptr->addr);
+                               }
+                       }
+                       tstart = LOWEST_SLAVE;
+                       tstop = c_ptr->highest_slave;
+               }
+       }
+exit:
+       buf_discard(buf);
+}
+
+int cluster_init(void)
+{
+       highest_allowed_slave = LOWEST_SLAVE + tipc_max_slaves;
+       return cluster_create(tipc_own_addr) ? TIPC_OK : -ENOMEM;
+}
+
diff --git a/net/tipc/cluster.h b/net/tipc/cluster.h
new file mode 100644 (file)
index 0000000..c12875a
--- /dev/null
@@ -0,0 +1,89 @@
+/*
+ * net/tipc/cluster.h: Include file for TIPC cluster management routines
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_CLUSTER_H
+#define _TIPC_CLUSTER_H
+
+#include "addr.h"
+#include "zone.h"
+
+#define LOWEST_SLAVE  2048u
+
+/**
+ * struct cluster - TIPC cluster structure
+ * @addr: network address of cluster
+ * @owner: pointer to zone that cluster belongs to
+ * @nodes: array of pointers to all nodes within cluster
+ * @highest_node: id of highest numbered node within cluster
+ * @highest_slave: id of highest numbered slave node within cluster (used for secondary node support)
+ */
+struct cluster {
+       u32 addr;
+       struct _zone *owner;
+       struct node **nodes;
+       u32 highest_node;
+       u32 highest_slave;
+};
+
+
+extern struct node **local_nodes;
+extern u32 highest_allowed_slave;
+extern struct node_map cluster_bcast_nodes;
+
+void cluster_remove_as_router(struct cluster *c_ptr, u32 router);
+void cluster_send_ext_routes(struct cluster *c_ptr, u32 dest);
+struct node *cluster_select_node(struct cluster *c_ptr, u32 selector);
+u32 cluster_select_router(struct cluster *c_ptr, u32 ref);
+void cluster_recv_routing_table(struct sk_buff *buf);
+struct cluster *cluster_create(u32 addr);
+void cluster_delete(struct cluster *c_ptr);
+void cluster_attach_node(struct cluster *c_ptr, struct node *n_ptr);
+void cluster_send_slave_routes(struct cluster *c_ptr, u32 dest);
+void cluster_broadcast(struct sk_buff *buf);
+int cluster_init(void);
+u32 cluster_next_node(struct cluster *c_ptr, u32 addr);
+void cluster_bcast_new_route(struct cluster *c_ptr, u32 dest, u32 lo, u32 hi);
+void cluster_send_local_routes(struct cluster *c_ptr, u32 dest);
+void cluster_bcast_lost_route(struct cluster *c_ptr, u32 dest, u32 lo, u32 hi);
+
+static inline struct cluster *cluster_find(u32 addr)
+{
+       struct _zone *z_ptr = zone_find(addr);
+
+       if (z_ptr)
+               return z_ptr->clusters[1];
+       return 0;
+}
+
+#endif
diff --git a/net/tipc/config.c b/net/tipc/config.c
new file mode 100644 (file)
index 0000000..0f31c34
--- /dev/null
@@ -0,0 +1,715 @@
+/*
+ * net/tipc/config.c: TIPC configuration management code
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2004-2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "dbg.h"
+#include "bearer.h"
+#include "port.h"
+#include "link.h"
+#include "zone.h"
+#include "addr.h"
+#include "name_table.h"
+#include "node.h"
+#include "config.h"
+#include "discover.h"
+
+struct subscr_data {
+       char usr_handle[8];
+       u32 domain;
+       u32 port_ref;
+       struct list_head subd_list;
+};
+
+struct manager {
+       u32 user_ref;
+       u32 port_ref;
+       u32 subscr_ref;
+       u32 link_subscriptions;
+       struct list_head link_subscribers;
+};
+
+static struct manager mng = { 0};
+
+static spinlock_t config_lock = SPIN_LOCK_UNLOCKED;
+
+static const void *req_tlv_area;       /* request message TLV area */
+static int req_tlv_space;              /* request message TLV area size */
+static int rep_headroom;               /* reply message headroom to use */
+
+
+void cfg_link_event(u32 addr, char *name, int up)
+{
+       /* TIPC DOESN'T HANDLE LINK EVENT SUBSCRIPTIONS AT THE MOMENT */
+}
+
+
+struct sk_buff *cfg_reply_alloc(int payload_size)
+{
+       struct sk_buff *buf;
+
+       buf = alloc_skb(rep_headroom + payload_size, GFP_ATOMIC);
+       if (buf)
+               skb_reserve(buf, rep_headroom);
+       return buf;
+}
+
+int cfg_append_tlv(struct sk_buff *buf, int tlv_type, 
+                  void *tlv_data, int tlv_data_size)
+{
+       struct tlv_desc *tlv = (struct tlv_desc *)buf->tail;
+       int new_tlv_space = TLV_SPACE(tlv_data_size);
+
+       if (skb_tailroom(buf) < new_tlv_space) {
+               dbg("cfg_append_tlv unable to append TLV\n");
+               return 0;
+       }
+       skb_put(buf, new_tlv_space);
+       tlv->tlv_type = htons(tlv_type);
+       tlv->tlv_len  = htons(TLV_LENGTH(tlv_data_size));
+       if (tlv_data_size && tlv_data)
+               memcpy(TLV_DATA(tlv), tlv_data, tlv_data_size);
+       return 1;
+}
+
+struct sk_buff *cfg_reply_unsigned_type(u16 tlv_type, u32 value)
+{
+       struct sk_buff *buf;
+       u32 value_net;
+
+       buf = cfg_reply_alloc(TLV_SPACE(sizeof(value)));
+       if (buf) {
+               value_net = htonl(value);
+               cfg_append_tlv(buf, tlv_type, &value_net, 
+                              sizeof(value_net));
+       }
+       return buf;
+}
+
+struct sk_buff *cfg_reply_string_type(u16 tlv_type, char *string)
+{
+       struct sk_buff *buf;
+       int string_len = strlen(string) + 1;
+
+       buf = cfg_reply_alloc(TLV_SPACE(string_len));
+       if (buf)
+               cfg_append_tlv(buf, tlv_type, string, string_len);
+       return buf;
+}
+
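+/*
+ * Editorial sketch (illustration only, not part of this patch): the helpers
+ * above can be combined to build a reply carrying several TLVs, e.g. a list
+ * of strings.  The 32-byte tailroom estimate per entry and the use of
+ * TIPC_TLV_BEARER_NAME as the TLV type are illustrative assumptions.
+ */
+#if 0
+static struct sk_buff *example_reply_string_list(char **strings, int count)
+{
+       struct sk_buff *buf;
+       int i;
+
+       /* Reserve tailroom for 'count' TLVs of up to 32 bytes of data each */
+       buf = cfg_reply_alloc(count * TLV_SPACE(32));
+       if (!buf)
+               return NULL;
+       for (i = 0; i < count; i++) {
+               /* cfg_append_tlv() returns 0 once the buffer runs out of room */
+               if (!cfg_append_tlv(buf, TIPC_TLV_BEARER_NAME, strings[i],
+                                   strlen(strings[i]) + 1))
+                       break;
+       }
+       return buf;
+}
+#endif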
+
+
+
+#if 0
+
+/* Obsolete code for handling commands that have not yet been implemented the new way */
+
+int tipc_cfg_cmd(const struct tipc_cmd_msg * msg,
+                char *data,
+                u32 sz,
+                u32 *ret_size,
+                struct tipc_portid *orig)
+{
+       int rv = -EINVAL;
+       u32 cmd = msg->cmd;
+
+       *ret_size = 0;
+       switch (cmd) {
+       case TIPC_REMOVE_LINK:
+       case TIPC_CMD_BLOCK_LINK:
+       case TIPC_CMD_UNBLOCK_LINK:
+               if (!cfg_check_connection(orig))
+                       rv = link_control(msg->argv.link_name, msg->cmd, 0);
+               break;
+       case TIPC_ESTABLISH:
+               {
+                       int connected;
+
+                       tipc_isconnected(mng.conn_port_ref, &connected);
+                       if (connected || !orig) {
+                               rv = TIPC_FAILURE;
+                               break;
+                       }
+                       rv = tipc_connect2port(mng.conn_port_ref, orig);
+                       if (rv == TIPC_OK)
+                               orig = 0;
+                       break;
+               }
+       case TIPC_GET_PEER_ADDRESS:
+               *ret_size = link_peer_addr(msg->argv.link_name, data, sz);
+               break;
+       case TIPC_GET_ROUTES:
+               rv = TIPC_OK;
+               break;
+       default: {}
+       }
+       if (*ret_size)
+               rv = TIPC_OK;
+       return rv;
+}
+
+static void cfg_cmd_event(struct tipc_cmd_msg *msg,
+                         char *data,
+                         u32 sz,        
+                         struct tipc_portid const *orig)
+{
+       int rv = -EINVAL;
+       struct tipc_cmd_result_msg rmsg;
+       struct iovec msg_sect[2];
+       int *arg;
+
+       msg->cmd = ntohl(msg->cmd);
+
+       cfg_prepare_res_msg(msg->cmd, msg->usr_handle, rv, &rmsg, msg_sect, 
+                           data, 0);
+       if (ntohl(msg->magic) != TIPC_MAGIC)
+               goto exit;
+
+       switch (msg->cmd) {
+       case TIPC_CREATE_LINK:
+               if (!cfg_check_connection(orig))
+                       rv = disc_create_link(&msg->argv.create_link);
+               break;
+       case TIPC_LINK_SUBSCRIBE:
+               {
+                       struct subscr_data *sub;
+
+                       if (mng.link_subscriptions > 64)
+                               break;
+                       sub = (struct subscr_data *)kmalloc(sizeof(*sub),
+                                                           GFP_ATOMIC);
+                       if (sub == NULL) {
+                               warn("Memory squeeze; dropped remote link subscription\n");
+                               break;
+                       }
+                       INIT_LIST_HEAD(&sub->subd_list);
+                       tipc_createport(mng.user_ref,
+                                       (void *)sub,
+                                       TIPC_HIGH_IMPORTANCE,
+                                       0,
+                                       0,
+                                       (tipc_conn_shutdown_event)cfg_linksubscr_cancel,
+                                       0,
+                                       0,
+                                       (tipc_conn_msg_event)cfg_linksubscr_cancel,
+                                       0,
+                                       &sub->port_ref);
+                       if (!sub->port_ref) {
+                               kfree(sub);
+                               break;
+                       }
+                       memcpy(sub->usr_handle,msg->usr_handle,
+                              sizeof(sub->usr_handle));
+                       sub->domain = msg->argv.domain;
+                       list_add_tail(&sub->subd_list, &mng.link_subscribers);
+                       tipc_connect2port(sub->port_ref, orig);
+                       rmsg.retval = TIPC_OK;
+                       tipc_send(sub->port_ref, 2u, msg_sect);
+                       mng.link_subscriptions++;
+                       return;
+               }
+       default:
+               rv = tipc_cfg_cmd(msg, data, sz, (u32 *)&msg_sect[1].iov_len, orig);
+       }
+       exit:
+       rmsg.result_len = htonl(msg_sect[1].iov_len);
+       rmsg.retval = htonl(rv);
+       cfg_respond(msg_sect, 2u, orig);
+}
+#endif
+
+static struct sk_buff *cfg_enable_bearer(void)
+{
+       struct tipc_bearer_config *args;
+
+       if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_BEARER_CONFIG))
+               return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+
+       args = (struct tipc_bearer_config *)TLV_DATA(req_tlv_area);
+       if (tipc_enable_bearer(args->name,
+                              ntohl(args->detect_scope),
+                              ntohl(args->priority)))
+               return cfg_reply_error_string("unable to enable bearer");
+
+       return cfg_reply_none();
+}
+
+static struct sk_buff *cfg_disable_bearer(void)
+{
+       if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_BEARER_NAME))
+               return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+
+       if (tipc_disable_bearer((char *)TLV_DATA(req_tlv_area)))
+               return cfg_reply_error_string("unable to disable bearer");
+
+       return cfg_reply_none();
+}
+
+static struct sk_buff *cfg_set_own_addr(void)
+{
+       u32 addr;
+
+       if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
+               return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+
+       addr = *(u32 *)TLV_DATA(req_tlv_area);
+       addr = ntohl(addr);
+       if (addr == tipc_own_addr)
+               return cfg_reply_none();
+       if (!addr_node_valid(addr))
+               return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
+                                             " (node address)");
+       if (tipc_own_addr)
+               return cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
+                                             " (cannot change node address once assigned)");
+
+       spin_unlock_bh(&config_lock);
+       stop_net();
+       tipc_own_addr = addr;
+       start_net();
+       spin_lock_bh(&config_lock);
+       return cfg_reply_none();
+}
+
+static struct sk_buff *cfg_set_remote_mng(void)
+{
+       u32 value;
+
+       if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
+               return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+
+       value = *(u32 *)TLV_DATA(req_tlv_area);
+       value = ntohl(value);
+       tipc_remote_management = (value != 0);
+       return cfg_reply_none();
+}
+
+static struct sk_buff *cfg_set_max_publications(void)
+{
+       u32 value;
+
+       if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
+               return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+
+       value = *(u32 *)TLV_DATA(req_tlv_area);
+       value = ntohl(value);
+       if (value != delimit(value, 1, 65535))
+               return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
+                                             " (max publications must be 1-65535)");
+       tipc_max_publications = value;
+       return cfg_reply_none();
+}
+
+static struct sk_buff *cfg_set_max_subscriptions(void)
+{
+       u32 value;
+
+       if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
+               return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+
+       value = *(u32 *)TLV_DATA(req_tlv_area);
+       value = ntohl(value);
+       if (value != delimit(value, 1, 65535))
+               return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
+                                             " (max subscriptions must be 1-65535)");
+       tipc_max_subscriptions = value;
+       return cfg_reply_none();
+}
+
+static struct sk_buff *cfg_set_max_ports(void)
+{
+       int orig_mode;
+       u32 value;
+
+       if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
+               return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+       value = *(u32 *)TLV_DATA(req_tlv_area);
+       value = ntohl(value);
+       if (value != delimit(value, 127, 65535))
+               return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
+                                             " (max ports must be 127-65535)");
+
+       if (value == tipc_max_ports)
+               return cfg_reply_none();
+
+       if (atomic_read(&tipc_user_count) > 2)
+               return cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
+                                             " (cannot change max ports while TIPC users exist)");
+
+       spin_unlock_bh(&config_lock);
+       orig_mode = tipc_get_mode();
+       if (orig_mode == TIPC_NET_MODE)
+               stop_net();
+       stop_core();
+       tipc_max_ports = value;
+       start_core();
+       if (orig_mode == TIPC_NET_MODE)
+               start_net();
+       spin_lock_bh(&config_lock);
+       return cfg_reply_none();
+}
+
+static struct sk_buff *set_net_max(int value, int *parameter)
+{
+       int orig_mode;
+
+       if (value != *parameter) {
+               orig_mode = tipc_get_mode();
+               if (orig_mode == TIPC_NET_MODE)
+                       stop_net();
+               *parameter = value;
+               if (orig_mode == TIPC_NET_MODE)
+                       start_net();
+       }
+
+       return cfg_reply_none();
+}
+
+static struct sk_buff *cfg_set_max_zones(void)
+{
+       u32 value;
+
+       if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
+               return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+       value = *(u32 *)TLV_DATA(req_tlv_area);
+       value = ntohl(value);
+       if (value != delimit(value, 1, 255))
+               return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
+                                             " (max zones must be 1-255)");
+       return set_net_max(value, &tipc_max_zones);
+}
+
+static struct sk_buff *cfg_set_max_clusters(void)
+{
+       u32 value;
+
+       if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
+               return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+       value = *(u32 *)TLV_DATA(req_tlv_area);
+       value = ntohl(value);
+       if (value != 1)
+               return cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
+                                             " (max clusters fixed at 1)");
+       return cfg_reply_none();
+}
+
+static struct sk_buff *cfg_set_max_nodes(void)
+{
+       u32 value;
+
+       if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
+               return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+       value = *(u32 *)TLV_DATA(req_tlv_area);
+       value = ntohl(value);
+       if (value != delimit(value, 8, 2047))
+               return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
+                                             " (max nodes must be 8-2047)");
+       return set_net_max(value, &tipc_max_nodes);
+}
+
+static struct sk_buff *cfg_set_max_slaves(void)
+{
+       u32 value;
+
+       if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
+               return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+       value = *(u32 *)TLV_DATA(req_tlv_area);
+       value = ntohl(value);
+       if (value != 0)
+               return cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
+                                             " (max secondary nodes fixed at 0)");
+       return cfg_reply_none();
+}
+
+static struct sk_buff *cfg_set_netid(void)
+{
+       u32 value;
+
+       if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
+               return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+       value = *(u32 *)TLV_DATA(req_tlv_area);
+       value = ntohl(value);
+       if (value != delimit(value, 1, 9999))
+               return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
+                                             " (network id must be 1-9999)");
+
+       if (tipc_own_addr)
+               return cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
+                                             " (cannot change network id once part of network)");
+       
+       return set_net_max(value, &tipc_net_id);
+}
+
+struct sk_buff *cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area,
+                          int request_space, int reply_headroom)
+{
+       struct sk_buff *rep_tlv_buf;
+
+       spin_lock_bh(&config_lock);
+
+       /* Save request and reply details in a well-known location */
+
+       req_tlv_area = request_area;
+       req_tlv_space = request_space;
+       rep_headroom = reply_headroom;
+
+       /* Check command authorization */
+
+       if (likely(orig_node == tipc_own_addr)) {
+               /* command is permitted */
+       } else if (cmd >= 0x8000) {
+               rep_tlv_buf = cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
+                                                    " (cannot be done remotely)");
+               goto exit;
+       } else if (!tipc_remote_management) {
+               rep_tlv_buf = cfg_reply_error_string(TIPC_CFG_NO_REMOTE);
+               goto exit;
+       } else if (cmd >= 0x4000) {
+               u32 domain = 0;
+
+               if ((nametbl_translate(TIPC_ZM_SRV, 0, &domain) == 0) ||
+                   (domain != orig_node)) {
+                       rep_tlv_buf = cfg_reply_error_string(TIPC_CFG_NOT_ZONE_MSTR);
+                       goto exit;
+               }
+       }
+
+       /* Call appropriate processing routine */
+
+       switch (cmd) {
+       case TIPC_CMD_NOOP:
+               rep_tlv_buf = cfg_reply_none();
+               break;
+       case TIPC_CMD_GET_NODES:
+               rep_tlv_buf = node_get_nodes(req_tlv_area, req_tlv_space);
+               break;
+       case TIPC_CMD_GET_LINKS:
+               rep_tlv_buf = node_get_links(req_tlv_area, req_tlv_space);
+               break;
+       case TIPC_CMD_SHOW_LINK_STATS:
+               rep_tlv_buf = link_cmd_show_stats(req_tlv_area, req_tlv_space);
+               break;
+       case TIPC_CMD_RESET_LINK_STATS:
+               rep_tlv_buf = link_cmd_reset_stats(req_tlv_area, req_tlv_space);
+               break;
+       case TIPC_CMD_SHOW_NAME_TABLE:
+               rep_tlv_buf = nametbl_get(req_tlv_area, req_tlv_space);
+               break;
+       case TIPC_CMD_GET_BEARER_NAMES:
+               rep_tlv_buf = bearer_get_names();
+               break;
+       case TIPC_CMD_GET_MEDIA_NAMES:
+               rep_tlv_buf = media_get_names();
+               break;
+       case TIPC_CMD_SHOW_PORTS:
+               rep_tlv_buf = port_get_ports();
+               break;
+#if 0
+       case TIPC_CMD_SHOW_PORT_STATS:
+               rep_tlv_buf = port_show_stats(req_tlv_area, req_tlv_space);
+               break;
+       case TIPC_CMD_RESET_PORT_STATS:
+               rep_tlv_buf = cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED);
+               break;
+#endif
+       case TIPC_CMD_SET_LOG_SIZE:
+               rep_tlv_buf = log_resize(req_tlv_area, req_tlv_space);
+               break;
+       case TIPC_CMD_DUMP_LOG:
+               rep_tlv_buf = log_dump();
+               break;
+       case TIPC_CMD_SET_LINK_TOL:
+       case TIPC_CMD_SET_LINK_PRI:
+       case TIPC_CMD_SET_LINK_WINDOW:
+               rep_tlv_buf = link_cmd_config(req_tlv_area, req_tlv_space, cmd);
+               break;
+       case TIPC_CMD_ENABLE_BEARER:
+               rep_tlv_buf = cfg_enable_bearer();
+               break;
+       case TIPC_CMD_DISABLE_BEARER:
+               rep_tlv_buf = cfg_disable_bearer();
+               break;
+       case TIPC_CMD_SET_NODE_ADDR:
+               rep_tlv_buf = cfg_set_own_addr();
+               break;
+       case TIPC_CMD_SET_REMOTE_MNG:
+               rep_tlv_buf = cfg_set_remote_mng();
+               break;
+       case TIPC_CMD_SET_MAX_PORTS:
+               rep_tlv_buf = cfg_set_max_ports();
+               break;
+       case TIPC_CMD_SET_MAX_PUBL:
+               rep_tlv_buf = cfg_set_max_publications();
+               break;
+       case TIPC_CMD_SET_MAX_SUBSCR:
+               rep_tlv_buf = cfg_set_max_subscriptions();
+               break;
+       case TIPC_CMD_SET_MAX_ZONES:
+               rep_tlv_buf = cfg_set_max_zones();
+               break;
+       case TIPC_CMD_SET_MAX_CLUSTERS:
+               rep_tlv_buf = cfg_set_max_clusters();
+               break;
+       case TIPC_CMD_SET_MAX_NODES:
+               rep_tlv_buf = cfg_set_max_nodes();
+               break;
+       case TIPC_CMD_SET_MAX_SLAVES:
+               rep_tlv_buf = cfg_set_max_slaves();
+               break;
+       case TIPC_CMD_SET_NETID:
+               rep_tlv_buf = cfg_set_netid();
+               break;
+       case TIPC_CMD_GET_REMOTE_MNG:
+               rep_tlv_buf = cfg_reply_unsigned(tipc_remote_management);
+               break;
+       case TIPC_CMD_GET_MAX_PORTS:
+               rep_tlv_buf = cfg_reply_unsigned(tipc_max_ports);
+               break;
+       case TIPC_CMD_GET_MAX_PUBL:
+               rep_tlv_buf = cfg_reply_unsigned(tipc_max_publications);
+               break;
+       case TIPC_CMD_GET_MAX_SUBSCR:
+               rep_tlv_buf = cfg_reply_unsigned(tipc_max_subscriptions);
+               break;
+       case TIPC_CMD_GET_MAX_ZONES:
+               rep_tlv_buf = cfg_reply_unsigned(tipc_max_zones);
+               break;
+       case TIPC_CMD_GET_MAX_CLUSTERS:
+               rep_tlv_buf = cfg_reply_unsigned(tipc_max_clusters);
+               break;
+       case TIPC_CMD_GET_MAX_NODES:
+               rep_tlv_buf = cfg_reply_unsigned(tipc_max_nodes);
+               break;
+       case TIPC_CMD_GET_MAX_SLAVES:
+               rep_tlv_buf = cfg_reply_unsigned(tipc_max_slaves);
+               break;
+       case TIPC_CMD_GET_NETID:
+               rep_tlv_buf = cfg_reply_unsigned(tipc_net_id);
+               break;
+       default:
+               rep_tlv_buf = NULL;
+               break;
+       }
+
+       /* Return reply buffer */
+exit:
+       spin_unlock_bh(&config_lock);
+       return rep_tlv_buf;
+}
+
+static void cfg_named_msg_event(void *userdata,
+                               u32 port_ref,
+                               struct sk_buff **buf,
+                               const unchar *msg,
+                               u32 size,
+                               u32 importance, 
+                               struct tipc_portid const *orig,
+                               struct tipc_name_seq const *dest)
+{
+       struct tipc_cfg_msg_hdr *req_hdr;
+       struct tipc_cfg_msg_hdr *rep_hdr;
+       struct sk_buff *rep_buf;
+
+       /* Validate configuration message header (ignore invalid message) */
+
+       req_hdr = (struct tipc_cfg_msg_hdr *)msg;
+       if ((size < sizeof(*req_hdr)) ||
+           (size != TCM_ALIGN(ntohl(req_hdr->tcm_len))) ||
+           (ntohs(req_hdr->tcm_flags) != TCM_F_REQUEST)) {
+               warn("discarded invalid configuration message\n");
+               return;
+       }
+
+       /* Generate reply for request (if unable to, return the request itself) */
+
+       rep_buf = cfg_do_cmd(orig->node,
+                            ntohs(req_hdr->tcm_type), 
+                            msg + sizeof(*req_hdr),
+                            size - sizeof(*req_hdr),
+                            BUF_HEADROOM + MAX_H_SIZE + sizeof(*rep_hdr));
+       if (rep_buf) {
+               skb_push(rep_buf, sizeof(*rep_hdr));
+               rep_hdr = (struct tipc_cfg_msg_hdr *)rep_buf->data;
+               memcpy(rep_hdr, req_hdr, sizeof(*rep_hdr));
+               rep_hdr->tcm_len = htonl(rep_buf->len);
+               rep_hdr->tcm_flags &= htons(~TCM_F_REQUEST);
+       } else {
+               rep_buf = *buf;
+               *buf = NULL;
+       }
+
+       /* NEED TO ADD CODE TO HANDLE FAILED SEND (SUCH AS CONGESTION) */
+       tipc_send_buf2port(port_ref, orig, rep_buf, rep_buf->len);
+}
+
+int cfg_init(void)
+{
+       struct tipc_name_seq seq;
+       int res;
+
+       memset(&mng, 0, sizeof(mng));
+       INIT_LIST_HEAD(&mng.link_subscribers);
+
+       res = tipc_attach(&mng.user_ref, 0, 0);
+       if (res)
+               goto failed;
+
+       res = tipc_createport(mng.user_ref, 0, TIPC_CRITICAL_IMPORTANCE,
+                             NULL, NULL, NULL,
+                             NULL, cfg_named_msg_event, NULL,
+                             NULL, &mng.port_ref);
+       if (res)
+               goto failed;
+
+       seq.type = TIPC_CFG_SRV;
+       seq.lower = seq.upper = tipc_own_addr;
+       res = nametbl_publish_rsv(mng.port_ref, TIPC_ZONE_SCOPE, &seq);
+       if (res)
+               goto failed;
+
+       return 0;
+
+failed:
+       err("Unable to create configuration service\n");
+       tipc_detach(mng.user_ref);
+       mng.user_ref = 0;
+       return res;
+}
+
+void cfg_stop(void)
+{
+       if (mng.user_ref) {
+               tipc_detach(mng.user_ref);
+               mng.user_ref = 0;
+       }
+}
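
For orientation, the sketch below shows what a well-formed configuration request looks like from the sender's side, using only the header fields that cfg_named_msg_event() validates. It assumes the struct tipc_cfg_msg_hdr layout and the TCM_ALIGN()/TCM_F_REQUEST definitions this patch adds in include/linux/tipc.h; build_get_netid_req() itself is purely illustrative and not part of the patch.

static int build_get_netid_req(char *buf, int buf_len)
{
        struct tipc_cfg_msg_hdr *hdr = (struct tipc_cfg_msg_hdr *)buf;
        int len = sizeof(*hdr);                 /* TIPC_CMD_GET_NETID takes no argument TLV */

        if (buf_len < TCM_ALIGN(len))
                return -EMSGSIZE;
        hdr->tcm_len   = htonl(len);            /* checked via TCM_ALIGN(ntohl(tcm_len)) above */
        hdr->tcm_type  = htons(TIPC_CMD_GET_NETID);
        hdr->tcm_flags = htons(TCM_F_REQUEST);  /* cleared again in the reply */
        return TCM_ALIGN(len);                  /* bytes to send to the TIPC_CFG_SRV name */
}
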
diff --git a/net/tipc/config.h b/net/tipc/config.h
new file mode 100644 (file)
index 0000000..7ac0af5
--- /dev/null
@@ -0,0 +1,76 @@
+/*
+ * net/tipc/config.h: Include file for TIPC configuration service code
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_CONFIG_H
+#define _TIPC_CONFIG_H
+
+/* ---------------------------------------------------------------------- */
+
+#include <linux/tipc.h>
+#include "link.h"
+
+struct sk_buff *cfg_reply_alloc(int payload_size);
+int cfg_append_tlv(struct sk_buff *buf, int tlv_type, 
+                  void *tlv_data, int tlv_data_size);
+struct sk_buff *cfg_reply_unsigned_type(u16 tlv_type, u32 value);
+struct sk_buff *cfg_reply_string_type(u16 tlv_type, char *string);
+
+static inline struct sk_buff *cfg_reply_none(void)
+{
+       return cfg_reply_alloc(0);
+}
+
+static inline struct sk_buff *cfg_reply_unsigned(u32 value)
+{
+       return cfg_reply_unsigned_type(TIPC_TLV_UNSIGNED, value);
+}
+
+static inline struct sk_buff *cfg_reply_error_string(char *string)
+{
+       return cfg_reply_string_type(TIPC_TLV_ERROR_STRING, string);
+}
+
+static inline struct sk_buff *cfg_reply_ultra_string(char *string)
+{
+       return cfg_reply_string_type(TIPC_TLV_ULTRA_STRING, string);
+}
+
+struct sk_buff *cfg_do_cmd(u32 orig_node, u16 cmd, 
+                          const void *req_tlv_area, int req_tlv_space, 
+                          int headroom);
+
+void cfg_link_event(u32 addr, char *name, int up);
+int  cfg_init(void);
+void cfg_stop(void);
+
+#endif
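
As a usage note, a typical cfg_do_cmd() branch pairs the reply helpers above with the TLV checking seen in log_resize() further down. The sketch below assumes a hypothetical settable parameter tipc_example_value; it illustrates the pattern only and is not code from the patch.

static u32 tipc_example_value;                  /* hypothetical global */

static struct sk_buff *cfg_set_example_value(const void *req_tlv_area,
                                             int req_tlv_space)
{
        u32 value;

        if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
                return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
        value = ntohl(*(u32 *)TLV_DATA(req_tlv_area));
        tipc_example_value = value;
        return cfg_reply_none();                /* or cfg_reply_unsigned(value) for a GET */
}
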
diff --git a/net/tipc/core.c b/net/tipc/core.c
new file mode 100644 (file)
index 0000000..17c723f
--- /dev/null
@@ -0,0 +1,282 @@
+/*
+ * net/tipc/core.c: TIPC module code
+ *
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/random.h>
+
+#include "core.h"
+#include "dbg.h"
+#include "ref.h"
+#include "net.h"
+#include "user_reg.h"
+#include "name_table.h"
+#include "subscr.h"
+#include "config.h"
+
+int  eth_media_start(void);
+void eth_media_stop(void);
+int  handler_start(void);
+void handler_stop(void);
+int  socket_init(void);
+void socket_stop(void);
+int  netlink_start(void);
+void netlink_stop(void);
+
+#define MOD_NAME "tipc_start: "
+
+#ifndef CONFIG_TIPC_ZONES
+#define CONFIG_TIPC_ZONES 3
+#endif
+
+#ifndef CONFIG_TIPC_CLUSTERS
+#define CONFIG_TIPC_CLUSTERS 1
+#endif
+
+#ifndef CONFIG_TIPC_NODES
+#define CONFIG_TIPC_NODES 255
+#endif
+
+#ifndef CONFIG_TIPC_SLAVE_NODES
+#define CONFIG_TIPC_SLAVE_NODES 0
+#endif
+
+#ifndef CONFIG_TIPC_PORTS
+#define CONFIG_TIPC_PORTS 8191
+#endif
+
+#ifndef CONFIG_TIPC_LOG
+#define CONFIG_TIPC_LOG 0
+#endif
+
+/* global variables used by multiple sub-systems within TIPC */
+
+int tipc_mode = TIPC_NOT_RUNNING;
+int tipc_random;
+atomic_t tipc_user_count = ATOMIC_INIT(0);
+
+const char tipc_alphabet[] = 
+       "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_";
+
+/* configurable TIPC parameters */
+
+u32 tipc_own_addr;
+int tipc_max_zones;
+int tipc_max_clusters;
+int tipc_max_nodes;
+int tipc_max_slaves;
+int tipc_max_ports;
+int tipc_max_subscriptions;
+int tipc_max_publications;
+int tipc_net_id;
+int tipc_remote_management;
+
+
+int tipc_get_mode(void)
+{
+       return tipc_mode;
+}
+
+/**
+ * stop_net - shut down TIPC networking sub-systems
+ */
+
+void stop_net(void)
+{
+       eth_media_stop();
+       tipc_stop_net();
+}
+
+/**
+ * start_net - start TIPC networking sub-systems
+ */
+
+int start_net(void)
+{
+       int res;
+
+       if ((res = tipc_start_net()) ||
+           (res = eth_media_start())) {
+               stop_net();
+       }
+       return res;
+}
+
+/**
+ * stop_core - switch TIPC from SINGLE NODE to NOT RUNNING mode
+ */
+
+void stop_core(void)
+{
+       if (tipc_mode != TIPC_NODE_MODE)
+               return;
+
+       tipc_mode = TIPC_NOT_RUNNING;
+
+       netlink_stop();
+       handler_stop();
+       cfg_stop();
+       subscr_stop();
+       reg_stop();
+       nametbl_stop();
+       ref_table_stop();
+       socket_stop();
+}
+
+/**
+ * start_core - switch TIPC from NOT RUNNING to SINGLE NODE mode
+ */
+
+int start_core(void)
+{
+       int res;
+
+       if (tipc_mode != TIPC_NOT_RUNNING)
+               return -ENOPROTOOPT;
+
+       get_random_bytes(&tipc_random, sizeof(tipc_random));
+       tipc_mode = TIPC_NODE_MODE;
+
+       if ((res = handler_start()) || 
+           (res = ref_table_init(tipc_max_ports + tipc_max_subscriptions,
+                                 tipc_random)) ||
+           (res = reg_start()) ||
+           (res = nametbl_init()) ||
+            (res = k_signal((Handler)subscr_start, 0)) ||
+           (res = k_signal((Handler)cfg_init, 0)) || 
+           (res = netlink_start()) ||
+           (res = socket_init())) {
+               stop_core();
+       }
+       return res;
+}
+
+
+static int __init tipc_init(void)
+{
+       int res;
+
+       log_reinit(CONFIG_TIPC_LOG);
+       info("Activated (compiled " __DATE__ " " __TIME__ ")\n");
+
+       tipc_own_addr = 0;
+       tipc_remote_management = 1;
+       tipc_max_publications = 10000;
+       tipc_max_subscriptions = 2000;
+       tipc_max_ports = delimit(CONFIG_TIPC_PORTS, 127, 65536);
+       tipc_max_zones = delimit(CONFIG_TIPC_ZONES, 1, 511);
+       tipc_max_clusters = delimit(CONFIG_TIPC_CLUSTERS, 1, 1);
+       tipc_max_nodes = delimit(CONFIG_TIPC_NODES, 8, 2047);
+       tipc_max_slaves = delimit(CONFIG_TIPC_SLAVE_NODES, 0, 2047);
+       tipc_net_id = 4711;
+
+       if ((res = start_core()))
+               err("Unable to start in single node mode\n");
+       else    
+               info("Started in single node mode\n");
+        return res;
+}
+
+static void __exit tipc_exit(void)
+{
+       stop_net();
+       stop_core();
+       info("Deactivated\n");
+       log_stop();
+}
+
+module_init(tipc_init);
+module_exit(tipc_exit);
+
+MODULE_DESCRIPTION("TIPC: Transparent Inter Process Communication");
+MODULE_LICENSE("Dual BSD/GPL");
+
+/* Native TIPC API for kernel-space applications (see tipc.h) */
+
+EXPORT_SYMBOL(tipc_attach);
+EXPORT_SYMBOL(tipc_detach);
+EXPORT_SYMBOL(tipc_get_addr);
+EXPORT_SYMBOL(tipc_get_mode);
+EXPORT_SYMBOL(tipc_createport);
+EXPORT_SYMBOL(tipc_deleteport);
+EXPORT_SYMBOL(tipc_ownidentity);
+EXPORT_SYMBOL(tipc_portimportance);
+EXPORT_SYMBOL(tipc_set_portimportance);
+EXPORT_SYMBOL(tipc_portunreliable);
+EXPORT_SYMBOL(tipc_set_portunreliable);
+EXPORT_SYMBOL(tipc_portunreturnable);
+EXPORT_SYMBOL(tipc_set_portunreturnable);
+EXPORT_SYMBOL(tipc_publish);
+EXPORT_SYMBOL(tipc_withdraw);
+EXPORT_SYMBOL(tipc_connect2port);
+EXPORT_SYMBOL(tipc_disconnect);
+EXPORT_SYMBOL(tipc_shutdown);
+EXPORT_SYMBOL(tipc_isconnected);
+EXPORT_SYMBOL(tipc_peer);
+EXPORT_SYMBOL(tipc_ref_valid);
+EXPORT_SYMBOL(tipc_send);
+EXPORT_SYMBOL(tipc_send_buf);
+EXPORT_SYMBOL(tipc_send2name);
+EXPORT_SYMBOL(tipc_forward2name);
+EXPORT_SYMBOL(tipc_send_buf2name);
+EXPORT_SYMBOL(tipc_forward_buf2name);
+EXPORT_SYMBOL(tipc_send2port);
+EXPORT_SYMBOL(tipc_forward2port);
+EXPORT_SYMBOL(tipc_send_buf2port);
+EXPORT_SYMBOL(tipc_forward_buf2port);
+EXPORT_SYMBOL(tipc_multicast);
+/* EXPORT_SYMBOL(tipc_multicast_buf); not available yet */
+EXPORT_SYMBOL(tipc_ispublished);
+EXPORT_SYMBOL(tipc_available_nodes);
+
+/* TIPC API for external bearers (see tipc_bearer.h) */
+
+EXPORT_SYMBOL(tipc_block_bearer);
+EXPORT_SYMBOL(tipc_continue); 
+EXPORT_SYMBOL(tipc_disable_bearer);
+EXPORT_SYMBOL(tipc_enable_bearer);
+EXPORT_SYMBOL(tipc_recv_msg);
+EXPORT_SYMBOL(tipc_register_media); 
+
+/* TIPC API for external APIs (see tipc_port.h) */
+
+EXPORT_SYMBOL(tipc_createport_raw);
+EXPORT_SYMBOL(tipc_set_msg_option);
+EXPORT_SYMBOL(tipc_reject_msg);
+EXPORT_SYMBOL(tipc_send_buf_fast);
+EXPORT_SYMBOL(tipc_acknowledge);
+EXPORT_SYMBOL(tipc_get_port);
+EXPORT_SYMBOL(tipc_get_handle);
+
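A minimal sketch of how a kernel-space client would use the native API exported above; it mirrors the cfg_init()/cfg_stop() pattern in config.c and assumes only the argument conventions shown there (the my_* names are hypothetical).

static u32 my_user_ref;
static u32 my_port_ref;

/* Named-message callback; same shape as cfg_named_msg_event() in config.c */
static void my_named_msg_event(void *userdata, u32 port_ref,
                               struct sk_buff **buf, const unchar *msg,
                               u32 size, u32 importance,
                               struct tipc_portid const *orig,
                               struct tipc_name_seq const *dest)
{
        /* handle message; a reply can be sent with tipc_send_buf2port() */
}

static int my_service_init(void)
{
        int res;

        res = tipc_attach(&my_user_ref, 0, 0);
        if (res)
                return res;
        res = tipc_createport(my_user_ref, 0, TIPC_CRITICAL_IMPORTANCE,
                              NULL, NULL, NULL,
                              NULL, my_named_msg_event, NULL,
                              NULL, &my_port_ref);
        if (res)
                tipc_detach(my_user_ref);
        return res;
}
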
diff --git a/net/tipc/core.h b/net/tipc/core.h
new file mode 100644 (file)
index 0000000..d92898d
--- /dev/null
@@ -0,0 +1,313 @@
+/*
+ * net/tipc/core.h: Include file for TIPC global declarations
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_CORE_H
+#define _TIPC_CORE_H
+
+#include <net/tipc/tipc.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/timer.h>
+#include <linux/string.h>
+#include <asm/uaccess.h>
+#include <linux/interrupt.h>
+#include <asm/atomic.h>
+#include <asm/hardirq.h>
+#include <linux/netdevice.h>
+#include <linux/in.h>  
+#include <linux/list.h>
+#include <linux/vmalloc.h>
+
+/*
+ * TIPC debugging code
+ */
+
+#define assert(i)  BUG_ON(!(i))
+
+struct tipc_msg;
+extern struct print_buf *CONS, *LOG;
+extern struct print_buf *TEE(struct print_buf *, struct print_buf *);
+void msg_print(struct print_buf*,struct tipc_msg *,const char*);
+void tipc_printf(struct print_buf *, const char *fmt, ...);
+void tipc_dump(struct print_buf*,const char *fmt, ...);
+
+#ifdef CONFIG_TIPC_DEBUG
+
+/*
+ * TIPC debug support included:
+ * - system messages are printed to TIPC_OUTPUT print buffer
+ * - debug messages are printed to DBG_OUTPUT print buffer
+ */
+
+#define err(fmt, arg...)  tipc_printf(TIPC_OUTPUT, KERN_ERR "TIPC: " fmt, ## arg)
+#define warn(fmt, arg...) tipc_printf(TIPC_OUTPUT, KERN_WARNING "TIPC: " fmt, ## arg)
+#define info(fmt, arg...) tipc_printf(TIPC_OUTPUT, KERN_NOTICE "TIPC: " fmt, ## arg)
+
+#define dbg(fmt, arg...)  do {if (DBG_OUTPUT) tipc_printf(DBG_OUTPUT, fmt, ## arg);} while(0)
+#define msg_dbg(msg, txt) do {if (DBG_OUTPUT) msg_print(DBG_OUTPUT, msg, txt);} while(0)
+#define dump(fmt, arg...) do {if (DBG_OUTPUT) tipc_dump(DBG_OUTPUT, fmt, ##arg);} while(0)
+
+
+/*     
+ * By default, TIPC_OUTPUT is defined to be the system console and TIPC log buffer,
+ * while DBG_OUTPUT is the null print buffer.  These defaults can be changed
+ * here, or on a per .c file basis, by redefining these symbols.  The following
+ * print buffer options are available:
+ *
+ * NULL                        : Output to null print buffer (i.e. print nowhere)
+ * CONS                        : Output to system console
+ * LOG                 : Output to TIPC log buffer 
+ * &buf                : Output to user-defined buffer (struct print_buf *)
+ * TEE(&buf_a,&buf_b)  : Output to two print buffers (eg. TEE(CONS,LOG) )
+ */
+
+#ifndef TIPC_OUTPUT
+#define TIPC_OUTPUT TEE(CONS,LOG)
+#endif
+
+#ifndef DBG_OUTPUT
+#define DBG_OUTPUT NULL
+#endif
+
+#else
+
+#ifndef DBG_OUTPUT
+#define DBG_OUTPUT NULL
+#endif
+
+/*
+ * TIPC debug support not included:
+ * - system messages are printed to system console
+ * - debug messages are not printed
+ */
+
+#define err(fmt, arg...)  printk(KERN_ERR "%s: " fmt "\n" , __FILE__ , ## arg)
+#define info(fmt, arg...) printk(KERN_INFO "%s: " fmt "\n" , __FILE__ , ## arg)
+#define warn(fmt, arg...) printk(KERN_WARNING "%s: " fmt "\n" , __FILE__ , ## arg)
+
+#define dbg(fmt, arg...) do {} while (0)
+#define msg_dbg(msg,txt) do {} while (0)
+#define dump(fmt,arg...) do {} while (0)
+
+#endif                   
+
+
+/* 
+ * TIPC-specific error codes
+ */
+
+#define ELINKCONG EAGAIN       /* link congestion <=> resource unavailable */
+
+/*
+ * Global configuration variables
+ */
+
+extern u32 tipc_own_addr;
+extern int tipc_max_zones;
+extern int tipc_max_clusters;
+extern int tipc_max_nodes;
+extern int tipc_max_slaves;
+extern int tipc_max_ports;
+extern int tipc_max_subscriptions;
+extern int tipc_max_publications;
+extern int tipc_net_id;
+extern int tipc_remote_management;
+
+/*
+ * Other global variables
+ */
+
+extern int tipc_mode;
+extern int tipc_random;
+extern const char tipc_alphabet[];
+extern atomic_t tipc_user_count;
+
+
+/*
+ * Routines available to privileged subsystems
+ */
+
+extern int  start_core(void);
+extern void stop_core(void);
+extern int  start_net(void);
+extern void stop_net(void);
+
+static inline int delimit(int val, int min, int max)
+{
+       if (val > max)
+               return max;
+       if (val < min)
+               return min;
+       return val;
+}
+
+
+/*
+ * TIPC timer and signal code
+ */
+
+typedef void (*Handler) (unsigned long);
+
+u32 k_signal(Handler routine, unsigned long argument);
+
+/**
+ * k_init_timer - initialize a timer
+ * @timer: pointer to timer structure
+ * @routine: pointer to routine to invoke when timer expires
+ * @argument: value to pass to routine when timer expires
+ * 
+ * Timer must be initialized before use (and terminated when no longer needed).
+ */
+
+static inline void k_init_timer(struct timer_list *timer, Handler routine, 
+                               unsigned long argument)
+{
+       dbg("initializing timer %p\n", timer);
+       init_timer(timer);
+       timer->function = routine;
+       timer->data = argument;
+}
+
+/**
+ * k_start_timer - start a timer
+ * @timer: pointer to timer structure
+ * @msec: time to delay (in ms)
+ * 
+ * Schedules a previously initialized timer for later execution.
+ * If timer is already running, the new timeout overrides the previous request.
+ * 
+ * To ensure the timer doesn't expire before the specified delay elapses,
+ * the delay is rounded up when converted to jiffies, and an additional
+ * jiffy is added to account for the fact that the starting time may fall
+ * in the middle of the current jiffy.
+ */
+
+static inline void k_start_timer(struct timer_list *timer, unsigned long msec)
+{
+       dbg("starting timer %p for %u\n", timer, msec);
+       mod_timer(timer, jiffies + msecs_to_jiffies(msec) + 1);
+}
+
+/**
+ * k_cancel_timer - cancel a timer
+ * @timer: pointer to timer structure
+ * 
+ * Cancels a previously initialized timer.  
+ * Can be called safely even if the timer is already inactive.
+ * 
+ * WARNING: Must not be called when holding locks required by the timer's
+ *          timeout routine, otherwise deadlock can occur on SMP systems!
+ */
+
+static inline void k_cancel_timer(struct timer_list *timer)
+{
+       dbg("cancelling timer %p\n", timer);
+       del_timer_sync(timer);
+}
+
+/**
+ * k_term_timer - terminate a timer
+ * @timer: pointer to timer structure
+ * 
+ * Prevents further use of a previously initialized timer.
+ * 
+ * WARNING: Caller must ensure timer isn't currently running.
+ * 
+ * (Do not "enhance" this routine to automatically cancel an active timer,
+ * otherwise deadlock can arise when a timeout routine calls k_term_timer.)
+ */
+
+static inline void k_term_timer(struct timer_list *timer)
+{
+       dbg("terminating timer %p\n", timer);
+}
+
+
+/*
+ * TIPC message buffer code
+ *
+ * TIPC message buffer headroom leaves room for a 14-byte Ethernet header,
+ * while ensuring the TIPC header is word aligned for quicker access
+ */
+
+#define BUF_HEADROOM 16u 
+
+struct tipc_skb_cb {
+       void *handle;
+};
+
+#define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0]))
+
+
+static inline struct tipc_msg *buf_msg(struct sk_buff *skb)
+{
+       return (struct tipc_msg *)skb->data;
+}
+
+/**
+ * buf_acquire - creates a TIPC message buffer
+ * @size: message size (including TIPC header)
+ *
+ * Returns a new buffer.  Space is reserved for a data link header.
+ */
+
+static inline struct sk_buff *buf_acquire(u32 size)
+{
+       struct sk_buff *skb;
+       unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u;
+
+       skb = alloc_skb(buf_size, GFP_ATOMIC);
+       if (skb) {
+               skb_reserve(skb, BUF_HEADROOM);
+               skb_put(skb, size);
+               skb->next = NULL;
+       }
+       return skb;
+}
+
+/**
+ * buf_discard - frees a TIPC message buffer
+ * @skb: message buffer
+ *
+ * Frees a message buffer.  If passed NULL, just returns.
+ */
+
+static inline void buf_discard(struct sk_buff *skb)
+{
+       if (likely(skb != NULL))
+               kfree_skb(skb);
+}
+
+#endif                 
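
A short usage sketch for the buffer and timer helpers above (the function and variable names are hypothetical; the calls mirror how discover.c uses the same helpers):

static void my_expiry(unsigned long data)
{
        /* runs in timer context when the timer fires */
}

static void my_example(void)
{
        struct sk_buff *skb;
        struct timer_list my_timer;

        skb = buf_acquire(64);                  /* 64-byte message, headroom reserved */
        if (skb) {
                /* ... fill in buf_msg(skb) ... */
                buf_discard(skb);               /* NULL-safe free */
        }

        k_init_timer(&my_timer, my_expiry, 0UL);
        k_start_timer(&my_timer, 1000);         /* ~1 s, rounded up plus one jiffy */
        k_cancel_timer(&my_timer);              /* safe even if timer never ran */
        k_term_timer(&my_timer);
}
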
diff --git a/net/tipc/dbg.c b/net/tipc/dbg.c
new file mode 100644 (file)
index 0000000..efd6d65
--- /dev/null
@@ -0,0 +1,392 @@
+/*
+ * net/tipc/dbg.c: TIPC print buffer routines for debugging
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "config.h"
+#include "dbg.h"
+
+#define MAX_STRING 512
+
+static char print_string[MAX_STRING];
+static spinlock_t print_lock = SPIN_LOCK_UNLOCKED;
+
+static struct print_buf cons_buf = { NULL, 0, NULL, NULL };
+struct print_buf *CONS = &cons_buf;
+
+static struct print_buf log_buf = { NULL, 0, NULL, NULL };
+struct print_buf *LOG = &log_buf;
+
+
+#define FORMAT(PTR,LEN,FMT) \
+{\
+       va_list args;\
+       va_start(args, FMT);\
+       LEN = vsprintf(PTR, FMT, args);\
+       va_end(args);\
+       *(PTR + LEN) = '\0';\
+}
+
+/*
+ * Locking policy when using print buffers.
+ *
+ * 1) Routines of the form printbuf_XXX() rely on the caller to prevent
+ *    simultaneous use of the print buffer(s) being manipulated.
+ * 2) tipc_printf() uses 'print_lock' to prevent simultaneous use of
+ *    'print_string' and to protect its print buffer(s).
+ * 3) TEE() uses 'print_lock' to protect its print buffer(s).
+ * 4) Routines of the form log_XXX() use 'print_lock' to protect LOG.
+ */
+
+/**
+ * printbuf_init - initialize print buffer to empty
+ */
+
+void printbuf_init(struct print_buf *pb, char *raw, u32 sz)
+{
+       if (!pb || !raw || (sz < (MAX_STRING + 1)))
+               return;
+
+       pb->crs = pb->buf = raw;
+       pb->size = sz;
+       pb->next = 0;
+       pb->buf[0] = 0;
+       pb->buf[sz-1] = ~0;
+}
+
+/**
+ * printbuf_reset - reinitialize print buffer to empty state
+ */
+
+void printbuf_reset(struct print_buf *pb)
+{
+       if (pb && pb->buf)
+               printbuf_init(pb, pb->buf, pb->size);
+}
+
+/**
+ * printbuf_empty - test if print buffer is in empty state
+ */
+
+int printbuf_empty(struct print_buf *pb)
+{
+       return (!pb || !pb->buf || (pb->crs == pb->buf));
+}
+
+/**
+ * printbuf_validate - check for print buffer overflow
+ * 
+ * Verifies that a print buffer has captured all data written to it. 
+ * If data has been lost, linearize buffer and prepend an error message
+ * 
+ * Returns length of print buffer data string (including trailing NULL)
+ */
+
+int printbuf_validate(struct print_buf *pb)
+{
+        char *err = "             *** PRINT BUFFER WRAPPED AROUND ***\n";
+        char *cp_buf;
+        struct print_buf cb;
+
+       if (!pb || !pb->buf)
+               return 0;
+
+       if (pb->buf[pb->size - 1] == '\0') {
+                cp_buf = kmalloc(pb->size, GFP_ATOMIC);
+                if (cp_buf != NULL){
+                        printbuf_init(&cb, cp_buf, pb->size);
+                        printbuf_move(&cb, pb);
+                        printbuf_move(pb, &cb);
+                        kfree(cp_buf);
+                        memcpy(pb->buf, err, strlen(err));
+                } else {
+                        printbuf_reset(pb);
+                        tipc_printf(pb, err);
+                }
+       }
+       return (pb->crs - pb->buf + 1);
+}
+
+/**
+ * printbuf_move - move print buffer contents to another print buffer
+ * 
+ * Current contents of destination print buffer (if any) are discarded.
+ * Source print buffer becomes empty if a successful move occurs.
+ */
+
+void printbuf_move(struct print_buf *pb_to, struct print_buf *pb_from)
+{
+       int len;
+
+       /* Handle the cases where contents can't be moved */
+
+       if (!pb_to || !pb_to->buf)
+               return;
+
+       if (!pb_from || !pb_from->buf) {
+               printbuf_reset(pb_to);
+               return;
+       }
+
+       if (pb_to->size < pb_from->size) {
+               printbuf_reset(pb_to);
+               tipc_printf(pb_to, "*** PRINT BUFFER OVERFLOW ***");
+               return;
+       }
+
+       /* Copy data from char after cursor to end (if used) */
+       len = pb_from->buf + pb_from->size - pb_from->crs - 2;
+       if ((pb_from->buf[pb_from->size-1] == 0) && (len > 0)) {
+               strcpy(pb_to->buf, pb_from->crs + 1);
+               pb_to->crs = pb_to->buf + len;
+       } else
+               pb_to->crs = pb_to->buf;
+
+       /* Copy data from start to cursor (always) */
+       len = pb_from->crs - pb_from->buf;
+       strcpy(pb_to->crs, pb_from->buf);
+       pb_to->crs += len;
+
+       printbuf_reset(pb_from);
+}
+
+/**
+ * tipc_printf - append formatted output to print buffer chain
+ */
+
+void tipc_printf(struct print_buf *pb, const char *fmt, ...)
+{
+       int chars_to_add;
+       int chars_left;
+       char save_char;
+       struct print_buf *pb_next;
+
+       spin_lock_bh(&print_lock);
+       FORMAT(print_string, chars_to_add, fmt);
+       if (chars_to_add >= MAX_STRING)
+               strcpy(print_string, "*** STRING TOO LONG ***");
+
+       while (pb) {
+               if (pb == CONS)
+                       printk(print_string);
+               else if (pb->buf) {
+                       chars_left = pb->buf + pb->size - pb->crs - 1;
+                       if (chars_to_add <= chars_left) {
+                               strcpy(pb->crs, print_string);
+                               pb->crs += chars_to_add;
+                       } else {
+                               strcpy(pb->buf, print_string + chars_left);
+                                save_char = print_string[chars_left];
+                                print_string[chars_left] = 0;
+                                strcpy(pb->crs, print_string);
+                                print_string[chars_left] = save_char;
+                                pb->crs = pb->buf + chars_to_add - chars_left;
+                        }
+                }
+               pb_next = pb->next;
+               pb->next = 0;
+               pb = pb_next;
+       }
+       spin_unlock_bh(&print_lock);
+}
+
+/**
+ * TEE - perform next output operation on both print buffers  
+ */
+
+struct print_buf *TEE(struct print_buf *b0, struct print_buf *b1)
+{
+       struct print_buf *pb = b0;
+
+       if (!b0 || (b0 == b1))
+               return b1;
+       if (!b1)
+               return b0;
+
+       spin_lock_bh(&print_lock);
+       while (pb->next) {
+               if ((pb->next == b1) || (pb->next == b0))
+                       pb->next = pb->next->next;
+               else
+                       pb = pb->next;
+       }
+       pb->next = b1;
+       spin_unlock_bh(&print_lock);
+       return b0;
+}
+
+/**
+ * print_to_console - write string of bytes to console in multiple chunks
+ */
+
+static void print_to_console(char *crs, int len)
+{
+       int rest = len;
+
+       while (rest > 0) {
+               int sz = rest < MAX_STRING ? rest : MAX_STRING;
+               char c = crs[sz];
+
+               crs[sz] = 0;
+               printk((const char *)crs);
+               crs[sz] = c;
+               rest -= sz;
+               crs += sz;
+       }
+}
+
+/**
+ * printbuf_dump - write print buffer contents to console
+ */
+
+static void printbuf_dump(struct print_buf *pb)
+{
+       int len;
+
+       /* Dump print buffer from char after cursor to end (if used) */
+       len = pb->buf + pb->size - pb->crs - 2;
+       if ((pb->buf[pb->size - 1] == 0) && (len > 0))
+               print_to_console(pb->crs + 1, len);
+
+       /* Dump print buffer from start to cursor (always) */
+       len = pb->crs - pb->buf;
+       print_to_console(pb->buf, len);
+}
+
+/**
+ * tipc_dump - dump non-console print buffer(s) to console
+ */
+
+void tipc_dump(struct print_buf *pb, const char *fmt, ...)
+{
+       int len;
+
+       spin_lock_bh(&print_lock);
+       FORMAT(CONS->buf, len, fmt);
+       printk(CONS->buf);
+
+       for (; pb; pb = pb->next) {
+               if (pb == CONS)
+                       continue;
+               printk("\n---- Start of dump,%s log ----\n\n", 
+                      (pb == LOG) ? "global" : "local");
+               printbuf_dump(pb);
+               printbuf_reset(pb);
+               printk("\n-------- End of dump --------\n");
+       }
+       spin_unlock_bh(&print_lock);
+}
+
+/**
+ * log_stop - free up TIPC log print buffer 
+ */
+
+void log_stop(void)
+{
+       spin_lock_bh(&print_lock);
+       if (LOG->buf) {
+               kfree(LOG->buf);
+               LOG->buf = NULL;
+       }
+       spin_unlock_bh(&print_lock);
+}
+
+/**
+ * log_reinit - set TIPC log print buffer to specified size
+ */
+
+void log_reinit(int log_size)
+{
+       log_stop();
+
+       if (log_size) {
+               if (log_size <= MAX_STRING)
+                       log_size = MAX_STRING + 1;
+               spin_lock_bh(&print_lock);
+               printbuf_init(LOG, kmalloc(log_size, GFP_ATOMIC), log_size);
+               spin_unlock_bh(&print_lock);
+       }
+}
+
+/**
+ * log_resize - reconfigure size of TIPC log buffer
+ */
+
+struct sk_buff *log_resize(const void *req_tlv_area, int req_tlv_space)
+{
+       u32 value;
+
+       if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
+               return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+
+       value = *(u32 *)TLV_DATA(req_tlv_area);
+       value = ntohl(value);
+       if (value != delimit(value, 0, 32768))
+               return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
+                                             " (log size must be 0-32768)");
+       log_reinit(value);
+       return cfg_reply_none();
+}
+
+/**
+ * log_dump - capture TIPC log buffer contents in configuration message
+ */
+
+struct sk_buff *log_dump(void)
+{
+       struct sk_buff *reply;
+
+       spin_lock_bh(&print_lock);
+       if (!LOG->buf)
+               reply = cfg_reply_ultra_string("log not activated\n");
+       else if (printbuf_empty(LOG))
+               reply = cfg_reply_ultra_string("log is empty\n");
+       else {
+               struct tlv_desc *rep_tlv;
+               struct print_buf pb;
+               int str_len;
+
+               str_len = min(LOG->size, 32768u);
+               reply = cfg_reply_alloc(TLV_SPACE(str_len));
+               if (reply) {
+                       rep_tlv = (struct tlv_desc *)reply->data;
+                       printbuf_init(&pb, TLV_DATA(rep_tlv), str_len);
+                       printbuf_move(&pb, LOG);
+                       str_len = strlen(TLV_DATA(rep_tlv)) + 1;
+                       skb_put(reply, TLV_SPACE(str_len));
+                       TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
+               }
+       }
+       spin_unlock_bh(&print_lock);
+       return reply;
+}
+
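A usage sketch for the print buffer API above (my_raw/my_pb are hypothetical; the 1024-byte size is an arbitrary choice that satisfies the MAX_STRING + 1 minimum enforced by printbuf_init()):

static char my_raw[1024];
static struct print_buf my_pb;

static void my_dbg_example(void)
{
        printbuf_init(&my_pb, my_raw, sizeof(my_raw));

        /* Write to the local buffer and the system console in one call */
        tipc_printf(TEE(CONS, &my_pb), "link reset count: %u\n", 3u);

        /* The same text now sits in my_pb; reclaim it when done */
        if (!printbuf_empty(&my_pb))
                printbuf_reset(&my_pb);
}
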
diff --git a/net/tipc/dbg.h b/net/tipc/dbg.h
new file mode 100644 (file)
index 0000000..af4217a
--- /dev/null
@@ -0,0 +1,56 @@
+/*
+ * net/tipc/dbg.h: Include file for TIPC print buffer routines
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_DBG_H
+#define _TIPC_DBG_H
+
+struct print_buf {
+       char *buf;
+       u32 size;
+       char *crs;
+       struct print_buf *next;
+};
+
+void printbuf_init(struct print_buf *pb, char *buf, u32 sz);
+void printbuf_reset(struct print_buf *pb);
+int  printbuf_empty(struct print_buf *pb);
+int  printbuf_validate(struct print_buf *pb);
+void printbuf_move(struct print_buf *pb_to, struct print_buf *pb_from);
+
+void log_reinit(int log_size);
+void log_stop(void);
+
+struct sk_buff *log_resize(const void *req_tlv_area, int req_tlv_space);
+struct sk_buff *log_dump(void);
+
+#endif
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
new file mode 100644 (file)
index 0000000..c83c1be
--- /dev/null
@@ -0,0 +1,339 @@
+/*
+ * net/tipc/discover.c
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "dbg.h"
+#include "link.h"
+#include "zone.h"
+#include "discover.h"
+#include "port.h"
+#include "name_table.h"
+
+#define TIPC_LINK_REQ_INIT     125     /* min delay during bearer start up */
+#define TIPC_LINK_REQ_FAST     2000    /* normal delay if bearer has no links */
+#define TIPC_LINK_REQ_SLOW     600000  /* normal delay if bearer has links */
+
+#if 0
+#define  GET_NODE_INFO         300
+#define  GET_NODE_INFO_RESULT  301
+#define  FORWARD_LINK_PROBE    302
+#define  LINK_REQUEST_REJECTED 303
+#define  LINK_REQUEST_ACCEPTED 304
+#define  DROP_LINK_REQUEST     305
+#define  CHECK_LINK_COUNT      306
+#endif
+
+/* 
+ * TODO: Most of the inter-cluster setup stuff should be
+ * rewritten and made conformant with the specification.
+ */ 
+
+
+/**
+ * struct link_req - information about an ongoing link setup request
+ * @bearer: bearer issuing requests
+ * @dest: destination address for request messages
+ * @buf: request message to be (repeatedly) sent
+ * @timer: timer governing period between requests
+ * @timer_intv: current interval between requests (in ms)
+ */
+struct link_req {
+       struct bearer *bearer;
+       struct tipc_media_addr dest;
+       struct sk_buff *buf;
+       struct timer_list timer;
+       unsigned int timer_intv;
+};
+
+
+#if 0
+int disc_create_link(const struct tipc_link_create *argv) 
+{
+       /* 
+        * Code for inter cluster link setup here 
+        */
+       return TIPC_OK;
+}
+#endif
+
+/*
+ * disc_link_event(): A link has come up or gone down (in-cluster events are ignored)
+ */
+
+void disc_link_event(u32 addr, char *name, int up) 
+{
+       if (in_own_cluster(addr))
+               return;
+       /* 
+        * Code for inter cluster link setup here 
+        */
+}
+
+/** 
+ * disc_init_msg - initialize a link setup message
+ * @type: message type (request or response)
+ * @req_links: number of links associated with message
+ * @dest_domain: network domain of node(s) which should respond to message
+ * @b_ptr: ptr to bearer issuing message
+ */
+
+struct sk_buff *disc_init_msg(u32 type,
+                             u32 req_links,
+                             u32 dest_domain,
+                             struct bearer *b_ptr)
+{
+       struct sk_buff *buf = buf_acquire(DSC_H_SIZE);
+       struct tipc_msg *msg;
+
+       if (buf) {
+               msg = buf_msg(buf);
+               msg_init(msg, LINK_CONFIG, type, TIPC_OK, DSC_H_SIZE,
+                        dest_domain);
+               msg_set_non_seq(msg);
+               msg_set_req_links(msg, req_links);
+               msg_set_dest_domain(msg, dest_domain);
+               msg_set_bc_netid(msg, tipc_net_id);
+               msg_set_media_addr(msg, &b_ptr->publ.addr);
+       }
+       return buf;
+}
+
+/**
+ * disc_recv_msg - handle incoming link setup message (request or response)
+ * @buf: buffer containing message
+ */
+
+void disc_recv_msg(struct sk_buff *buf)
+{
+       struct bearer *b_ptr = (struct bearer *)TIPC_SKB_CB(buf)->handle;
+       struct link *link;
+       struct tipc_media_addr media_addr;
+       struct tipc_msg *msg = buf_msg(buf);
+       u32 dest = msg_dest_domain(msg);
+       u32 orig = msg_prevnode(msg);
+       u32 net_id = msg_bc_netid(msg);
+       u32 type = msg_type(msg);
+
+       msg_get_media_addr(msg,&media_addr);
+       msg_dbg(msg, "RECV:");
+       buf_discard(buf);
+
+       if (net_id != tipc_net_id)
+               return;
+       if (!addr_domain_valid(dest))
+               return;
+       if (!addr_node_valid(orig))
+               return;
+       if (orig == tipc_own_addr)
+               return;
+       if (!in_scope(dest, tipc_own_addr))
+               return;
+       if (is_slave(tipc_own_addr) && is_slave(orig))
+               return;
+       if (is_slave(orig) && !in_own_cluster(orig))
+               return;
+       if (in_own_cluster(orig)) {
+               /* Always accept link here */
+               struct sk_buff *rbuf;
+               struct tipc_media_addr *addr;
+               struct node *n_ptr = node_find(orig);
+               int link_up;
+               dbg(" in own cluster\n");
+               if (n_ptr == NULL) {
+                       n_ptr = node_create(orig);
+               }
+               if (n_ptr == NULL) {
+                       warn("Memory squeeze; Failed to create node\n");
+                       return;
+               }
+               spin_lock_bh(&n_ptr->lock);
+               link = n_ptr->links[b_ptr->identity];
+               if (!link) {
+                       dbg("creating link\n");
+                       link = link_create(b_ptr, orig, &media_addr);
+                       if (!link) {
+                               spin_unlock_bh(&n_ptr->lock);                
+                               return;
+                       }
+               }
+               addr = &link->media_addr;
+               if (memcmp(addr, &media_addr, sizeof(*addr))) {
+                       char addr_string[16];
+
+                       warn("New bearer address for %s\n", 
+                            addr_string_fill(addr_string, orig));
+                       memcpy(addr, &media_addr, sizeof(*addr));
+                       link_reset(link);     
+               }
+               link_up = link_is_up(link);
+               spin_unlock_bh(&n_ptr->lock);                
+               if ((type == DSC_RESP_MSG) || link_up)
+                       return;
+               rbuf = disc_init_msg(DSC_RESP_MSG, 1, orig, b_ptr);
+               if (rbuf != NULL) {
+                       msg_dbg(buf_msg(rbuf),"SEND:");
+                       b_ptr->media->send_msg(rbuf, &b_ptr->publ, &media_addr);
+                       buf_discard(rbuf);
+               }
+       }
+}
+
+/**
+ * disc_stop_link_req - stop sending periodic link setup requests
+ * @req: ptr to link request structure
+ */
+
+void disc_stop_link_req(struct link_req *req) 
+{
+       if (!req)
+               return;
+               
+       k_cancel_timer(&req->timer);
+       k_term_timer(&req->timer);
+       buf_discard(req->buf);
+       kfree(req);
+} 
+
+/**
+ * disc_update_link_req - update frequency of periodic link setup requests
+ * @req: ptr to link request structure
+ */
+
+void disc_update_link_req(struct link_req *req) 
+{
+       if (!req)
+               return;
+
+       if (req->timer_intv == TIPC_LINK_REQ_SLOW) {
+               if (!req->bearer->nodes.count) {
+                       req->timer_intv = TIPC_LINK_REQ_FAST;
+                       k_start_timer(&req->timer, req->timer_intv);
+               }
+       } else if (req->timer_intv == TIPC_LINK_REQ_FAST) {
+               if (req->bearer->nodes.count) {
+                       req->timer_intv = TIPC_LINK_REQ_SLOW;
+                       k_start_timer(&req->timer, req->timer_intv);
+               }
+       } else {
+               /* leave timer "as is" if we haven't yet reached a "normal" rate */
+       }
+} 
+
+/**
+ * disc_timeout - send a periodic link setup request
+ * @req: ptr to link request structure
+ * 
+ * Called whenever a link setup request timer associated with a bearer expires.
+ */
+
+static void disc_timeout(struct link_req *req) 
+{
+       struct tipc_msg *msg = buf_msg(req->buf);
+
+       spin_lock_bh(&req->bearer->publ.lock);
+
+#if 0
+       /* CURRENTLY DON'T SUPPORT INTER-ZONE LINKS */
+       u32 dest_domain = msg_dest_domain(msg);
+       int stop = 0;
+       if (!in_scope(dest_domain, tipc_own_addr)) {
+               struct _zone *z_ptr = zone_find(dest_domain);
+
+               if (z_ptr && (z_ptr->links >= msg_req_links(msg)))
+                       stop = 1;
+               if (req->timer_intv >= 32000)
+                       stop = 1;
+       }
+       if (stop) {
+               k_cancel_timer(&req->timer);
+               buf_discard(req->buf);
+               kfree(req);
+               spin_unlock_bh(&req->bearer->publ.lock);
+               return;
+       }
+#endif
+
+       msg_dbg(msg,"SEND:");
+       req->bearer->media->send_msg(req->buf, &req->bearer->publ, &req->dest);
+
+       if ((req->timer_intv == TIPC_LINK_REQ_SLOW) ||
+           (req->timer_intv == TIPC_LINK_REQ_FAST)) {
+               /* leave timer interval "as is" if already at a "normal" rate */
+       } else {
+               req->timer_intv *= 2;
+               if (req->timer_intv > TIPC_LINK_REQ_FAST)
+                       req->timer_intv = TIPC_LINK_REQ_FAST;
+               if ((req->timer_intv == TIPC_LINK_REQ_FAST) && 
+                   (req->bearer->nodes.count))
+                       req->timer_intv = TIPC_LINK_REQ_SLOW;
+       }
+       k_start_timer(&req->timer, req->timer_intv);
+
+       spin_unlock_bh(&req->bearer->publ.lock);
+}
+
+/**
+ * disc_init_link_req - start sending periodic link setup requests
+ * @b_ptr: ptr to bearer issuing requests
+ * @dest: destination address for request messages
+ * @dest_domain: network domain of node(s) which should respond to message
+ * @req_links: max number of desired links
+ * 
+ * Returns pointer to link request structure, or NULL if unable to create.
+ */
+
+struct link_req *disc_init_link_req(struct bearer *b_ptr, 
+                                   const struct tipc_media_addr *dest,
+                                   u32 dest_domain,
+                                   u32 req_links) 
+{
+       struct link_req *req;
+
+       req = (struct link_req *)kmalloc(sizeof(*req), GFP_ATOMIC);
+       if (!req)
+               return NULL;
+
+       req->buf = disc_init_msg(DSC_REQ_MSG, req_links, dest_domain, b_ptr);
+       if (!req->buf) {
+               kfree(req);
+               return NULL;
+       }
+
+       memcpy(&req->dest, dest, sizeof(*dest));
+       req->bearer = b_ptr;
+       req->timer_intv = TIPC_LINK_REQ_INIT;
+       k_init_timer(&req->timer, (Handler)disc_timeout, (unsigned long)req);
+       k_start_timer(&req->timer, req->timer_intv);
+       return req;
+} 
+
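To make the request pacing in disc_timeout()/disc_update_link_req() concrete: a new bearer starts at TIPC_LINK_REQ_INIT (125 ms) and doubles the interval on each expiry until it reaches TIPC_LINK_REQ_FAST (2 s), i.e. 125 -> 250 -> 500 -> 1000 -> 2000 ms; once the bearer has at least one node the rate drops to TIPC_LINK_REQ_SLOW (10 min), and disc_update_link_req() switches between the two "normal" rates as the node count changes. The stand-alone helper below reproduces that progression (illustration only, not part of the patch):

static unsigned int next_disc_interval(unsigned int intv, int have_nodes)
{
        if ((intv == TIPC_LINK_REQ_SLOW) || (intv == TIPC_LINK_REQ_FAST))
                return intv;            /* steady state; disc_update_link_req() handles FAST<->SLOW */
        intv *= 2;                      /* 125 -> 250 -> 500 -> 1000 -> 2000 */
        if (intv > TIPC_LINK_REQ_FAST)
                intv = TIPC_LINK_REQ_FAST;
        if ((intv == TIPC_LINK_REQ_FAST) && have_nodes)
                intv = TIPC_LINK_REQ_SLOW;
        return intv;
}
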
diff --git a/net/tipc/discover.h b/net/tipc/discover.h
new file mode 100644 (file)
index 0000000..90c1de9
--- /dev/null
@@ -0,0 +1,55 @@
+/*
+ * net/tipc/discover.h
+ *
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_DISCOVER_H
+#define _TIPC_DISCOVER_H
+
+#include <linux/tipc.h>
+
+struct link_req;
+
+struct link_req *disc_init_link_req(struct bearer *b_ptr, 
+                                   const struct tipc_media_addr *dest,
+                                   u32 dest_domain,
+                                   u32 req_links);
+void disc_update_link_req(struct link_req *req);
+void disc_stop_link_req(struct link_req *req);
+
+void disc_recv_msg(struct sk_buff *buf);
+
+void disc_link_event(u32 addr, char *name, int up);
+#if 0
+int  disc_create_link(const struct tipc_link_create *argv);
+#endif
+
+#endif
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
new file mode 100644 (file)
index 0000000..b634d7a
--- /dev/null
@@ -0,0 +1,296 @@
+/*
+ * net/tipc/eth_media.c: Ethernet bearer support for TIPC
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <net/tipc/tipc.h>
+#include <net/tipc/tipc_bearer.h>
+#include <net/tipc/tipc_msg.h>
+#include <linux/netdevice.h>
+#include <linux/version.h>
+
+#define MAX_ETH_BEARERS                2
+#define TIPC_PROTOCOL          0x88ca
+#define ETH_LINK_PRIORITY      10
+#define ETH_LINK_TOLERANCE     TIPC_DEF_LINK_TOL
+
+
+/**
+ * struct eth_bearer - Ethernet bearer data structure
+ * @bearer: ptr to associated "generic" bearer structure
+ * @dev: ptr to associated Ethernet network device
+ * @tipc_packet_type: used in binding TIPC to Ethernet driver
+ */
+struct eth_bearer {
+       struct tipc_bearer *bearer;
+       struct net_device *dev;
+       struct packet_type tipc_packet_type;
+};
+
+static struct eth_bearer eth_bearers[MAX_ETH_BEARERS];
+static int eth_started = 0;
+static struct notifier_block notifier;
+
+/**
+ * send_msg - send a TIPC message out over an Ethernet interface 
+ */
+
+static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr, 
+                   struct tipc_media_addr *dest)
+{
+       struct sk_buff *clone;
+       struct net_device *dev;
+
+       clone = skb_clone(buf, GFP_ATOMIC);
+       if (clone) {
+               clone->nh.raw = clone->data;
+               dev = ((struct eth_bearer *)(tb_ptr->usr_handle))->dev;
+               clone->dev = dev;
+               dev->hard_header(clone, dev, TIPC_PROTOCOL, 
+                                &dest->dev_addr.eth_addr,
+                                dev->dev_addr, clone->len);
+               dev_queue_xmit(clone);
+       }
+       return TIPC_OK;
+}
+
+/**
+ * recv_msg - handle incoming TIPC message from an Ethernet interface
+ * 
+ * Routine truncates any Ethernet padding/CRC appended to the message,
+ * and discards any message whose stated size exceeds the amount of data
+ * actually received
+ */
+
+static int recv_msg(struct sk_buff *buf, struct net_device *dev, 
+                   struct packet_type *pt, struct net_device *orig_dev)
+{
+       struct eth_bearer *eb_ptr = (struct eth_bearer *)pt->af_packet_priv;
+       u32 size;
+
+       if (likely(eb_ptr->bearer)) {
+               size = msg_size((struct tipc_msg *)buf->data);
+               skb_trim(buf, size);
+               if (likely(buf->len == size)) {
+                       buf->next = NULL;
+                       tipc_recv_msg(buf, eb_ptr->bearer);
+               } else {
+                       kfree_skb(buf);
+               }
+       } else {
+               kfree_skb(buf);
+       }
+       return TIPC_OK;
+}
+
+/**
+ * enable_bearer - attach TIPC bearer to an Ethernet interface 
+ */
+
+static int enable_bearer(struct tipc_bearer *tb_ptr)
+{
+       struct net_device *dev = dev_base;
+       struct eth_bearer *eb_ptr = &eth_bearers[0];
+       struct eth_bearer *stop = &eth_bearers[MAX_ETH_BEARERS];
+       char *driver_name = strchr((const char *)tb_ptr->name, ':') + 1;
+
+       /* Find device with specified name */
+
+       while (dev && dev->name &&
+              (memcmp(dev->name, driver_name, strlen(dev->name)))) {
+               dev = dev->next;
+       }
+       if (!dev)
+               return -ENODEV;
+
+       /* Find Ethernet bearer for device (or create one) */
+
+       for (;(eb_ptr != stop) && eb_ptr->dev && (eb_ptr->dev != dev); eb_ptr++);
+       if (eb_ptr == stop)
+               return -EDQUOT;
+       if (!eb_ptr->dev) {
+               eb_ptr->dev = dev;
+               eb_ptr->tipc_packet_type.type = __constant_htons(TIPC_PROTOCOL);
+               eb_ptr->tipc_packet_type.dev = dev;
+               eb_ptr->tipc_packet_type.func = recv_msg;
+               eb_ptr->tipc_packet_type.af_packet_priv = eb_ptr;
+               INIT_LIST_HEAD(&(eb_ptr->tipc_packet_type.list));
+               dev_hold(dev);
+               dev_add_pack(&eb_ptr->tipc_packet_type);
+       }
+
+       /* Associate TIPC bearer with Ethernet bearer */
+
+       eb_ptr->bearer = tb_ptr;
+       tb_ptr->usr_handle = (void *)eb_ptr;
+       tb_ptr->mtu = dev->mtu;
+       tb_ptr->blocked = 0; 
+       tb_ptr->addr.type = htonl(TIPC_MEDIA_TYPE_ETH);
+       memcpy(&tb_ptr->addr.dev_addr, &dev->dev_addr, ETH_ALEN);
+       return 0;
+}
+
+/**
+ * disable_bearer - detach TIPC bearer from an Ethernet interface 
+ *
+ * We would like to call dev_remove_pack() here, but that cannot be done at
+ * tasklet level. Instead, eth_bearer->bearer is cleared and used as a flag
+ * for discarding incoming buffers, and dev_remove_pack() is postponed until
+ * eth_media_stop() runs on exit.
+ */
+
+static void disable_bearer(struct tipc_bearer *tb_ptr)
+{
+       ((struct eth_bearer *)tb_ptr->usr_handle)->bearer = 0;
+}
+
+/**
+ * recv_notification - handle device updates from OS
+ *
+ * Change the state of the Ethernet bearer (if any) associated with the 
+ * specified device.
+ */
+
+static int recv_notification(struct notifier_block *nb, unsigned long evt, 
+                            void *dv)
+{
+       struct net_device *dev = (struct net_device *)dv;
+       struct eth_bearer *eb_ptr = &eth_bearers[0];
+       struct eth_bearer *stop = &eth_bearers[MAX_ETH_BEARERS];
+
+       while ((eb_ptr->dev != dev)) {
+               if (++eb_ptr == stop)
+                       return NOTIFY_DONE;     /* couldn't find device */
+       }
+       if (!eb_ptr->bearer)
+               return NOTIFY_DONE;             /* bearer had been disabled */
+
+       eb_ptr->bearer->mtu = dev->mtu;
+
+       switch (evt) {
+       case NETDEV_CHANGE:
+               if (netif_carrier_ok(dev))
+                       tipc_continue(eb_ptr->bearer);
+               else
+                       tipc_block_bearer(eb_ptr->bearer->name);
+               break;
+       case NETDEV_UP:
+               tipc_continue(eb_ptr->bearer);
+               break;
+       case NETDEV_DOWN:
+               tipc_block_bearer(eb_ptr->bearer->name);
+               break;
+       case NETDEV_CHANGEMTU:
+       case NETDEV_CHANGEADDR:
+               tipc_block_bearer(eb_ptr->bearer->name);
+               tipc_continue(eb_ptr->bearer);
+               break;
+       case NETDEV_UNREGISTER:
+       case NETDEV_CHANGENAME:
+               tipc_disable_bearer(eb_ptr->bearer->name);
+               break;
+       }
+       return NOTIFY_OK;
+}
+
+/**
+ * eth_addr2str - convert Ethernet address to string
+ */
+
+static char *eth_addr2str(struct tipc_media_addr *a, char *str_buf, int str_size)
+{                       
+       unchar *addr = (unchar *)&a->dev_addr;
+
+       if (str_size < 18)
+               *str_buf = '\0';
+       else
+               sprintf(str_buf, "%02x:%02x:%02x:%02x:%02x:%02x",
+                       addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
+       return str_buf;
+}
+
+/**
+ * eth_media_start - activate Ethernet bearer support
+ *
+ * Register Ethernet media type with TIPC bearer code.  Also register
+ * with OS for notifications about device state changes.
+ */
+
+int eth_media_start(void)
+{                       
+       struct tipc_media_addr bcast_addr;
+       int res;
+
+       if (eth_started)
+               return -EINVAL;
+
+       memset(&bcast_addr, 0xff, sizeof(bcast_addr));
+       memset(eth_bearers, 0, sizeof(eth_bearers));
+
+       res = tipc_register_media(TIPC_MEDIA_TYPE_ETH, "eth",
+                                 enable_bearer, disable_bearer, send_msg, 
+                                 eth_addr2str, &bcast_addr, ETH_LINK_PRIORITY, 
+                                 ETH_LINK_TOLERANCE, TIPC_DEF_LINK_WIN);
+       if (res)
+               return res;
+
+       notifier.notifier_call = &recv_notification;
+       notifier.priority = 0;
+       res = register_netdevice_notifier(&notifier);
+       if (!res)
+               eth_started = 1;
+       return res;
+}
+
+/**
+ * eth_media_stop - deactivate Ethernet bearer support
+ */
+
+void eth_media_stop(void)
+{
+       int i;
+
+       if (!eth_started)
+               return;
+
+       unregister_netdevice_notifier(&notifier);
+       for (i = 0; i < MAX_ETH_BEARERS ; i++) {
+               if (eth_bearers[i].bearer) {
+                       eth_bearers[i].bearer->blocked = 1;
+                       eth_bearers[i].bearer = 0;
+               }
+               if (eth_bearers[i].dev) {
+                       dev_remove_pack(&eth_bearers[i].tipc_packet_type);
+                       dev_put(eth_bearers[i].dev);
+               }
+       }
+       memset(&eth_bearers, 0, sizeof(eth_bearers));
+       eth_started = 0;
+}
diff --git a/net/tipc/handler.c b/net/tipc/handler.c
new file mode 100644 (file)
index 0000000..c8fbb20
--- /dev/null
@@ -0,0 +1,129 @@
+/*
+ * net/tipc/handler.c: TIPC signal handling
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+
+struct queue_item {
+       struct list_head next_signal;
+       void (*handler) (unsigned long);
+       unsigned long data;
+};
+
+static kmem_cache_t *tipc_queue_item_cache;
+static struct list_head signal_queue_head;
+static spinlock_t qitem_lock = SPIN_LOCK_UNLOCKED;
+static int handler_enabled = 0;
+
+static void process_signal_queue(unsigned long dummy);
+
+static DECLARE_TASKLET_DISABLED(tipc_tasklet, process_signal_queue, 0);
+
+
+unsigned int k_signal(Handler routine, unsigned long argument)
+{
+       struct queue_item *item;
+
+       if (!handler_enabled) {
+               err("Signal request ignored by handler\n");
+               return -ENOPROTOOPT;
+       }
+
+       spin_lock_bh(&qitem_lock);
+       item = kmem_cache_alloc(tipc_queue_item_cache, GFP_ATOMIC);
+       if (!item) {
+               err("Signal queue out of memory\n");
+               spin_unlock_bh(&qitem_lock);
+               return -ENOMEM;
+       }
+       item->handler = routine;
+       item->data = argument;
+       list_add_tail(&item->next_signal, &signal_queue_head);
+       spin_unlock_bh(&qitem_lock);
+       tasklet_schedule(&tipc_tasklet);
+       return 0;
+}
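+
+/*
+ * Typical usage (illustrative; see e.g. link_create() in link.c):
+ *
+ *     k_signal((Handler)link_start, (unsigned long)l_ptr);
+ *
+ * This defers execution of link_start(l_ptr) from the caller's context
+ * to tasklet context via the signal queue above.
+ */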
+
+static void process_signal_queue(unsigned long dummy)
+{
+       struct queue_item *__volatile__ item;
+       struct list_head *l, *n;
+
+       spin_lock_bh(&qitem_lock);
+       list_for_each_safe(l, n, &signal_queue_head) {
+               item = list_entry(l, struct queue_item, next_signal);
+               list_del(&item->next_signal);
+               spin_unlock_bh(&qitem_lock);
+               item->handler(item->data);
+               spin_lock_bh(&qitem_lock);
+               kmem_cache_free(tipc_queue_item_cache, item);
+       }
+       spin_unlock_bh(&qitem_lock);
+}
+
+int handler_start(void)
+{
+       tipc_queue_item_cache = 
+               kmem_cache_create("tipc_queue_items", sizeof(struct queue_item),
+                                 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
+       if (!tipc_queue_item_cache)
+               return -ENOMEM;
+
+       INIT_LIST_HEAD(&signal_queue_head);
+       tasklet_enable(&tipc_tasklet);
+       handler_enabled = 1;
+       return 0;
+}
+
+void handler_stop(void)
+{
+       struct list_head *l, *n;
+       struct queue_item *item; 
+
+       if (!handler_enabled)
+               return;
+
+       handler_enabled = 0;
+       tasklet_disable(&tipc_tasklet);
+       tasklet_kill(&tipc_tasklet);
+
+       spin_lock_bh(&qitem_lock);
+       list_for_each_safe(l, n, &signal_queue_head) {
+               item = list_entry(l, struct queue_item, next_signal);
+               list_del(&item->next_signal);
+               kmem_cache_free(tipc_queue_item_cache, item);
+       }
+       spin_unlock_bh(&qitem_lock);
+
+       kmem_cache_destroy(tipc_queue_item_cache);
+}
+
diff --git a/net/tipc/link.c b/net/tipc/link.c
new file mode 100644 (file)
index 0000000..92acb80
--- /dev/null
@@ -0,0 +1,3164 @@
+/*
+ * net/tipc/link.c: TIPC link code
+ * 
+ * Copyright (c) 2003-2005, Ericsson Research Canada
+ * Copyright (c) 2004-2005, Wind River Systems
+ * Copyright (c) 2005-2006, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this 
+ * list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice, 
+ * this list of conditions and the following disclaimer in the documentation 
+ * and/or other materials provided with the distribution.
+ * Neither the names of the copyright holders nor the names of its 
+ * contributors may be used to endorse or promote products derived from this 
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "dbg.h"
+#include "link.h"
+#include "net.h"
+#include "node.h"
+#include "port.h"
+#include "addr.h"
+#include "node_subscr.h"
+#include "name_distr.h"
+#include "bearer.h"
+#include "name_table.h"
+#include "discover.h"
+#include "config.h"
+#include "bcast.h"
+
+
+/* 
+ * Limit for deferred reception queue: 
+ */
+
+#define DEF_QUEUE_LIMIT 256u
+
+/* 
+ * Link state events: 
+ */
+
+#define  STARTING_EVT    856384768     /* link processing trigger */
+#define  TRAFFIC_MSG_EVT 560815u       /* traffic message received */
+#define  TIMEOUT_EVT     560817u       /* link timer expired */
+
+/*
+ * The following two 'message types' are really just implementation
+ * data conveniently stored in the message header.
+ * They must not be considered part of the protocol.
+ */
+#define OPEN_MSG   0
+#define CLOSED_MSG 1
+
+/* 
+ * State value stored in 'exp_msg_count'
+ */
+
+#define START_CHANGEOVER 100000u
+
+/**
+ * struct link_name - deconstructed link name
+ * @addr_local: network address of node at this end
+ * @if_local: name of interface at this end
+ * @addr_peer: network address of node at far end
+ * @if_peer: name of interface at far end
+ */
+
+struct link_name {
+       u32 addr_local;
+       char if_local[TIPC_MAX_IF_NAME];
+       u32 addr_peer;
+       char if_peer[TIPC_MAX_IF_NAME];
+};
+
+#if 0
+
+/* LINK EVENT CODE IS NOT SUPPORTED AT PRESENT */
+
+/** 
+ * struct link_event - link up/down event notification
+ */
+
+struct link_event {
+       u32 addr;
+       int up;
+       void (*fcn)(u32, char *, int);
+       char name[TIPC_MAX_LINK_NAME];
+};
+
+#endif
+
+static void link_handle_out_of_seq_msg(struct link *l_ptr,
+                                      struct sk_buff *buf);
+static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf);
+static int  link_recv_changeover_msg(struct link **l_ptr, struct sk_buff **buf);
+static void link_set_supervision_props(struct link *l_ptr, u32 tolerance);
+static int  link_send_sections_long(struct port *sender,
+                                   struct iovec const *msg_sect,
+                                   u32 num_sect, u32 destnode);
+static void link_check_defragm_bufs(struct link *l_ptr);
+static void link_state_event(struct link *l_ptr, u32 event);
+static void link_reset_statistics(struct link *l_ptr);
+static void link_print(struct link *l_ptr, struct print_buf *buf, 
+                      const char *str);
+
+/*
+ * Debugging code used by link routines only
+ *
+ * When debugging link problems on a system that has multiple links,
+ * the standard TIPC debugging routines may not be useful since they
+ * allow the output from multiple links to be intermixed.  For this reason
+ * routines of the form "dbg_link_XXX()" have been created that will capture
+ * debug info into a link's personal print buffer, which can then be dumped
+ * into the TIPC system log (LOG) upon request.
+ *
+ * To enable per-link debugging, use LINK_LOG_BUF_SIZE to specify the size
+ * of the print buffer used by each link.  If LINK_LOG_BUF_SIZE is set to 0,
+ * the dbg_link_XXX() routines simply send their output to the standard 
+ * debug print buffer (DBG_OUTPUT), if it has been defined; this can be useful
+ * when there is only a single link in the system being debugged.
+ *
+ * Notes:
+ * - When enabled, LINK_LOG_BUF_SIZE should be set to at least 1000 (bytes)
+ * - "l_ptr" must be valid when using dbg_link_XXX() macros  
+ */
+
+#define LINK_LOG_BUF_SIZE 0
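+/*
+ * e.g. changing the above to "#define LINK_LOG_BUF_SIZE 1024" (illustrative
+ * value) makes the dbg_link_XXX() macros below capture output in each link's
+ * print buffer, which dbg_link_dump() can then flush to the system log
+ */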
+
+#define dbg_link(fmt, arg...)  do {if (LINK_LOG_BUF_SIZE) tipc_printf(&l_ptr->print_buf, fmt, ## arg); } while(0)
+#define dbg_link_msg(msg, txt) do {if (LINK_LOG_BUF_SIZE) msg_print(&l_ptr->print_buf, msg, txt); } while(0)
+#define dbg_link_state(txt) do {if (LINK_LOG_BUF_SIZE) link_print(l_ptr, &l_ptr->print_buf, txt); } while(0)
+#define dbg_link_dump() do { \
+       if (LINK_LOG_BUF_SIZE) { \
+               tipc_printf(LOG, "\n\nDumping link <%s>:\n", l_ptr->name); \
+               printbuf_move(LOG, &l_ptr->print_buf); \
+       } \
+} while (0)
+
+static inline void dbg_print_link(struct link *l_ptr, const char *str)
+{
+       if (DBG_OUTPUT)
+               link_print(l_ptr, DBG_OUTPUT, str);
+}
+
+static inline void dbg_print_buf_chain(struct sk_buff *root_buf)
+{
+       if (DBG_OUTPUT) {
+               struct sk_buff *buf = root_buf;
+
+               while (buf) {
+                       msg_dbg(buf_msg(buf), "In chain: ");
+                       buf = buf->next;
+               }
+       }
+}
+
+/*
+ *  Simple inlined link routines
+ */
+
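+/* align - round up to a 4-byte boundary, e.g. align(5) == align(8) == 8 */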
+static inline unsigned int align(unsigned int i)
+{
+       return (i + 3) & ~3u;
+}
+
+static inline int link_working_working(struct link *l_ptr)
+{
+       return (l_ptr->state == WORKING_WORKING);
+}
+
+static inline int link_working_unknown(struct link *l_ptr)
+{
+       return (l_ptr->state == WORKING_UNKNOWN);
+}
+
+static inline int link_reset_unknown(struct link *l_ptr)
+{
+       return (l_ptr->state == RESET_UNKNOWN);
+}
+
+static inline int link_reset_reset(struct link *l_ptr)
+{
+       return (l_ptr->state == RESET_RESET);
+}
+
+static inline int link_blocked(struct link *l_ptr)
+{
+       return (l_ptr->exp_msg_count || l_ptr->blocked);
+}
+
+static inline int link_congested(struct link *l_ptr)
+{
+       return (l_ptr->out_queue_size >= l_ptr->queue_limit[0]);
+}
+
+static inline u32 link_max_pkt(struct link *l_ptr)
+{
+       return l_ptr->max_pkt;
+}
+
+static inline void link_init_max_pkt(struct link *l_ptr)
+{
+       u32 max_pkt;
+       
+       max_pkt = (l_ptr->b_ptr->publ.mtu & ~3);
+       if (max_pkt > MAX_MSG_SIZE)
+               max_pkt = MAX_MSG_SIZE;
+
+       l_ptr->max_pkt_target = max_pkt;
+       if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT)
+               l_ptr->max_pkt = l_ptr->max_pkt_target;
+       else 
+               l_ptr->max_pkt = MAX_PKT_DEFAULT;
+
+       l_ptr->max_pkt_probes = 0;
+}
+
+static inline u32 link_next_sent(struct link *l_ptr)
+{
+       if (l_ptr->next_out)
+               return msg_seqno(buf_msg(l_ptr->next_out));
+       return mod(l_ptr->next_out_no);
+}
+
+static inline u32 link_last_sent(struct link *l_ptr)
+{
+       return mod(link_next_sent(l_ptr) - 1);
+}
+
+/*
+ *  Simple non-inlined link routines (i.e. referenced outside this file)
+ */
+
+int link_is_up(struct link *l_ptr)
+{
+       if (!l_ptr)
+               return 0;
+       return (link_working_working(l_ptr) || link_working_unknown(l_ptr));
+}
+
+int link_is_active(struct link *l_ptr)
+{
+       return ((l_ptr->owner->active_links[0] == l_ptr) ||
+               (l_ptr->owner->active_links[1] == l_ptr));
+}
+
+/**
+ * link_name_validate - validate & (optionally) deconstruct link name
+ * @name: ptr to link name string
+ * @name_parts: ptr to area for link name components (or NULL if not needed)
+ * 
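+ * A link name has the form "Z.C.N:local_if-Z.C.N:peer_if", e.g. the
+ * (illustrative) "1.1.10:eth0-1.1.20:eth0".
+ *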
+ * Returns 1 if link name is valid, otherwise 0.
+ */
+
+static int link_name_validate(const char *name, struct link_name *name_parts)
+{
+       char name_copy[TIPC_MAX_LINK_NAME];
+       char *addr_local;
+       char *if_local;
+       char *addr_peer;
+       char *if_peer;
+       char dummy;
+       u32 z_local, c_local, n_local;
+       u32 z_peer, c_peer, n_peer;
+       u32 if_local_len;
+       u32 if_peer_len;
+
+       /* copy link name & ensure length is OK */
+
+       name_copy[TIPC_MAX_LINK_NAME - 1] = 0;
+       /* above is needed if a non-POSIX strncpy() doesn't pad with nulls */
+       strncpy(name_copy, name, TIPC_MAX_LINK_NAME);
+       if (name_copy[TIPC_MAX_LINK_NAME - 1] != 0)
+               return 0;
+
+       /* ensure all component parts of link name are present */
+
+       addr_local = name_copy;
+       if ((if_local = strchr(addr_local, ':')) == NULL)
+               return 0;
+       *(if_local++) = 0;
+       if ((addr_peer = strchr(if_local, '-')) == NULL)
+               return 0;
+       *(addr_peer++) = 0;
+       if_local_len = addr_peer - if_local;
+       if ((if_peer = strchr(addr_peer, ':')) == NULL)
+               return 0;
+       *(if_peer++) = 0;
+       if_peer_len = strlen(if_peer) + 1;
+
+       /* validate component parts of link name */
+
+       if ((sscanf(addr_local, "%u.%u.%u%c",
+                   &z_local, &c_local, &n_local, &dummy) != 3) ||
+           (sscanf(addr_peer, "%u.%u.%u%c",
+                   &z_peer, &c_peer, &n_peer, &dummy) != 3) ||
+           (z_local > 255) || (c_local > 4095) || (n_local > 4095) ||
+           (z_peer  > 255) || (c_peer  > 4095) || (n_peer  > 4095) ||
+           (if_local_len <= 1) || (if_local_len > TIPC_MAX_IF_NAME) || 
+           (if_peer_len  <= 1) || (if_peer_len  > TIPC_MAX_IF_NAME) || 
+           (strspn(if_local, tipc_alphabet) != (if_local_len - 1)) ||
+           (strspn(if_peer, tipc_alphabet) != (if_peer_len - 1)))
+               return 0;
+
+       /* return link name components, if necessary */
+
+       if (name_parts) {
+               name_parts->addr_local = tipc_addr(z_local, c_local, n_local);
+               strcpy(name_parts->if_local, if_local);
+               name_parts->addr_peer = tipc_addr(z_peer, c_peer, n_peer);
+               strcpy(name_parts->if_peer, if_peer);
+       }
+       return 1;
+}
+
+/**
+ * link_timeout - handle expiration of link timer
+ * @l_ptr: pointer to link
+ * 
+ * This routine must not grab "net_lock" to avoid a potential deadlock conflict
+ * with link_delete().  (There is no risk that the node will be deleted by
+ * another thread because link_delete() always cancels the link timer before
+ * node_delete() is called.)
+ */
+
+static void link_timeout(struct link *l_ptr)
+{
+       node_lock(l_ptr->owner);
+
+       /* update counters used in statistical profiling of send traffic */
+
+       l_ptr->stats.accu_queue_sz += l_ptr->out_queue_size;
+       l_ptr->stats.queue_sz_counts++;
+
+       if (l_ptr->out_queue_size > l_ptr->stats.max_queue_sz)
+               l_ptr->stats.max_queue_sz = l_ptr->out_queue_size;
+
+       if (l_ptr->first_out) {
+               struct tipc_msg *msg = buf_msg(l_ptr->first_out);
+               u32 length = msg_size(msg);
+
+               if ((msg_user(msg) == MSG_FRAGMENTER)
+                   && (msg_type(msg) == FIRST_FRAGMENT)) {
+                       length = msg_size(msg_get_wrapped(msg));
+               }
+               if (length) {
+                       l_ptr->stats.msg_lengths_total += length;
+                       l_ptr->stats.msg_length_counts++;
+                       if (length <= 64)
+                               l_ptr->stats.msg_length_profile[0]++;
+                       else if (length <= 256)
+                               l_ptr->stats.msg_length_profile[1]++;
+                       else if (length <= 1024)
+                               l_ptr->stats.msg_length_profile[2]++;
+                       else if (length <= 4096)
+                               l_ptr->stats.msg_length_profile[3]++;
+                       else if (length <= 16384)
+                               l_ptr->stats.msg_length_profile[4]++;
+                       else if (length <= 32768)
+                               l_ptr->stats.msg_length_profile[5]++;
+                       else
+                               l_ptr->stats.msg_length_profile[6]++;
+               }
+       }
+
+       /* do all other link processing performed on a periodic basis */
+
+       link_check_defragm_bufs(l_ptr);
+
+       link_state_event(l_ptr, TIMEOUT_EVT);
+
+       if (l_ptr->next_out)
+               link_push_queue(l_ptr);
+
+       node_unlock(l_ptr->owner);
+}
+
+static inline void link_set_timer(struct link *l_ptr, u32 time)
+{
+       k_start_timer(&l_ptr->timer, time);
+}
+
+/**
+ * link_create - create a new link
+ * @b_ptr: pointer to associated bearer
+ * @peer: network address of node at other end of link
+ * @media_addr: media address to use when sending messages over link
+ * 
+ * Returns pointer to link.
+ */
+
+struct link *link_create(struct bearer *b_ptr, const u32 peer,
+                        const struct tipc_media_addr *media_addr)
+{
+       struct link *l_ptr;
+       struct tipc_msg *msg;
+       char *if_name;
+
+       l_ptr = (struct link *)kmalloc(sizeof(*l_ptr), GFP_ATOMIC);
+       if (!l_ptr) {
+               warn("Memory squeeze; Failed to create link\n");
+               return NULL;
+       }
+       memset(l_ptr, 0, sizeof(*l_ptr));
+
+       l_ptr->addr = peer;
+       if_name = strchr(b_ptr->publ.name, ':') + 1;
+       sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:",
+               tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
+               tipc_node(tipc_own_addr), 
+               if_name,
+               tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
+               /* note: peer i/f is appended to link name by reset/activate */
+       memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
+       k_init_timer(&l_ptr->timer, (Handler)link_timeout, (unsigned long)l_ptr);
+       list_add_tail(&l_ptr->link_list, &b_ptr->links);
+       l_ptr->checkpoint = 1;
+       l_ptr->b_ptr = b_ptr;
+       link_set_supervision_props(l_ptr, b_ptr->media->tolerance);
+       l_ptr->state = RESET_UNKNOWN;
+
+       l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
+       msg = l_ptr->pmsg;
+       msg_init(msg, LINK_PROTOCOL, RESET_MSG, TIPC_OK, INT_H_SIZE, l_ptr->addr);
+       msg_set_size(msg, sizeof(l_ptr->proto_msg));
+       msg_set_session(msg, tipc_random);
+       msg_set_bearer_id(msg, b_ptr->identity);
+       strcpy((char *)msg_data(msg), if_name);
+
+       l_ptr->priority = b_ptr->priority;
+       link_set_queue_limits(l_ptr, b_ptr->media->window);
+
+       link_init_max_pkt(l_ptr);
+
+       l_ptr->next_out_no = 1;
+       INIT_LIST_HEAD(&l_ptr->waiting_ports);
+
+       link_reset_statistics(l_ptr);
+
+       l_ptr->owner = node_attach_link(l_ptr);
+       if (!l_ptr->owner) {
+               kfree(l_ptr);
+               return NULL;
+       }
+
+       if (LINK_LOG_BUF_SIZE) {
+               char *pb = kmalloc(LINK_LOG_BUF_SIZE, GFP_ATOMIC);
+
+               if (!pb) {
+                       kfree(l_ptr);
+                       warn("Memory squeeze; Failed to create link\n");
+                       return NULL;
+               }
+               printbuf_init(&l_ptr->print_buf, pb, LINK_LOG_BUF_SIZE);
+       }
+
+       k_signal((Handler)link_start, (unsigned long)l_ptr);
+
+       dbg("link_create(): tolerance = %u, cont intv = %u, abort_limit = %u\n",
+           l_ptr->tolerance, l_ptr->continuity_interval, l_ptr->abort_limit);
+       
+       return l_ptr;
+}
+
+/** 
+ * link_delete - delete a link
+ * @l_ptr: pointer to link
+ * 
+ * Note: 'net_lock' is write_locked, bearer is locked.
+ * This routine must not grab the node lock until after link timer cancellation
+ * to avoid a potential deadlock situation.  
+ */
+
+void link_delete(struct link *l_ptr)
+{
+       if (!l_ptr) {
+               err("Attempt to delete non-existent link\n");
+               return;
+       }
+
+       dbg("link_delete()\n");
+
+       k_cancel_timer(&l_ptr->timer);
+       
+       node_lock(l_ptr->owner);
+       link_reset(l_ptr);
+       node_detach_link(l_ptr->owner, l_ptr);
+       link_stop(l_ptr);
+       list_del_init(&l_ptr->link_list);
+       if (LINK_LOG_BUF_SIZE)
+               kfree(l_ptr->print_buf.buf);
+       node_unlock(l_ptr->owner);
+       k_term_timer(&l_ptr->timer);
+       kfree(l_ptr);
+}
+
+void link_start(struct link *l_ptr)
+{
+       dbg("link_start %x\n", l_ptr);
+       link_state_event(l_ptr, STARTING_EVT);
+}
+
+/**
+ * link_schedule_port - schedule port for deferred sending 
+ * @l_ptr: pointer to link
+ * @origport: reference to sending port
+ * @sz: amount of data to be sent
+ * 
+ * Schedules port for renewed sending of messages after link congestion 
+ * has abated.
+ */
+
+static int link_schedule_port(struct link *l_ptr, u32 origport, u32 sz)
+{
+       struct port *p_ptr;
+
+       spin_lock_bh(&port_list_lock);
+       p_ptr = port_lock(origport);
+       if (p_ptr) {
+               if (!p_ptr->wakeup)
+                       goto exit;
+               if (!list_empty(&p_ptr->wait_list))
+                       goto exit;
+               p_ptr->congested_link = l_ptr;
+               p_ptr->publ.congested = 1;
+               p_ptr->waiting_pkts = 1 + ((sz - 1) / link_max_pkt(l_ptr));
+               list_add_tail(&p_ptr->wait_list, &l_ptr->waiting_ports);
+               l_ptr->stats.link_congs++;
+exit:
+               port_unlock(p_ptr);
+       }
+       spin_unlock_bh(&port_list_lock);
+       return -ELINKCONG;
+}
+
+void link_wakeup_ports(struct link *l_ptr, int all)
+{
+       struct port *p_ptr;
+       struct port *temp_p_ptr;
+       int win = l_ptr->queue_limit[0] - l_ptr->out_queue_size;
+
+       if (all)
+               win = 100000;
+       if (win <= 0)
+               return;
+       if (!spin_trylock_bh(&port_list_lock))
+               return;
+       if (link_congested(l_ptr))
+               goto exit;
+       list_for_each_entry_safe(p_ptr, temp_p_ptr, &l_ptr->waiting_ports, 
+                                wait_list) {
+               if (win <= 0)
+                       break;
+               list_del_init(&p_ptr->wait_list);
+               p_ptr->congested_link = 0;
+               assert(p_ptr->wakeup);
+               spin_lock_bh(p_ptr->publ.lock);
+               p_ptr->publ.congested = 0;
+               p_ptr->wakeup(&p_ptr->publ);
+               win -= p_ptr->waiting_pkts;
+               spin_unlock_bh(p_ptr->publ.lock);
+       }
+
+exit:
+       spin_unlock_bh(&port_list_lock);
+}
+
+/** 
+ * link_release_outqueue - purge link's outbound message queue
+ * @l_ptr: pointer to link
+ */
+
+static void link_release_outqueue(struct link *l_ptr)
+{
+       struct sk_buff *buf = l_ptr->first_out;
+       struct sk_buff *next;
+
+       while (buf) {
+               next = buf->next;
+               buf_discard(buf);
+               buf = next;
+       }
+       l_ptr->first_out = NULL;
+       l_ptr->out_queue_size = 0;
+}
+
+/**
+ * link_reset_fragments - purge link's inbound message fragments queue
+ * @l_ptr: pointer to link
+ */
+
+void link_reset_fragments(struct link *l_ptr)
+{
+       struct sk_buff *buf = l_ptr->defragm_buf;
+       struct sk_buff *next;
+
+       while (buf) {
+               next = buf->next;
+               buf_discard(buf);
+               buf = next;
+       }
+       l_ptr->defragm_buf = NULL;
+}
+
+/** 
+ * link_stop - purge all inbound and outbound messages associated with link
+ * @l_ptr: pointer to link
+ */
+
+void link_stop(struct link *l_ptr)
+{
+       struct sk_buff *buf;
+       struct sk_buff *next;
+
+       buf = l_ptr->oldest_deferred_in;
+       while (buf) {
+               next = buf->next;
+               buf_discard(buf);
+               buf = next;
+       }
+
+       buf = l_ptr->first_out;
+       while (buf) {
+               next = buf->next;
+               buf_discard(buf);
+               buf = next;
+       }
+
+       link_reset_fragments(l_ptr);
+
+       buf_discard(l_ptr->proto_msg_queue);
+       l_ptr->proto_msg_queue = NULL;
+}
+
+#if 0
+
+/* LINK EVENT CODE IS NOT SUPPORTED AT PRESENT */
+
+static void link_recv_event(struct link_event *ev)
+{
+       ev->fcn(ev->addr, ev->name, ev->up);
+       kfree(ev);
+}
+
+static void link_send_event(void (*fcn)(u32 a, char *n, int up),
+                           struct link *l_ptr, int up)
+{
+       struct link_event *ev;
+       
+       ev = kmalloc(sizeof(*ev), GFP_ATOMIC);
+       if (!ev) {
+               warn("Link event allocation failure\n");
+               return;
+       }
+       ev->addr = l_ptr->addr;
+       ev->up = up;
+       ev->fcn = fcn;
+       memcpy(ev->name, l_ptr->name, TIPC_MAX_LINK_NAME);
+       k_signal((Handler)link_recv_event, (unsigned long)ev);
+}
+
+#else
+
+#define link_send_event(fcn, l_ptr, up) do { } while (0)
+
+#endif
+
+void link_reset(struct link *l_ptr)
+{
+       struct sk_buff *buf;
+       u32 prev_state = l_ptr->state;
+       u32 checkpoint = l_ptr->next_in_no;
+       
+       msg_set_session(l_ptr->pmsg, msg_session(l_ptr->pmsg) + 1);
+
+       /* Link is down, accept any session: */
+       l_ptr->peer_session = 0;
+
+       /* Prepare for max packet size negotiation */
+       link_init_max_pkt(l_ptr);
+       
+       l_ptr->state = RESET_UNKNOWN;
+       dbg_link_state("Resetting Link\n");
+
+       if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
+               return;
+
+       node_link_down(l_ptr->owner, l_ptr);
+       bearer_remove_dest(l_ptr->b_ptr, l_ptr->addr);
+#if 0
+       tipc_printf(CONS, "\nReset link <%s>\n", l_ptr->name);
+       dbg_link_dump();
+#endif
+       if (node_has_active_links(l_ptr->owner) &&
+           l_ptr->owner->permit_changeover) {
+               l_ptr->reset_checkpoint = checkpoint;
+               l_ptr->exp_msg_count = START_CHANGEOVER;
+       }
+
+       /* Clean up all queues: */
+
+       link_release_outqueue(l_ptr);
+       buf_discard(l_ptr->proto_msg_queue);
+       l_ptr->proto_msg_queue = NULL;
+       buf = l_ptr->oldest_deferred_in;
+       while (buf) {
+               struct sk_buff *next = buf->next;
+               buf_discard(buf);
+               buf = next;
+       }
+       if (!list_empty(&l_ptr->waiting_ports))
+               link_wakeup_ports(l_ptr, 1);
+
+       l_ptr->retransm_queue_head = 0;
+       l_ptr->retransm_queue_size = 0;
+       l_ptr->last_out = NULL;
+       l_ptr->first_out = NULL;
+       l_ptr->next_out = NULL;
+       l_ptr->unacked_window = 0;
+       l_ptr->checkpoint = 1;
+       l_ptr->next_out_no = 1;
+       l_ptr->deferred_inqueue_sz = 0;
+       l_ptr->oldest_deferred_in = NULL;
+       l_ptr->newest_deferred_in = NULL;
+       l_ptr->fsm_msg_cnt = 0;
+       l_ptr->stale_count = 0;
+       link_reset_statistics(l_ptr);
+
+       link_send_event(cfg_link_event, l_ptr, 0);
+       if (!in_own_cluster(l_ptr->addr))
+               link_send_event(disc_link_event, l_ptr, 0);
+}
+
+
+static void link_activate(struct link *l_ptr)
+{
+       l_ptr->next_in_no = 1;
+       node_link_up(l_ptr->owner, l_ptr);
+       bearer_add_dest(l_ptr->b_ptr, l_ptr->addr);
+       link_send_event(cfg_link_event, l_ptr, 1);
+       if (!in_own_cluster(l_ptr->addr))
+               link_send_event(disc_link_event, l_ptr, 1);
+}
+
+/**
+ * link_state_event - link finite state machine
+ * @l_ptr: pointer to link
+ * @event: state machine event to process
+ */
+
+static void link_state_event(struct link *l_ptr, unsigned event)
+{
+       struct link *other; 
+       u32 cont_intv = l_ptr->continuity_interval;
+
+       if (!l_ptr->started && (event != STARTING_EVT))
+               return;         /* Not yet. */
+
+       if (link_blocked(l_ptr)) {
+               if (event == TIMEOUT_EVT) {
+                       link_set_timer(l_ptr, cont_intv);
+               }
+               return;   /* Changeover going on */
+       }
+       dbg_link("STATE_EV: <%s> ", l_ptr->name);
+
+       switch (l_ptr->state) {
+       case WORKING_WORKING:
+               dbg_link("WW/");
+               switch (event) {
+               case TRAFFIC_MSG_EVT:
+                       dbg_link("TRF-");
+                       /* fall through */
+               case ACTIVATE_MSG:
+                       dbg_link("ACT\n");
+                       break;
+               case TIMEOUT_EVT:
+                       dbg_link("TIM ");
+                       if (l_ptr->next_in_no != l_ptr->checkpoint) {
+                               l_ptr->checkpoint = l_ptr->next_in_no;
+                               if (bclink_acks_missing(l_ptr->owner)) {
+                                       link_send_proto_msg(l_ptr, STATE_MSG, 
+                                                           0, 0, 0, 0, 0);
+                                       l_ptr->fsm_msg_cnt++;
+                               } else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
+                                       link_send_proto_msg(l_ptr, STATE_MSG, 
+                                                           1, 0, 0, 0, 0);
+                                       l_ptr->fsm_msg_cnt++;
+                               }
+                               link_set_timer(l_ptr, cont_intv);
+                               break;
+                       }
+                       dbg_link(" -> WU\n");
+                       l_ptr->state = WORKING_UNKNOWN;
+                       l_ptr->fsm_msg_cnt = 0;
+                       link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
+                       l_ptr->fsm_msg_cnt++;
+                       link_set_timer(l_ptr, cont_intv / 4);
+                       break;
+               case RESET_MSG:
+                       dbg_link("RES -> RR\n");
+                       link_reset(l_ptr);
+                       l_ptr->state = RESET_RESET;
+                       l_ptr->fsm_msg_cnt = 0;
+                       link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
+                       l_ptr->fsm_msg_cnt++;
+                       link_set_timer(l_ptr, cont_intv);
+                       break;
+               default:
+                       err("Unknown link event %u in WW state\n", event);
+               }
+               break;
+       case WORKING_UNKNOWN:
+               dbg_link("WU/");
+               switch (event) {
+               case TRAFFIC_MSG_EVT:
+                       dbg_link("TRF-");
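+                       /* fall through */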
+               case ACTIVATE_MSG:
+                       dbg_link("ACT -> WW\n");
+                       l_ptr->state = WORKING_WORKING;
+                       l_ptr->fsm_msg_cnt = 0;
+                       link_set_timer(l_ptr, cont_intv);
+                       break;
+               case RESET_MSG:
+                       dbg_link("RES -> RR\n");
+                       link_reset(l_ptr);
+                       l_ptr->state = RESET_RESET;
+                       l_ptr->fsm_msg_cnt = 0;
+                       link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
+                       l_ptr->fsm_msg_cnt++;
+                       link_set_timer(l_ptr, cont_intv);
+                       break;
+               case TIMEOUT_EVT:
+                       dbg_link("TIM ");
+                       if (l_ptr->next_in_no != l_ptr->checkpoint) {
+                               dbg_link("-> WW \n");
+                               l_ptr->state = WORKING_WORKING;
+                               l_ptr->fsm_msg_cnt = 0;
+                               l_ptr->checkpoint = l_ptr->next_in_no;
+                               if (bclink_acks_missing(l_ptr->owner)) {
+                                       link_send_proto_msg(l_ptr, STATE_MSG,
+                                                           0, 0, 0, 0, 0);
+                                       l_ptr->fsm_msg_cnt++;
+                               }
+                               link_set_timer(l_ptr, cont_intv);
+                       } else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
+                               dbg_link("Probing %u/%u, timer = %u ms\n",
+                                        l_ptr->fsm_msg_cnt, l_ptr->abort_limit,
+                                        cont_intv / 4);
+                               link_send_proto_msg(l_ptr, STATE_MSG, 
+                                                   1, 0, 0, 0, 0);
+                               l_ptr->fsm_msg_cnt++;
+                               link_set_timer(l_ptr, cont_intv / 4);
+                       } else {        /* Link has failed */
+                               dbg_link("-> RU (%u probes unanswered)\n",
+                                        l_ptr->fsm_msg_cnt);
+                               link_reset(l_ptr);
+                               l_ptr->state = RESET_UNKNOWN;
+                               l_ptr->fsm_msg_cnt = 0;
+                               link_send_proto_msg(l_ptr, RESET_MSG,
+                                                   0, 0, 0, 0, 0);
+                               l_ptr->fsm_msg_cnt++;
+                               link_set_timer(l_ptr, cont_intv);
+                       }
+                       break;
+               default:
+                       err("Unknown link event %u in WU state\n", event);
+               }
+               break;
+       case RESET_UNKNOWN:
+               dbg_link("RU/");
+               switch (event) {
+               case TRAFFIC_MSG_EVT:
+                       dbg_link("TRF-\n");
+                       break;
+               case ACTIVATE_MSG:
+                       other = l_ptr->owner->active_links[0];
+                       if (other && link_working_unknown(other)) {
+                               dbg_link("ACT\n");
+                               break;
+                       }
+                       dbg_link("ACT -> WW\n");
+                       l_ptr->state = WORKING_WORKING;
+                       l_ptr->fsm_msg_cnt = 0;
+                       link_activate(l_ptr);
+                       link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
+                       l_ptr->fsm_msg_cnt++;
+                       link_set_timer(l_ptr, cont_intv);
+                       break;
+               case RESET_MSG:
+                       dbg_link("RES \n");
+                       dbg_link(" -> RR\n");
+                       l_ptr->state = RESET_RESET;
+                       l_ptr->fsm_msg_cnt = 0;
+                       link_send_proto_msg(l_ptr, ACTIVATE_MSG, 1, 0, 0, 0, 0);
+                       l_ptr->fsm_msg_cnt++;
+                       link_set_timer(l_ptr, cont_intv);
+                       break;
+               case STARTING_EVT:
+                       dbg_link("START-");
+                       l_ptr->started = 1;
+                       /* fall through */
+               case TIMEOUT_EVT:
+                       dbg_link("TIM \n");
+                       link_send_proto_msg(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
+                       l_ptr->fsm_msg_cnt++;
+                       link_set_timer(l_ptr, cont_intv);
+                       break;
+               default:
+                       err("Unknown link event %u in RU state\n", event);
+               }
+               break;
+       case RESET_RESET:
+               dbg_link("RR/ ");
+               switch (event) {
+               case TRAFFIC_MSG_EVT:
+                       dbg_link("TRF-");
+                       /* fall through */
+               case ACTIVATE_MSG:
+                       other = l_ptr->owner->active_links[0];
+                       if (other && link_working_unknown(other)) {
+                               dbg_link("ACT\n");
+                               break;
+                       }
+                       dbg_link("ACT -> WW\n");
+                       l_ptr->state = WORKING_WORKING;
+                       l_ptr->fsm_msg_cnt = 0;
+                       link_activate(l_ptr);
+                       link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
+                       l_ptr->fsm_msg_cnt++;
+                       link_set_timer(l_ptr, cont_intv);
+                       break;
+               case RESET_MSG:
+                       dbg_link("RES\n");
+                       break;
+               case TIMEOUT_EVT:
+                       dbg_link("TIM\n");
+                       link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
+                       l_ptr->fsm_msg_cnt++;
+                       link_set_timer(l_ptr, cont_intv);
+                       dbg_link("fsm_msg_cnt %u\n", l_ptr->fsm_msg_cnt);
+                       break;
+               default:
+                       err("Unknown link event %u in RR state\n", event);
+               }
+               break;
+       default:
+               err("Unknown link state %u/%u\n", l_ptr->state, event);
+       }
+}
+
+/*
+ * link_bundle_buf(): Append contents of a buffer to
+ * the tail of an existing one. 
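+ * The appended message is copied in at offset align(msg_size(bundler)),
+ * provided the resulting bundle still fits within link_max_pkt(l_ptr).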
+ */
+
+static int link_bundle_buf(struct link *l_ptr,
+                          struct sk_buff *bundler, 
+                          struct sk_buff *buf)
+{
+       struct tipc_msg *bundler_msg = buf_msg(bundler);
+       struct tipc_msg *msg = buf_msg(buf);
+       u32 size = msg_size(msg);
+       u32 to_pos = align(msg_size(bundler_msg));
+       u32 rest = link_max_pkt(l_ptr) - to_pos;
+
+       if (msg_user(bundler_msg) != MSG_BUNDLER)
+               return 0;
+       if (msg_type(bundler_msg) != OPEN_MSG)
+               return 0;
+       if (rest < align(size))
+               return 0;
+
+       skb_put(bundler, (to_pos - msg_size(bundler_msg)) + size);
+       memcpy(bundler->data + to_pos, buf->data, size);
+       msg_set_size(bundler_msg, to_pos + size);
+       msg_set_msgcnt(bundler_msg, msg_msgcnt(bundler_msg) + 1);
+       dbg("Packed msg # %u(%u octets) into pos %u in buf(#%u)\n",
+           msg_msgcnt(bundler_msg), size, to_pos, msg_seqno(bundler_msg));
+       msg_dbg(msg, "PACKD:");
+       buf_discard(buf);
+       l_ptr->stats.sent_bundled++;
+       return 1;
+}
+
+static inline void link_add_to_outqueue(struct link *l_ptr, 
+                                       struct sk_buff *buf, 
+                                       struct tipc_msg *msg)
+{
+       u32 ack = mod(l_ptr->next_in_no - 1);
+       u32 seqno = mod(l_ptr->next_out_no++);
+
+       msg_set_word(msg, 2, ((ack << 16) | seqno));
+       msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
+       buf->next = NULL;
+       if (l_ptr->first_out) {
+               l_ptr->last_out->next = buf;
+               l_ptr->last_out = buf;
+       } else
+               l_ptr->first_out = l_ptr->last_out = buf;
+       l_ptr->out_queue_size++;
+}
+
+/* 
+ * link_send_buf() is the 'full path' for messages, called from 
+ * inside TIPC when the 'fast path' in tipc_send_buf
+ * has failed, and from link_send()
+ */
+
+int link_send_buf(struct link *l_ptr, struct sk_buff *buf)
+{
+       struct tipc_msg *msg = buf_msg(buf);
+       u32 size = msg_size(msg);
+       u32 dsz = msg_data_sz(msg);
+       u32 queue_size = l_ptr->out_queue_size;
+       u32 imp = msg_tot_importance(msg);
+       u32 queue_limit = l_ptr->queue_limit[imp];
+       u32 max_packet = link_max_pkt(l_ptr);
+
+       msg_set_prevnode(msg, tipc_own_addr);   /* If routed message */
+
+       /* Match msg importance against queue limits: */
+
+       if (unlikely(queue_size >= queue_limit)) {
+               if (imp <= TIPC_CRITICAL_IMPORTANCE) {
+                       return link_schedule_port(l_ptr, msg_origport(msg),
+                                                 size);
+               }
+               msg_dbg(msg, "TIPC: Congestion, throwing away\n");
+               buf_discard(buf);
+               if (imp > CONN_MANAGER) {
+                       warn("Resetting <%s>, send queue full\n", l_ptr->name);
+                       link_reset(l_ptr);
+               }
+               return dsz;
+       }
+
+       /* Fragmentation needed ? */
+
+       if (size > max_packet)
+               return link_send_long_buf(l_ptr, buf);
+
+       /* Packet can be queued or sent: */
+
+       if (queue_size > l_ptr->stats.max_queue_sz)
+               l_ptr->stats.max_queue_sz = queue_size;
+
+       if (likely(!bearer_congested(l_ptr->b_ptr, l_ptr) && 
+                  !link_congested(l_ptr))) {
+               link_add_to_outqueue(l_ptr, buf, msg);
+
+               if (likely(bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr))) {
+                       l_ptr->unacked_window = 0;
+               } else {
+                       bearer_schedule(l_ptr->b_ptr, l_ptr);
+                       l_ptr->stats.bearer_congs++;
+                       l_ptr->next_out = buf;
+               }
+               return dsz;
+       }
+       /* Congestion: can message be bundled ?: */
+
+       if ((msg_user(msg) != CHANGEOVER_PROTOCOL) &&
+           (msg_user(msg) != MSG_FRAGMENTER)) {
+
+               /* Try adding message to an existing bundle */
+
+               if (l_ptr->next_out && 
+                   link_bundle_buf(l_ptr, l_ptr->last_out, buf)) {
+                       bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
+                       return dsz;
+               }
+
+               /* Try creating a new bundle */
+
+               if (size <= max_packet * 2 / 3) {
+                       struct sk_buff *bundler = buf_acquire(max_packet);
+                       struct tipc_msg bundler_hdr;
+
+                       if (bundler) {
+                               msg_init(&bundler_hdr, MSG_BUNDLER, OPEN_MSG,
+                                        TIPC_OK, INT_H_SIZE, l_ptr->addr);
+                               memcpy(bundler->data, (unchar *)&bundler_hdr, 
+                                      INT_H_SIZE);
+                               skb_trim(bundler, INT_H_SIZE);
+                               link_bundle_buf(l_ptr, bundler, buf);
+                               buf = bundler;
+                               msg = buf_msg(buf);
+                               l_ptr->stats.sent_bundles++;
+                       }
+               }
+       }
+       if (!l_ptr->next_out)
+               l_ptr->next_out = buf;
+       link_add_to_outqueue(l_ptr, buf, msg);
+       bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
+       return dsz;
+}
+
+/* 
+ * link_send(): same as link_send_buf(), but the link to use has 
+ * not been selected yet, and the owner node is not locked.
+ * Called by TIPC internal users, e.g. the name distributor
+ */
+
+int link_send(struct sk_buff *buf, u32 dest, u32 selector)
+{
+       struct link *l_ptr;
+       struct node *n_ptr;
+       int res = -ELINKCONG;
+
+       read_lock_bh(&net_lock);
+       n_ptr = node_select(dest, selector);
+       if (n_ptr) {
+               node_lock(n_ptr);
+               l_ptr = n_ptr->active_links[selector & 1];
+               dbg("link_send: found link %x for dest %x\n", l_ptr, dest);
+               if (l_ptr) {
+                       res = link_send_buf(l_ptr, buf);
+               }
+               node_unlock(n_ptr);
+       } else {
+               dbg("Attempt to send msg to unknown node:\n");
+               msg_dbg(buf_msg(buf),">>>");
+               buf_discard(buf);
+       }
+       read_unlock_bh(&net_lock);
+       return res;
+}
+
+/* 
+ * link_send_buf_fast: Entry for data messages where the 
+ * destination link is known and the header is complete,
+ * including the total message length. Very time critical.
+ * Link is locked. Returns user data length.
+ */
+
+static inline int link_send_buf_fast(struct link *l_ptr, struct sk_buff *buf,
+                                    u32 *used_max_pkt)
+{
+       struct tipc_msg *msg = buf_msg(buf);
+       int res = msg_data_sz(msg);
+
+       if (likely(!link_congested(l_ptr))) {
+               if (likely(msg_size(msg) <= link_max_pkt(l_ptr))) {
+                       if (likely(list_empty(&l_ptr->b_ptr->cong_links))) {
+                               link_add_to_outqueue(l_ptr, buf, msg);
+                               if (likely(bearer_send(l_ptr->b_ptr, buf,
+                                                      &l_ptr->media_addr))) {
+                                       l_ptr->unacked_window = 0;
+                                       msg_dbg(msg,"SENT_FAST:");
+                                       return res;
+                               }
+                               dbg("fast send failed...\n");
+                               bearer_schedule(l_ptr->b_ptr, l_ptr);
+                               l_ptr->stats.bearer_congs++;
+                               l_ptr->next_out = buf;
+                               return res;
+                       }
+               }
+               else
+                       *used_max_pkt = link_max_pkt(l_ptr);
+       }
+       return link_send_buf(l_ptr, buf);  /* All other cases */
+}
+
+/* 
+ * tipc_send_buf_fast: Entry for data messages where the 
+ * destination node is known and the header is complete,
+ * including the total message length.
+ * Returns user data length.
+ */
+int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode)
+{
+       struct link *l_ptr;
+       struct node *n_ptr;
+       int res;
+       u32 selector = msg_origport(buf_msg(buf)) & 1;
+       u32 dummy;
+
+       if (destnode == tipc_own_addr)
+               return port_recv_msg(buf);
+
+       read_lock_bh(&net_lock);
+       n_ptr = node_select(destnode, selector);
+       if (likely(n_ptr)) {
+               node_lock(n_ptr);
+               l_ptr = n_ptr->active_links[selector];
+               dbg("send_fast: buf %x selected %x, destnode = %x\n",
+                   buf, l_ptr, destnode);
+               if (likely(l_ptr)) {
+                       res = link_send_buf_fast(l_ptr, buf, &dummy);
+                       node_unlock(n_ptr);
+                       read_unlock_bh(&net_lock);
+                       return res;
+               }
+               node_unlock(n_ptr);
+       }
+       read_unlock_bh(&net_lock);
+       res = msg_data_sz(buf_msg(buf));
+       tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
+       return res;
+}
+
+
+/* 
+ * link_send_sections_fast: Entry for messages where the 
+ * destination processor is known and the header is complete,
+ * except for total message length. 
+ * Returns user data length or errno.
+ */
+int link_send_sections_fast(struct port *sender, 
+                           struct iovec const *msg_sect,
+                           const u32 num_sect, 
+                           u32 destaddr)
+{
+       struct tipc_msg *hdr = &sender->publ.phdr;
+       struct link *l_ptr;
+       struct sk_buff *buf;
+       struct node *node;
+       int res;
+       u32 selector = msg_origport(hdr) & 1;
+
+       assert(destaddr != tipc_own_addr);
+
+again:
+       /*
+        * Try building message using port's max_pkt hint.
+        * (Must not hold any locks while building message.)
+        */
+
+       res = msg_build(hdr, msg_sect, num_sect, sender->max_pkt,
+                       !sender->user_port, &buf);
+
+       read_lock_bh(&net_lock);
+       node = node_select(destaddr, selector);
+       if (likely(node)) {
+               node_lock(node);
+               l_ptr = node->active_links[selector];
+               if (likely(l_ptr)) {
+                       if (likely(buf)) {
+                               res = link_send_buf_fast(l_ptr, buf,
+                                                        &sender->max_pkt);
+                               if (unlikely(res < 0))
+                                       buf_discard(buf);
+exit:
+                               node_unlock(node);
+                               read_unlock_bh(&net_lock);
+                               return res;
+                       }
+
+                       /* Exit if build request was invalid */
+
+                       if (unlikely(res < 0))
+                               goto exit;
+
+                       /* Exit if link (or bearer) is congested */
+
+                       if (link_congested(l_ptr) || 
+                           !list_empty(&l_ptr->b_ptr->cong_links)) {
+                               res = link_schedule_port(l_ptr,
+                                                        sender->publ.ref, res);
+                               goto exit;
+                       }
+
+                       /* 
+                        * Message size exceeds max_pkt hint; update hint,
+                        * then re-try fast path or fragment the message
+                        */
+
+                       sender->max_pkt = link_max_pkt(l_ptr);
+                       node_unlock(node);
+                       read_unlock_bh(&net_lock);
+
+
+                       if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt)
+                               goto again;
+
+                       return link_send_sections_long(sender, msg_sect,
+                                                      num_sect, destaddr);
+               }
+               node_unlock(node);
+       }
+       read_unlock_bh(&net_lock);
+
+       /* Couldn't find a link to the destination node */
+
+       if (buf)
+               return tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
+       if (res >= 0)
+               return port_reject_sections(sender, hdr, msg_sect, num_sect,
+                                           TIPC_ERR_NO_NODE);
+       return res;
+}
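+
+/*
+ * For orientation, a condensed restatement of the fast path above (an
+ * illustrative sketch, not executed code): the message is built
+ * optimistically with the port's cached max_pkt hint before any locks are
+ * taken, and the hint is refreshed from the selected link only when it
+ * turns out to be too small:
+ *
+ *     res = msg_build(hdr, msg_sect, num_sect, sender->max_pkt, ..., &buf);
+ *     if (!buf) {                             // message exceeded the hint
+ *             sender->max_pkt = link_max_pkt(l_ptr);      // refresh hint
+ *             if (msg_hdr_sz(hdr) + res <= sender->max_pkt)
+ *                     goto again;             // retry single-buffer path
+ *             return link_send_sections_long(sender, msg_sect,
+ *                                            num_sect, destaddr);
+ *     }
+ */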
+
+/*
+ * link_send_sections_long(): Entry for long messages where the
+ * destination node is known and the header is complete,
+ * including the total message length.
+ * Link and bearer congestion status have been checked to be ok,
+ * and are ignored if they change.
+ *
+ * Note that fragments do not use the full link MTU so that they won't have
+ * to undergo refragmentation if link changeover causes them to be sent
+ * over another link with an additional tunnel header added as prefix.
+ * (Refragmentation will still occur if the other link has a smaller MTU.)
+ *
+ * Returns user data length or errno.
+ */
+static int link_send_sections_long(struct port *sender,
+                                  struct iovec const *msg_sect,
+                                  u32 num_sect,
+                                  u32 destaddr)
+{
+       struct link *l_ptr;
+       struct node *node;
+       struct tipc_msg *hdr = &sender->publ.phdr;
+       u32 dsz = msg_data_sz(hdr);
+       u32 max_pkt, fragm_sz, rest;
+       struct tipc_msg fragm_hdr;
+       struct sk_buff *buf, *buf_chain, *prev;
+       u32 fragm_crs, fragm_rest, hsz, sect_rest;
+       const unchar *sect_crs;
+       int curr_sect;
+       u32 fragm_no;
+
+again:
+       fragm_no = 1;
+       /* leave room for tunnel header in case of link changeover */
+       max_pkt = sender->max_pkt - INT_H_SIZE;
+       /* leave room for fragmentation header in each fragment */
+       fragm_sz = max_pkt - INT_H_SIZE;
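+       /*
+        * Illustrative arithmetic (figures are hypothetical): with a
+        * max_pkt hint of, say, 1500 bytes and an INT_H_SIZE of 40 bytes,
+        * max_pkt becomes 1460 and fragm_sz 1420.  The first fragment
+        * additionally carries the original message header, so its data
+        * capacity is fragm_sz - msg_hdr_sz(hdr).
+        */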
+       rest = dsz;
+       fragm_crs = 0;
+       fragm_rest = 0;
+       sect_rest = 0;
+       sect_crs = NULL;
+       curr_sect = -1;
+
+       /* Prepare reusable fragment header: */
+
+       msg_dbg(hdr, ">FRAGMENTING>");
+       msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
+                TIPC_OK, INT_H_SIZE, msg_destnode(hdr));
+       msg_set_link_selector(&fragm_hdr, sender->publ.ref);
+       msg_set_size(&fragm_hdr, max_pkt);
+       msg_set_fragm_no(&fragm_hdr, 1);
+
+       /* Prepare header of first fragment: */
+
+       buf_chain = buf = buf_acquire(max_pkt);
+       if (!buf)
+               return -ENOMEM;
+       buf->next = NULL;
+       memcpy(buf->data, (unchar *)&fragm_hdr, INT_H_SIZE);
+       hsz = msg_hdr_sz(hdr);
+       memcpy(buf->data + INT_H_SIZE, (unchar *)hdr, hsz);
+       msg_dbg(buf_msg(buf), ">BUILD>");
+
+       /* Chop up message: */
+
+       fragm_crs = INT_H_SIZE + hsz;
+       fragm_rest = fragm_sz - hsz;
+
+       do {            /* For all sections */
+               u32 sz;
+
+               if (!sect_rest) {
+                       sect_rest = msg_sect[++curr_sect].iov_len;
+                       sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
+               }
+
+               if (sect_rest < fragm_rest)
+                       sz = sect_rest;
+               else
+                       sz = fragm_rest;
+
+               if (likely(!sender->user_port)) {
+                       if (copy_from_user(buf->data + fragm_crs, sect_crs, sz)) {
+error:
+                               for (; buf_chain; buf_chain = buf) {
+                                       buf = buf_chain->next;
+                                       buf_discard(buf_chain);
+                               }
+                               return -EFAULT;
+                       }
+               } else
+                       memcpy(buf->data + fragm_crs, sect_crs, sz);
+
+               sect_crs += sz;
+               sect_rest -= sz;
+               fragm_crs += sz;
+               fragm_rest -= sz;
+               rest -= sz;
+
+               if (!fragm_rest && rest) {
+
+                       /* Initiate new fragment: */
+                       if (rest <= fragm_sz) {
+                               fragm_sz = rest;
+                               msg_set_type(&fragm_hdr, LAST_FRAGMENT);
+                       } else {
+                               msg_set_type(&fragm_hdr, FRAGMENT);
+                       }
+                       msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
+                       msg_set_fragm_no(&fragm_hdr, ++fragm_no);
+                       prev = buf;
+                       buf = buf_acquire(fragm_sz + INT_H_SIZE);
+                       if (!buf)
+                               goto error;
+
+                       buf->next = NULL;                                
+                       prev->next = buf;
+                       memcpy(buf->data, (unchar *)&fragm_hdr, INT_H_SIZE);
+                       fragm_crs = INT_H_SIZE;
+                       fragm_rest = fragm_sz;
+                       msg_dbg(buf_msg(buf), "  >BUILD>");
+               }
+       } while (rest > 0);
+
+       /* 
+        * Now we have a buffer chain. Select a link and check
+        * that packet size is still OK
+        */
+       node = node_select(destaddr, sender->publ.ref & 1);
+       if (likely(node)) {
+               node_lock(node);
+               l_ptr = node->active_links[sender->publ.ref & 1];
+               if (!l_ptr) {
+                       node_unlock(node);
+                       goto reject;
+               }
+               if (link_max_pkt(l_ptr) < max_pkt) {
+                       sender->max_pkt = link_max_pkt(l_ptr);
+                       node_unlock(node);
+                       for (; buf_chain; buf_chain = buf) {
+                               buf = buf_chain->next;
+                               buf_discard(buf_chain);
+                       }
+                       goto again;
+               }
+       } else {
+reject:
+               for (; buf_chain; buf_chain = buf) {
+                       buf = buf_chain->next;
+                       buf_discard(buf_chain);
+               }
+               return port_reject_sections(sender, hdr, msg_sect, num_sect,
+                                           TIPC_ERR_NO_NODE);
+       }
+
+       /* Append whole chain to send queue: */
+
+       buf = buf_chain;
+       l_ptr->long_msg_seq_no = mod(l_ptr->long_msg_seq_no + 1);
+       if (!l_ptr->next_out)
+               l_ptr->next_out = buf_chain;
+       l_ptr->stats.sent_fragmented++;
+       while (buf) {
+               struct sk_buff *next = buf->next;
+               struct tipc_msg *msg = buf_msg(buf);
+
+               l_ptr->stats.sent_fragments++;
+               msg_set_long_msgno(msg, l_ptr->long_msg_seq_no);
+               link_add_to_outqueue(l_ptr, buf, msg);
+               msg_dbg(msg, ">ADD>");
+               buf = next;
+       }
+
+       /* Send it, if possible: */
+
+       link_push_queue(l_ptr);
+       node_unlock(node);
+       return dsz;
+}
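+
+/*
+ * Illustrative sketch (not used by the code): because the first fragment
+ * also carries the original message header while every fragment carries an
+ * INT_H_SIZE fragmentation header, the chain built above holds
+ *
+ *     nr_fragments = (msg_hdr_sz(hdr) + dsz + fragm_sz - 1) / fragm_sz;
+ *
+ * e.g. a hypothetical 10000-byte payload with a 24-byte header and a
+ * fragm_sz of 1420 is sent as 8 fragments.
+ */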
+
+/* 
+ * link_push_packet: Push one unsent packet to the media
+ */
+u32 link_push_packet(struct link *l_ptr)
+{
+       struct sk_buff *buf = l_ptr->first_out;
+       u32 r_q_size = l_ptr->retransm_queue_size;
+       u32 r_q_head = l_ptr->retransm_queue_head;
+
+       /*
+        * Step to the position where retransmission failed, if any,
+        * considering that buffers may have been released in the meantime
+        */
+
+       if (r_q_size && buf) {
+               u32 last = lesser(mod(r_q_head + r_q_size), 
+                                 link_last_sent(l_ptr));
+               u32 first = msg_seqno(buf_msg(buf));
+
+               while (buf && less(first, r_q_head)) {
+                       first = mod(first + 1);
+                       buf = buf->next;
+               }
+               l_ptr->retransm_queue_head = r_q_head = first;
+               l_ptr->retransm_queue_size = r_q_size = mod(last - first);
+       }
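+       /*
+        * Illustrative note: mod(), less() and lesser() are the link's
+        * wrapping sequence-number helpers (presumably modulo the 16-bit
+        * sequence number space), so the head/size bookkeeping above stays
+        * correct across a wrap; e.g. mod(3 - 0xfffe) == 5, the distance
+        * from sequence number 0xfffe up to 3.
+        */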
+
+       /* Continue retransmission now, if there is anything: */
+
+       if (r_q_size && buf && !skb_cloned(buf)) {
+               msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
+               msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in); 
+               if (bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
+                       msg_dbg(buf_msg(buf), ">DEF-RETR>");
+                       l_ptr->retransm_queue_head = mod(++r_q_head);
+                       l_ptr->retransm_queue_size = --r_q_size;
+                       l_ptr->stats.retransmitted++;
+                       return TIPC_OK;
+               } else {
+                       l_ptr->stats.bearer_congs++;
+                       msg_dbg(buf_msg(buf), "|>DEF-RETR>");
+                       return PUSH_FAILED;
+               }
+       }
+
+       /* Send deferred protocol message, if any: */
+
+       buf = l_ptr->proto_msg_queue;
+       if (buf) {
+               msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
+               msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in); 
+               if (bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
+                       msg_dbg(buf_msg(buf), ">DEF-PROT>");
+                       l_ptr->unacked_window = 0;
+                       buf_discard(buf);
+                       l_ptr->proto_msg_queue = NULL;
+                       return TIPC_OK;
+               } else {
+                       msg_dbg(buf_msg(buf), "|>DEF-PROT>");
+                       l_ptr->stats.bearer_congs++;
+                       return PUSH_FAILED;
+               }
+       }
+
+       /* Send one deferred data message, if send window not full: */
+
+       buf = l_ptr->next_out;
+       if (buf) {
+               struct tipc_msg *msg = buf_msg(buf);
+               u32 next = msg_seqno(msg);
+               u32 first = msg_seqno(buf_msg(l_ptr->first_out));
+
+               if (mod(next - first) < l_ptr->queue_limit[0]) {
+                       msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
+                       msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 
+                       if (bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
+                               if (msg_user(msg) == MSG_BUNDLER)
+                                       msg_set_type(msg, CLOSED_MSG);
+                               msg_dbg(msg, ">PUSH-DATA>");
+                               l_ptr->next_out = buf->next;
+                               return TIPC_OK;
+                       } else {
+                               msg_dbg(msg, "|PUSH-DATA|");
+                               l_ptr->stats.bearer_congs++;
+                               return PUSH_FAILED;
+                       }
+               }
+       }
+       return PUSH_FINISHED;
+}
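+
+/*
+ * Illustrative note: link_push_packet() reports one of three outcomes,
+ * which is what allows link_push_queue() below to drain the link with a
+ * simple loop:
+ *
+ *     TIPC_OK       - one packet was sent; try to push another
+ *     PUSH_FAILED   - the bearer is congested; reschedule the link on it
+ *     PUSH_FINISHED - nothing (more) is eligible for sending right now
+ */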
+
+/*
+ * link_push_queue(): push out the unsent messages of a link where
+ *                    congestion has abated. Node is locked.
+ */
+void link_push_queue(struct link *l_ptr)
+{
+       u32 res;
+
+       if (bearer_congested(l_ptr->b_ptr, l_ptr))
+               return;
+
+       do {
+               res = link_push_packet(l_ptr);
+       } while (res == TIPC_OK);
+       if (res == PUSH_FAILED)
+               bearer_schedule(l_ptr->b_ptr, l_ptr);
+}
+
+void link_retransmit(struct link *l_ptr, struct sk_buff *buf, 
+                    u32 retransmits)
+{
+       struct tipc_msg *msg;
+
+       dbg("Retransmitting %u in link %x\n", retransmits, l_ptr);
+
+       if (bearer_congested(l_ptr->b_ptr, l_ptr) && buf && !skb_cloned(buf)) {
+               msg_dbg(buf_msg(buf), ">NO_RETR->BCONG>");
+               dbg_print_link(l_ptr, "   ");
+               l_ptr->retransm_queue_head = msg_seqno(buf_msg(buf));
+               l_ptr->retransm_queue_size = retransmits;
+               return;
+       }
+       while (retransmits && (buf != l_ptr->next_out) && buf && !skb_cloned(buf)) {
+               msg = buf_msg(buf);
+               msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
+               msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 
+               if (bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
+                       /* Catch if retransmissions fail repeatedly: */
+                       if (l_ptr->last_retransmitted == msg_seqno(msg)) {
+                               if (++l_ptr->stale_count > 100) {
+                                       msg_print(CONS, buf_msg(buf), ">RETR>");
+                                       info("...Retransmitted %u times\n",
+                                            l_ptr->stale_count);
+                                       link_print(l_ptr, CONS, "Resetting Link\n");
+                                       link_reset(l_ptr);
+                                       break;
+                               }
+                       } else {
+                               l_ptr->stale_count = 0;
+                       }
+                       l_ptr->last_retransmitted = msg_seqno(msg);
+
+                       msg_dbg(buf_msg(buf), ">RETR>");
+                       buf = buf->next;
+                       retransmits--;
+                       l_ptr->stats.retransmitted++;
+               } else {
+                       bearer_schedule(l_ptr->b_ptr, l_ptr);
+                       l_ptr->stats.bearer_congs++;
+                       l_ptr->retransm_queue_head = msg_seqno(buf_msg(buf));
+                       l_ptr->retransm_queue_size = retransmits;
+                       return;
+               }
+       }
+       l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0;
+}
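+
+/*
+ * Illustrative note: the stale-retransmission guard in link_retransmit()
+ * counts consecutive retransmissions of one and the same sequence number,
+ * roughly:
+ *
+ *     if (msg_seqno(msg) == l_ptr->last_retransmitted &&
+ *         ++l_ptr->stale_count > 100)
+ *             link_reset(l_ptr);      // peer looks unreachable; restart
+ *
+ * so the link is reset only after ~100 retries of a single packet, not
+ * after 100 retransmissions in total.
+ */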
+
+/* 
+ * link_recv_non_seq: Receive packets which are outside
+ *                    the link sequence flow
+ */
+
+static void link_recv_non_seq(struct sk_buff *buf)
+{
+       struct tipc_msg *msg = buf_msg(buf);
+
+       if (msg_user(msg) == LINK_CONFIG)
+               disc_recv_msg(buf);
+       else
+               bclink_recv_pkt(buf);
+}
+
+/** 
+ * link_insert_deferred_queue - insert deferred messages back into receive chain
+ */
+
+static struct sk_buff *link_insert_deferred_queue(struct link *l_ptr, 
+                                                 struct sk_buff *buf)
+{
+       u32 seq_no;
+
+       if (l_ptr->oldest_deferred_in == NULL)
+               return buf;
+
+       seq_no = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
+       if (seq_no == mod(l_ptr->next_in_no)) {
+               l_ptr->newest_deferred_in->next = buf;
+               buf = l_ptr->oldest_deferred_in;
+               l_ptr->oldest_deferred_in = NULL;
+               l_ptr->deferred_inqueue_sz = 0;
+       }
+       return buf;
+}
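+
+/*
+ * Illustrative note: when the packet that has just been accepted in
+ * sequence closes the gap in front of the deferred queue, the whole
+ * deferred chain is spliced ahead of the remaining receive chain.  With
+ * hypothetical sequence numbers, next_in_no == 5 and
+ *
+ *     deferred queue: 5 -> 6 -> 7      remaining input: X -> Y
+ *
+ * the function returns 5 -> 6 -> 7 -> X -> Y, so the formerly out-of-order
+ * packets are processed before the rest of the newly received batch.
+ */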
+
+void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
+{
+       read_lock_bh(&net_lock);
+       while (head) {
+               struct bearer *b_ptr;
+               struct node *n_ptr;
+               struct link *l_ptr;
+               struct sk_buff *crs;
+               struct sk_buff *buf = head;
+               struct tipc_msg *msg = buf_msg(buf);
+               u32 seq_no = msg_seqno(msg);
+               u32 ackd = msg_ack(msg);
+               u32 released = 0;
+               int type;
+
+               b_ptr = (struct bearer *)tb_ptr;
+               TIPC_SKB_CB(buf)->handle = b_ptr;
+
+               head = head->next;
+               if (unlikely(msg_version(msg) != TIPC_VERSION))
+                       goto cont;
+#if 0
+               if (msg_user(msg) != LINK_PROTOCOL)
+#endif
+                       msg_dbg(msg, "<REC<");
+
+               if (unlikely(msg_non_seq(msg))) {
+                       link_recv_non_seq(buf);
+                       continue;
+               }
+               n_ptr = node_find(msg_prevnode(msg));
+               if (unlikely(!n_ptr))
+                       goto cont;
+
+               node_lock(n_ptr);
+               l_ptr = n_ptr->links[b_ptr->identity];
+               if (unlikely(!l_ptr)) {
+                       node_unlock(n_ptr);
+                       goto cont;
+               }
+               /* Release acked messages */
+               if (less(n_ptr->bclink.acked, msg_bcast_ack(msg))) {
+                       if (node_is_up(n_ptr) && n_ptr->bclink.supported)
+                               bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
+               }
+
+               crs = l_ptr->first_out;
+               while ((crs != l_ptr->next_out) && 
+                      less_eq(msg_seqno(buf_msg(crs)), ackd)) {
+                       struct sk_buff *next = crs->next;
+
+                       buf_discard(crs);
+                       crs = next;
+                       released++;
+               }
+               if (released) {
+                       l_ptr->first_out = crs;
+                       l_ptr->out_queue_size -= released;
+               }
+               if (unlikely(l_ptr->next_out))
+                       link_push_queue(l_ptr);
+               if (unlikely(!list_empty(&l_ptr->waiting_ports)))
+                       link_wakeup_ports(l_ptr, 0);
+               if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
+                       l_ptr->stats.sent_acks++;
+                       link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
+               }
+
+protocol_check:
+               if (likely(link_working_working(l_ptr))) {
+                       if (likely(seq_no == mod(l_ptr->next_in_no))) {
+                               l_ptr->next_in_no++;
+                               if (unlikely(l_ptr->oldest_deferred_in))
+                                       head = link_insert_deferred_queue(l_ptr,
+                                                                         head);
+                               if (likely(msg_is_dest(msg, tipc_own_addr))) {
+deliver:
+                                       if (likely(msg_isdata(msg))) {
+                                               node_unlock(n_ptr);
+                                               port_recv_msg(buf);
+                                               continue;
+                                       }
+                                       switch (msg_user(msg)) {
+                                       case MSG_BUNDLER:
+                                               l_ptr->stats.recv_bundles++;
+                                               l_ptr->stats.recv_bundled += 
+                                                       msg_msgcnt(msg);
+                                               node_unlock(n_ptr);
+                                               link_recv_bundle(buf);
+                                               continue;
+                                       case ROUTE_DISTRIBUTOR:
+                                               node_unlock(n_ptr);
+                                               cluster_recv_routing_table(buf);
+                                               continue;
+                                       case NAME_DISTRIBUTOR:
+                                               node_unlock(n_ptr);
+                                               named_recv(buf);
+                                               continue;
+                                       case CONN_MANAGER:
+                                               node_unlock(n_ptr);
+                                               port_recv_proto_msg(buf);
+                                               continue;
+                                       case MSG_FRAGMENTER:
+                                               l_ptr->stats.recv_fragments++;
+                                               if (link_recv_fragment(
+                                                       &l_ptr->defragm_buf, 
+                                                       &buf, &msg)) {
+                                                       l_ptr->stats.recv_fragmented++;
+                                                       goto deliver;
+                                               }
+                                               break;
+                                       case CHANGEOVER_PROTOCOL:
+                                               type = msg_type(msg);
+                                               if (link_recv_changeover_msg(
+                                                       &l_ptr, &buf)) {
+                                                       msg = buf_msg(buf);
+                                                       seq_no = msg_seqno(msg);
+                                                       TIPC_SKB_CB(buf)->handle 
+                                                               = b_ptr;
+                                                       if (type == ORIGINAL_MSG)
+                                                               goto deliver;
+                                                       goto protocol_check;
+                                               }
+                                               break;
+                                       }
+