#include "name_table.h"
#include "bcast.h"
-
#define MAX_PKT_DEFAULT_MCAST 1500 /* bcast link max packet size (fixed) */
#define BCLINK_WIN_DEFAULT 20 /* bcast link window size (default) */
#define BCLINK_LOG_BUF_SIZE 0
+/*
+ * Loss rate for incoming broadcast frames; used to test retransmission code.
+ * Set to N to cause every N'th frame to be discarded; 0 => don't discard any.
+ */
+
+#define TIPC_BCAST_LOSS_RATE 0
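+/*
+ * Illustration only: a test build with TIPC_BCAST_LOSS_RATE set to 4
+ * would make tipc_bclink_recv_pkt() silently drop every 4th incoming
+ * broadcast frame, creating sequence gaps that exercise the NACK and
+ * retransmission handling below.
+ */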
+
/**
* struct bcbearer_pair - a pair of bearers used by broadcast link
* @primary: pointer to primary bearer
char tipc_bclink_name[] = "multicast-link";
-static inline u32 buf_seqno(struct sk_buff *buf)
+static u32 buf_seqno(struct sk_buff *buf)
{
return msg_seqno(buf_msg(buf));
}
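+/*
+ * The outstanding-acknowledgement count for a broadcast buffer is stashed
+ * in the skb's TIPC control block 'handle' field, as the helpers below show:
+ */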
-static inline u32 bcbuf_acks(struct sk_buff *buf)
+static u32 bcbuf_acks(struct sk_buff *buf)
{
return (u32)(unsigned long)TIPC_SKB_CB(buf)->handle;
}
-static inline void bcbuf_set_acks(struct sk_buff *buf, u32 acks)
+static void bcbuf_set_acks(struct sk_buff *buf, u32 acks)
{
TIPC_SKB_CB(buf)->handle = (void *)(unsigned long)acks;
}
-static inline void bcbuf_decr_acks(struct sk_buff *buf)
+static void bcbuf_decr_acks(struct sk_buff *buf)
{
bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
}
* Called with 'node' locked, bc_lock unlocked
*/
-static inline void bclink_set_gap(struct node *n_ptr)
+static void bclink_set_gap(struct node *n_ptr)
{
struct sk_buff *buf = n_ptr->bclink.deferred_head;
* distribute NACKs, but tries to use the same spacing (divide by 16).
*/
-static inline int bclink_ack_allowed(u32 n)
+static int bclink_ack_allowed(u32 n)
{
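+ /*
+  * For example, a node only answers for packet n when its broadcast tag
+  * equals (n % TIPC_MIN_LINK_WIN), so ACK/NACK responses are staggered
+  * across the nodes of the cluster rather than sent by everyone at once.
+  */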
return((n % TIPC_MIN_LINK_WIN) == tipc_own_tag);
}
* @after: sequence number of last packet to *not* retransmit
* @to: sequence number of last packet to retransmit
*
- * Called with 'node' locked, bc_lock unlocked
+ * Called with bc_lock locked
*/
static void bclink_retransmit_pkt(u32 after, u32 to)
{
struct sk_buff *buf;
- spin_lock_bh(&bc_lock);
buf = bcl->first_out;
while (buf && less_eq(buf_seqno(buf), after)) {
buf = buf->next;
}
- if (buf != NULL)
- tipc_link_retransmit(bcl, buf, mod(to - after));
- spin_unlock_bh(&bc_lock);
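+ /*
+  * The NULL check on 'buf' is gone; a NULL buffer (nothing queued
+  * beyond 'after') is assumed to be tolerated by tipc_link_retransmit().
+  */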
+ tipc_link_retransmit(bcl, buf, mod(to - after));
}
/**
msg_set_bcgap_to(msg, n_ptr->bclink.gap_to);
msg_set_bcast_tag(msg, tipc_own_tag);
- if (tipc_bearer_send(&bcbearer->bearer, buf, 0)) {
+ if (tipc_bearer_send(&bcbearer->bearer, buf, NULL)) {
bcl->stats.sent_nacks++;
buf_discard(buf);
} else {
* Only tipc_net_lock set.
*/
-void tipc_bclink_peek_nack(u32 dest, u32 sender_tag, u32 gap_after, u32 gap_to)
+static void tipc_bclink_peek_nack(u32 dest, u32 sender_tag, u32 gap_after, u32 gap_to)
{
struct node *n_ptr = tipc_node_find(dest);
u32 my_after, my_to;
*/
void tipc_bclink_recv_pkt(struct sk_buff *buf)
-{
+{
+#if (TIPC_BCAST_LOSS_RATE)
+ static int rx_count = 0;
+#endif
struct tipc_msg *msg = buf_msg(buf);
struct node* node = tipc_node_find(msg_prevnode(msg));
u32 next_in;
tipc_node_lock(node);
tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
tipc_node_unlock(node);
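+ /*
+  * bc_lock is taken here so that it covers the stats update, the
+  * bcl->owner->next bookkeeping for the requesting node, and the
+  * retransmission itself; bclink_retransmit_pkt() no longer acquires
+  * the lock on its own.
+  */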
+ spin_lock_bh(&bc_lock);
bcl->stats.recv_nacks++;
+ bcl->owner->next = node; /* remember requestor */
bclink_retransmit_pkt(msg_bcgap_after(msg),
msg_bcgap_to(msg));
+ bcl->owner->next = NULL;
+ spin_unlock_bh(&bc_lock);
} else {
tipc_bclink_peek_nack(msg_destnode(msg),
- msg_bcast_tag(msg),
- msg_bcgap_after(msg),
- msg_bcgap_to(msg));
+ msg_bcast_tag(msg),
+ msg_bcgap_after(msg),
+ msg_bcgap_to(msg));
}
buf_discard(buf);
return;
}
+#if (TIPC_BCAST_LOSS_RATE)
+ if (++rx_count == TIPC_BCAST_LOSS_RATE) {
+ rx_count = 0;
+ buf_discard(buf);
+ return;
+ }
+#endif
+
tipc_node_lock(node);
receive:
deferred = node->bclink.deferred_head;
* Returns 0 if packet sent successfully, non-zero if not
*/
-int tipc_bcbearer_send(struct sk_buff *buf,
- struct tipc_bearer *unused1,
- struct tipc_media_addr *unused2)
+static int tipc_bcbearer_send(struct sk_buff *buf,
+ struct tipc_bearer *unused1,
+ struct tipc_media_addr *unused2)
{
static int send_count = 0;
- struct node_map remains;
- struct node_map remains_new;
+ struct node_map *remains;
+ struct node_map *remains_new;
+ struct node_map *remains_tmp;
int bp_index;
int swap_time;
+ int err;
/* Prepare buffer for broadcasting (if first time trying to send it) */
/* Send buffer over bearers until all targets reached */
- remains = tipc_cltr_bcast_nodes;
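+ /*
+  * The node maps are now allocated from the heap instead of living on
+  * the stack; presumably this keeps the stack frame of the broadcast
+  * send path small, since struct node_map can be comparatively large.
+  */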
+ remains = kmalloc(sizeof(struct node_map), GFP_ATOMIC);
+ remains_new = kmalloc(sizeof(struct node_map), GFP_ATOMIC);
+ if (!remains || !remains_new) {
+ err = ~TIPC_OK;
+ goto out; /* kfree(NULL) is safe */
+ }
+ *remains = tipc_cltr_bcast_nodes;
for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
struct bearer *p = bcbearer->bpairs[bp_index].primary;
if (!p)
break; /* no more bearers to try */
- tipc_nmap_diff(&remains, &p->nodes, &remains_new);
- if (remains_new.count == remains.count)
+ tipc_nmap_diff(remains, &p->nodes, remains_new);
+ if (remains_new->count == remains->count)
continue; /* bearer pair doesn't add anything */
if (!p->publ.blocked &&
bcbearer->bpairs[bp_index].primary = s;
bcbearer->bpairs[bp_index].secondary = p;
update:
- if (remains_new.count == 0)
- return TIPC_OK;
+ if (remains_new->count == 0) {
+ err = TIPC_OK;
+ goto out;
+ }
+ /* swap maps so "remains" points at the nodes still to be reached */
+ remains_tmp = remains;
remains = remains_new;
+ remains_new = remains_tmp;
}
/* Unable to reach all targets */
bcbearer->bearer.publ.blocked = 1;
bcl->stats.bearer_congs++;
- return ~TIPC_OK;
+ err = ~TIPC_OK;
+
+ out:
+ kfree(remains_new);
+ kfree(remains);
+ return err;
}
/**