net/batman-adv/send.c
/*
 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "send.h"
#include "routing.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "vis.h"
#include "aggregation.h"
#include "gateway_common.h"
#include "originator.h"

static void send_outstanding_bcast_packet(struct work_struct *work);

/* apply hop penalty for a normal link */
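/* e.g. with TQ_MAX_VALUE at 255 and a hop penalty of, say, 10, a
 * perfect tq of 255 is scaled down to 255 * (255 - 10) / 255 = 245,
 * i.e. roughly 4% per hop, so longer routes rank worse than equally
 * good shorter ones */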
static uint8_t hop_penalty(const uint8_t tq, struct bat_priv *bat_priv)
{
        int hop_penalty = atomic_read(&bat_priv->hop_penalty);
        return (tq * (TQ_MAX_VALUE - hop_penalty)) / (TQ_MAX_VALUE);
}

/* when do we schedule our own packet to be sent */
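/* the send time is jittered by +/- JITTER ms around orig_interval so
 * that neighboring nodes do not fire their OGMs at the same moment */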
static unsigned long own_send_time(struct bat_priv *bat_priv)
{
        return jiffies + msecs_to_jiffies(
                   atomic_read(&bat_priv->orig_interval) -
                   JITTER + (random32() % (2 * JITTER)));
}

/* when do we schedule a forwarded packet to be sent */
static unsigned long forward_send_time(void)
{
        return jiffies + msecs_to_jiffies(random32() % (JITTER/2));
}

/* send out an already prepared packet to the given address via the
 * specified batman interface */
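/* the skb is consumed in all cases: on the error path it is freed
 * here, otherwise ownership passes on to dev_queue_xmit() */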
int send_skb_packet(struct sk_buff *skb,
                                struct hard_iface *hard_iface,
                                uint8_t *dst_addr)
{
        struct ethhdr *ethhdr;

        if (hard_iface->if_status != IF_ACTIVE)
                goto send_skb_err;

        if (unlikely(!hard_iface->net_dev))
                goto send_skb_err;

        if (!(hard_iface->net_dev->flags & IFF_UP)) {
                pr_warning("Interface %s is not up - can't send packet via "
                           "that interface!\n", hard_iface->net_dev->name);
                goto send_skb_err;
        }

        /* push to the ethernet header. */
        if (my_skb_head_push(skb, sizeof(struct ethhdr)) < 0)
                goto send_skb_err;

        skb_reset_mac_header(skb);

        ethhdr = (struct ethhdr *)skb_mac_header(skb);
        memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
        memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
        ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);

        skb_set_network_header(skb, ETH_HLEN);
        skb->priority = TC_PRIO_CONTROL;
        skb->protocol = __constant_htons(ETH_P_BATMAN);

        skb->dev = hard_iface->net_dev;

        /* dev_queue_xmit() returns a negative result on error.  However on
         * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
         * (which is > 0). This will not be treated as an error. */

        return dev_queue_xmit(skb);
send_skb_err:
        kfree_skb(skb);
        return NET_XMIT_DROP;
}

/* Send a packet to a given interface */
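/* the skb may hold several OGMs aggregated back to back: each
 * batman_packet is followed by its num_hna announced addresses
 * (ETH_ALEN bytes each), which is exactly the stride used by the
 * loop below */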
static void send_packet_to_if(struct forw_packet *forw_packet,
                              struct hard_iface *hard_iface)
{
        struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
        char *fwd_str;
        uint8_t packet_num;
        int16_t buff_pos;
        struct batman_packet *batman_packet;
        struct sk_buff *skb;

        if (hard_iface->if_status != IF_ACTIVE)
                return;

        packet_num = 0;
        buff_pos = 0;
        batman_packet = (struct batman_packet *)forw_packet->skb->data;

        /* adjust all flags and log packets */
        while (aggregated_packet(buff_pos,
                                 forw_packet->packet_len,
                                 batman_packet->num_hna)) {

                /* we might have aggregated direct link packets with an
                 * ordinary base packet */
                if ((forw_packet->direct_link_flags & (1 << packet_num)) &&
                    (forw_packet->if_incoming == hard_iface))
                        batman_packet->flags |= DIRECTLINK;
                else
                        batman_packet->flags &= ~DIRECTLINK;

                fwd_str = (packet_num > 0 ? "Forwarding" : (forw_packet->own ?
                                                            "Sending own" :
                                                            "Forwarding"));
                bat_dbg(DBG_BATMAN, bat_priv,
                        "%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d,"
                        " IDF %s) on interface %s [%pM]\n",
                        fwd_str, (packet_num > 0 ? "aggregated " : ""),
                        batman_packet->orig, ntohl(batman_packet->seqno),
                        batman_packet->tq, batman_packet->ttl,
                        (batman_packet->flags & DIRECTLINK ?
                         "on" : "off"),
                        hard_iface->net_dev->name,
                        hard_iface->net_dev->dev_addr);

                buff_pos += sizeof(struct batman_packet) +
                        (batman_packet->num_hna * ETH_ALEN);
                packet_num++;
                batman_packet = (struct batman_packet *)
                        (forw_packet->skb->data + buff_pos);
        }

        /* create clone because function is called more than once */
        skb = skb_clone(forw_packet->skb, GFP_ATOMIC);
        if (skb)
                send_skb_packet(skb, hard_iface, broadcast_addr);
}

/* send a batman packet */
static void send_packet(struct forw_packet *forw_packet)
{
        struct hard_iface *hard_iface;
        struct net_device *soft_iface;
        struct bat_priv *bat_priv;
        struct batman_packet *batman_packet =
                (struct batman_packet *)(forw_packet->skb->data);
        unsigned char directlink = (batman_packet->flags & DIRECTLINK ? 1 : 0);

        if (!forw_packet->if_incoming) {
                pr_err("Error - can't forward packet: incoming iface not "
                       "specified\n");
                return;
        }

        soft_iface = forw_packet->if_incoming->soft_iface;
        bat_priv = netdev_priv(soft_iface);

        if (forw_packet->if_incoming->if_status != IF_ACTIVE)
                return;

        /* multihomed peer assumed */
        /* non-primary OGMs are only broadcast on their interface */
        if ((directlink && (batman_packet->ttl == 1)) ||
            (forw_packet->own && (forw_packet->if_incoming->if_num > 0))) {

                /* FIXME: what about aggregated packets ? */
                bat_dbg(DBG_BATMAN, bat_priv,
                        "%s packet (originator %pM, seqno %d, TTL %d) "
                        "on interface %s [%pM]\n",
                        (forw_packet->own ? "Sending own" : "Forwarding"),
                        batman_packet->orig, ntohl(batman_packet->seqno),
                        batman_packet->ttl,
                        forw_packet->if_incoming->net_dev->name,
                        forw_packet->if_incoming->net_dev->dev_addr);

                /* the skb is only used once and then forw_packet is freed */
                send_skb_packet(forw_packet->skb, forw_packet->if_incoming,
                                broadcast_addr);
                forw_packet->skb = NULL;

                return;
        }

        /* broadcast on every interface */
        rcu_read_lock();
        list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
                if (hard_iface->soft_iface != soft_iface)
                        continue;

                send_packet_to_if(forw_packet, hard_iface);
        }
        rcu_read_unlock();
}

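/* re-allocate the OGM buffer of the interface so that the current local
 * HNA table fits behind the batman_packet header; if the allocation
 * fails the old buffer simply stays in place */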
static void rebuild_batman_packet(struct bat_priv *bat_priv,
                                  struct hard_iface *hard_iface)
{
        int new_len;
        unsigned char *new_buff;
        struct batman_packet *batman_packet;

        new_len = sizeof(struct batman_packet) +
                        (bat_priv->num_local_hna * ETH_ALEN);
        new_buff = kmalloc(new_len, GFP_ATOMIC);

        /* keep old buffer if kmalloc should fail */
        if (new_buff) {
                memcpy(new_buff, hard_iface->packet_buff,
                       sizeof(struct batman_packet));
                batman_packet = (struct batman_packet *)new_buff;

                batman_packet->num_hna = hna_local_fill_buffer(bat_priv,
                                new_buff + sizeof(struct batman_packet),
                                new_len - sizeof(struct batman_packet));

                kfree(hard_iface->packet_buff);
                hard_iface->packet_buff = new_buff;
                hard_iface->packet_len = new_len;
        }
}

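/* prepare this interface's own OGM (sequence number, vis and gateway
 * flags, HNA buffer) and queue it with a jittered send time */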
void schedule_own_packet(struct hard_iface *hard_iface)
{
        struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
        unsigned long send_time;
        struct batman_packet *batman_packet;
        int vis_server;

        if ((hard_iface->if_status == IF_NOT_IN_USE) ||
            (hard_iface->if_status == IF_TO_BE_REMOVED))
                return;

        vis_server = atomic_read(&bat_priv->vis_mode);

        /**
         * the interface gets activated here instead of in
         * hardif_activate_interface() (where the originator mac is set)
         * to avoid a race with outdated packets - especially packets
         * carrying uninitialized mac addresses - still sitting in the
         * packet queue
         */
        if (hard_iface->if_status == IF_TO_BE_ACTIVATED)
                hard_iface->if_status = IF_ACTIVE;

        /* if the local hna has changed and the interface is a primary
         * interface */
        if ((atomic_read(&bat_priv->hna_local_changed)) &&
            (hard_iface == bat_priv->primary_if))
                rebuild_batman_packet(bat_priv, hard_iface);

        /**
         * NOTE: packet_buff might just have been re-allocated in
         * rebuild_batman_packet()
         */
        batman_packet = (struct batman_packet *)hard_iface->packet_buff;

        /* change sequence number to network order */
        batman_packet->seqno =
                htonl((uint32_t)atomic_read(&hard_iface->seqno));

        if (vis_server == VIS_TYPE_SERVER_SYNC)
                batman_packet->flags |= VIS_SERVER;
        else
                batman_packet->flags &= ~VIS_SERVER;

        if ((hard_iface == bat_priv->primary_if) &&
            (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER))
                batman_packet->gw_flags =
                                (uint8_t)atomic_read(&bat_priv->gw_bandwidth);
        else
                batman_packet->gw_flags = 0;

        atomic_inc(&hard_iface->seqno);

        slide_own_bcast_window(hard_iface);
        send_time = own_send_time(bat_priv);
        add_bat_packet_to_list(bat_priv,
                               hard_iface->packet_buff,
                               hard_iface->packet_len,
                               hard_iface, 1, send_time);
}

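/* prepare a received OGM for rebroadcast: decrement the TTL, note the
 * transmitting neighbor as prev_sender, align tq/ttl with our best
 * route to the originator, apply the hop penalty and queue the packet
 * with a small random delay */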
void schedule_forward_packet(struct orig_node *orig_node,
                             struct ethhdr *ethhdr,
                             struct batman_packet *batman_packet,
                             uint8_t directlink, int hna_buff_len,
                             struct hard_iface *if_incoming)
{
        struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
        unsigned char in_tq, in_ttl, tq_avg = 0;
        unsigned long send_time;

        if (batman_packet->ttl <= 1) {
                bat_dbg(DBG_BATMAN, bat_priv, "ttl exceeded\n");
                return;
        }

        in_tq = batman_packet->tq;
        in_ttl = batman_packet->ttl;

        batman_packet->ttl--;
        memcpy(batman_packet->prev_sender, ethhdr->h_source, ETH_ALEN);

        /* rebroadcast the tq of our best ranking neighbor to ensure the
         * rebroadcast of our best tq value */
        if ((orig_node->router) && (orig_node->router->tq_avg != 0)) {

                /* rebroadcast ogm of best ranking neighbor as is */
                if (!compare_eth(orig_node->router->addr, ethhdr->h_source)) {
                        batman_packet->tq = orig_node->router->tq_avg;

                        if (orig_node->router->last_ttl)
                                batman_packet->ttl = orig_node->router->last_ttl
                                                        - 1;
                }

                tq_avg = orig_node->router->tq_avg;
        }

        /* apply hop penalty */
        batman_packet->tq = hop_penalty(batman_packet->tq, bat_priv);

        bat_dbg(DBG_BATMAN, bat_priv,
                "Forwarding packet: tq_orig: %i, tq_avg: %i, "
                "tq_forw: %i, ttl_orig: %i, ttl_forw: %i\n",
                in_tq, tq_avg, batman_packet->tq, in_ttl - 1,
                batman_packet->ttl);

        batman_packet->seqno = htonl(batman_packet->seqno);

        /* switch off the primaries first hop flag when forwarding */
        batman_packet->flags &= ~PRIMARIES_FIRST_HOP;
        if (directlink)
                batman_packet->flags |= DIRECTLINK;
        else
                batman_packet->flags &= ~DIRECTLINK;

        send_time = forward_send_time();
        add_bat_packet_to_list(bat_priv,
                               (unsigned char *)batman_packet,
                               sizeof(struct batman_packet) + hna_buff_len,
                               if_incoming, 0, send_time);
}

static void forw_packet_free(struct forw_packet *forw_packet)
{
        if (forw_packet->skb)
                kfree_skb(forw_packet->skb);
        kfree(forw_packet);
}

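/* enqueue an already set up broadcast forw_packet and arm its delayed
 * work; send_time is a delay in jiffies relative to now */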
static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
                                      struct forw_packet *forw_packet,
                                      unsigned long send_time)
{
        INIT_HLIST_NODE(&forw_packet->list);

        /* add new packet to packet list */
        spin_lock_bh(&bat_priv->forw_bcast_list_lock);
        hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
        spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

        /* start timer for this packet */
        INIT_DELAYED_WORK(&forw_packet->delayed_work,
                          send_outstanding_bcast_packet);
        queue_delayed_work(bat_event_workqueue, &forw_packet->delayed_work,
                           send_time);
}

#define atomic_dec_not_zero(v)          atomic_add_unless((v), -1, 0)
/* add a broadcast packet to the queue and set up timers. broadcast packets
 * are sent multiple times to increase the probability of being received.
 *
 * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
 * errors.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed. */
int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb)
{
        struct forw_packet *forw_packet;
        struct bcast_packet *bcast_packet;

        if (!atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
                bat_dbg(DBG_BATMAN, bat_priv, "bcast packet queue full\n");
                goto out;
        }

        /* the queue counter was already decremented, so give the slot
         * back if no primary interface is available */
        if (!bat_priv->primary_if)
                goto out_and_inc;

        forw_packet = kmalloc(sizeof(struct forw_packet), GFP_ATOMIC);

        if (!forw_packet)
                goto out_and_inc;

        skb = skb_copy(skb, GFP_ATOMIC);
        if (!skb)
                goto packet_free;

        /* as we have a copy now, it is safe to decrease the TTL */
        bcast_packet = (struct bcast_packet *)skb->data;
        bcast_packet->ttl--;

        skb_reset_mac_header(skb);

        forw_packet->skb = skb;
        forw_packet->if_incoming = bat_priv->primary_if;

        /* how often did we send the bcast packet? */
        forw_packet->num_packets = 0;

        _add_bcast_packet_to_list(bat_priv, forw_packet, 1);
        return NETDEV_TX_OK;

packet_free:
        kfree(forw_packet);
out_and_inc:
        atomic_inc(&bat_priv->bcast_queue_left);
out:
        return NETDEV_TX_BUSY;
}

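/* delayed work callback: rebroadcast the queued packet on all hard
 * interfaces belonging to its soft interface and re-arm itself until
 * the packet has been sent three times */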
static void send_outstanding_bcast_packet(struct work_struct *work)
{
        struct hard_iface *hard_iface;
        struct delayed_work *delayed_work =
                container_of(work, struct delayed_work, work);
        struct forw_packet *forw_packet =
                container_of(delayed_work, struct forw_packet, delayed_work);
        struct sk_buff *skb1;
        struct net_device *soft_iface = forw_packet->if_incoming->soft_iface;
        struct bat_priv *bat_priv = netdev_priv(soft_iface);

        spin_lock_bh(&bat_priv->forw_bcast_list_lock);
        hlist_del(&forw_packet->list);
        spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

        if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
                goto out;

        /* rebroadcast packet */
        rcu_read_lock();
        list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
                if (hard_iface->soft_iface != soft_iface)
                        continue;

                /* send a copy of the saved skb */
                skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
                if (skb1)
                        send_skb_packet(skb1, hard_iface, broadcast_addr);
        }
        rcu_read_unlock();

        forw_packet->num_packets++;

        /* if we still have some more bcasts to send */
        if (forw_packet->num_packets < 3) {
                _add_bcast_packet_to_list(bat_priv, forw_packet,
                                          ((5 * HZ) / 1000));
                return;
        }

out:
        forw_packet_free(forw_packet);
        atomic_inc(&bat_priv->bcast_queue_left);
}

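/* delayed work callback for the OGM queue: transmit the packet and, if
 * it was our own, schedule the next own OGM for that interface */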
void send_outstanding_bat_packet(struct work_struct *work)
{
        struct delayed_work *delayed_work =
                container_of(work, struct delayed_work, work);
        struct forw_packet *forw_packet =
                container_of(delayed_work, struct forw_packet, delayed_work);
        struct bat_priv *bat_priv;

        bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
        spin_lock_bh(&bat_priv->forw_bat_list_lock);
        hlist_del(&forw_packet->list);
        spin_unlock_bh(&bat_priv->forw_bat_list_lock);

        if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
                goto out;

        send_packet(forw_packet);

        /**
         * we have to have at least one packet in the queue
         * to determine the queue's wake-up time unless we are
         * shutting down
         */
        if (forw_packet->own)
                schedule_own_packet(forw_packet->if_incoming);

out:
        /* don't count own packet */
        if (!forw_packet->own)
                atomic_inc(&bat_priv->batman_queue_left);

        forw_packet_free(forw_packet);
}

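/* cancel the delayed work of all queued broadcast and OGM packets and
 * free them; if hard_iface is given, only packets scheduled via that
 * interface are purged */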
void purge_outstanding_packets(struct bat_priv *bat_priv,
                               struct hard_iface *hard_iface)
{
        struct forw_packet *forw_packet;
        struct hlist_node *tmp_node, *safe_tmp_node;

        if (hard_iface)
                bat_dbg(DBG_BATMAN, bat_priv,
                        "purge_outstanding_packets(): %s\n",
                        hard_iface->net_dev->name);
        else
                bat_dbg(DBG_BATMAN, bat_priv,
                        "purge_outstanding_packets()\n");

        /* free bcast list */
        spin_lock_bh(&bat_priv->forw_bcast_list_lock);
        hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
                                  &bat_priv->forw_bcast_list, list) {

                /**
                 * if purge_outstanding_packets() was called with an argument
                 * we delete only packets belonging to the given interface
                 */
                if ((hard_iface) &&
                    (forw_packet->if_incoming != hard_iface))
                        continue;

                spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

                /**
                 * send_outstanding_bcast_packet() will lock the list to
                 * delete the item from the list
                 */
                cancel_delayed_work_sync(&forw_packet->delayed_work);
                spin_lock_bh(&bat_priv->forw_bcast_list_lock);
        }
        spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

        /* free batman packet list */
        spin_lock_bh(&bat_priv->forw_bat_list_lock);
        hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
                                  &bat_priv->forw_bat_list, list) {

                /**
                 * if purge_outstanding_packets() was called with an argument
                 * we delete only packets belonging to the given interface
                 */
                if ((hard_iface) &&
                    (forw_packet->if_incoming != hard_iface))
                        continue;

                spin_unlock_bh(&bat_priv->forw_bat_list_lock);

                /**
                 * send_outstanding_bat_packet() will lock the list to
                 * delete the item from the list
                 */
                cancel_delayed_work_sync(&forw_packet->delayed_work);
                spin_lock_bh(&bat_priv->forw_bat_list_lock);
        }
        spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}