net: Add MHI support for RMC PegaPCI.
[linux-3.10.git] / net / mhi / l3mhdp.c
1 /*
2  * File: l3mhdp.c
3  *
4  * MHDP - Modem Host Data Protocol for MHI protocol family.
5  *
6  * Copyright (C) 2011 Renesas Mobile Corporation. All rights reserved.
7  *
8  * Author:      Sugnan Prabhu S <sugnan.prabhu@renesasmobile.com>
9  *              Petri Mattila <petri.to.mattila@renesasmobile.com>
10  *
11  * Based on work by: Sam Lantinga (slouken@cs.ucdavis.edu)
12  *
13  * This program is free software; you can redistribute it and/or
14  * modify it under the terms of the GNU General Public License
15  * version 2 as published by the Free Software Foundation.
16  *
17  * This program is distributed in the hope that it will be useful, but
18  * WITHOUT ANY WARRANTY; without even the implied warranty of
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
20  * General Public License for more details.
21  *
22  * You should have received a copy of the GNU General Public License
23  * along with this program; if not, write to the Free Software
24  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
25  * 02110-1301 USA
26  *
27  */
28
29 #include <linux/module.h>
30 #include <linux/types.h>
31 #include <linux/kernel.h>
32 #include <linux/version.h>
33
34 #include <linux/skbuff.h>
35 #include <linux/netdevice.h>
36 #include <linux/if_arp.h>
37 #include <linux/l2mux.h>
38 #include <linux/etherdevice.h>
39 #include <linux/pkt_sched.h>
40
41 #include <net/netns/generic.h>
42 #include <net/mhi/mhdp.h>
43
44
/* MHDP device MTU limits */
#define MHDP_MTU_MAX            0x2400
#define MHDP_MTU_MIN            0x44

/* MHDP device names */
#define MHDP_IFNAME             "rmnet%d"
#define MHDP_CTL_IFNAME         "rmnetctl"

/* Print every MHDP SKB content */
/*#define MHDP_DEBUG_SKB*/


/* Error print helper; always compiled in. */
#define EPRINTK(...)    printk(KERN_DEBUG "MHI/MHDP: " __VA_ARGS__)

/* Debug print helper; compiles to nothing unless CONFIG_MHI_DEBUG is set. */
#ifdef CONFIG_MHI_DEBUG
# define DPRINTK(...)    printk(KERN_DEBUG "MHI/MHDP: " __VA_ARGS__)
#else
# define DPRINTK(...)
#endif

/* SKB hex dump helper; compiles to nothing unless MHDP_DEBUG_SKB is set. */
#ifdef MHDP_DEBUG_SKB
# define SKBPRINT(a, b)    __print_skb_content(a, b)
#else
# define SKBPRINT(a, b)
#endif

/* IPv6 support */
/* The high nibble of the first payload byte selects the ethertype. */
#define VER_IPv4 0x04
#define VER_IPv6 0x06
#define ETH_IP_TYPE(x) (((0x00|(x>>4)) == VER_IPv4) ? ETH_P_IP : ETH_P_IPV6)

/* Tunable: number of packets to concatenate into one uplink MHDP frame. */
int sysctl_mhdp_concat_nb_pkt __read_mostly;
EXPORT_SYMBOL(sysctl_mhdp_concat_nb_pkt);
78
79 /*** Type definitions ***/
80
/* Max packets carried in one MHDP aggregate frame; also sizes the
 * per-tunnel array of pending skbs awaiting transmission. */
#define MAX_MHDPHDR_SIZE 12

/* Per-tunnel state, stored as netdev_priv() of the tunnel device. */
struct mhdp_tunnel {
        struct mhdp_tunnel      *next;        /* singly-linked list of tunnels */
        struct net_device       *dev;         /* this tunnel's net device */
        struct net_device       *master_dev;  /* underlying MHI/L2MUX device */
        struct sk_buff          *skb;         /* aggregate TX skb being built */
        int pdn_id;                           /* PDN id served by this tunnel */
        struct timer_list tx_timer;           /* flush timer for aggregate skb */
        /* source skbs freed once the aggregate has been transmitted */
        struct sk_buff *skb_to_free[MAX_MHDPHDR_SIZE];
        spinlock_t timer_lock;                /* protects skb / tx_timer state */
};

/* Per-network-namespace MHDP state (registered via pernet_operations). */
struct mhdp_net {
        struct mhdp_tunnel      *tunnels;     /* head of active tunnel list */
        struct net_device       *ctl_dev;     /* control device ("rmnetctl") */
};

/* Descriptor of one packet inside an MHDP aggregate frame. */
struct packet_info {
        uint32_t pdn_id;
        uint32_t packet_offset;  /* byte offset of packet after MHDP header */
        uint32_t packet_length;  /* packet length in bytes */
};

/* MHDP frame header: packet count followed by per-packet descriptors. */
struct mhdp_hdr {
        uint32_t packet_count;
        struct packet_info info[MAX_MHDPHDR_SIZE];
};
109
110
111 /*** Prototypes ***/
112
static void mhdp_netdev_setup(struct net_device *dev);

static void mhdp_submit_queued_skb(struct mhdp_tunnel *tunnel);

static int mhdp_netdev_event(struct notifier_block *this,
                             unsigned long event, void *ptr);

static void tx_timer_timeout(unsigned long arg);

/*** Global Variables ***/

/* Key used to look up the per-namespace mhdp_net via net_generic(). */
static int  mhdp_net_id __read_mostly;

/* Tears down tunnels when their master device is unregistered. */
static struct notifier_block mhdp_netdev_notifier = {
        .notifier_call = mhdp_netdev_event,
};
129
130 /*** Functions ***/
131
#ifdef MHDP_DEBUG_SKB
/*
 * __print_skb_content - hex-dump an skb's linear buffer and page
 * fragments to the kernel log, 8 bytes per line, prefixed with @tag.
 * Debug-only helper, compiled in only when MHDP_DEBUG_SKB is defined.
 */
static void
__print_skb_content(struct sk_buff *skb, const char *tag)
{
        struct page *page;
        skb_frag_t *frag;
        int len;
        int i, j;
        u8 *ptr;

        /* Main SKB buffer */
        ptr = (u8 *)skb->data;
        len = skb_headlen(skb);

        /* fix: "lenght" typo in log message */
        printk(KERN_DEBUG "MHDP: SKB buffer length %02u\n", len);
        for (i = 0; i < len; i++) {
                if (i%8 == 0)
                        printk(KERN_DEBUG "%s DATA: ", tag);
                printk(" 0x%02X", ptr[i]);
                if (i%8 == 7 || i == len - 1)
                        printk("\n");
        }

        /* SKB fragments */
        for (i = 0; i < (skb_shinfo(skb)->nr_frags); i++) {
                frag = &skb_shinfo(skb)->frags[i];
                page = skb_frag_page(frag);

                ptr = page_address(page);

                for (j = 0; j < frag->size; j++) {
                        if (j%8 == 0)
                                printk(KERN_DEBUG "%s FRAG[%d]: ", tag, i);
                        printk(" 0x%02X", ptr[frag->page_offset + j]);
                        if (j%8 == 7 || j == frag->size - 1)
                                printk("\n");
                }
        }
}
#endif
172
173
174 static inline struct mhdp_net *
175 mhdp_net_dev(struct net_device *dev)
176 {
177         return net_generic(dev_net(dev), mhdp_net_id);
178 }
179
180 static void
181 mhdp_tunnel_init(struct net_device *dev,
182                  struct mhdp_tunnel_parm *parms,
183                  struct net_device *master_dev)
184 {
185         struct mhdp_net *mhdpn = mhdp_net_dev(dev);
186         struct mhdp_tunnel *tunnel = netdev_priv(dev);
187
188         DPRINTK("mhdp_tunnel_init: dev:%s", dev->name);
189
190         tunnel->next = mhdpn->tunnels;
191         mhdpn->tunnels = tunnel;
192
193         tunnel->dev         = dev;
194         tunnel->master_dev  = master_dev;
195         tunnel->skb         = NULL;
196         tunnel->pdn_id      = parms->pdn_id;
197
198         init_timer(&tunnel->tx_timer);
199         spin_lock_init(&tunnel->timer_lock);
200 }
201
202 static void
203 mhdp_tunnel_destroy(struct net_device *dev)
204 {
205         DPRINTK("mhdp_tunnel_destroy: dev:%s", dev->name);
206
207         unregister_netdevice(dev);
208 }
209
210 static void
211 mhdp_destroy_tunnels(struct mhdp_net *mhdpn)
212 {
213         struct mhdp_tunnel *tunnel;
214
215         for (tunnel = mhdpn->tunnels; (tunnel); tunnel = tunnel->next)
216                 mhdp_tunnel_destroy(tunnel->dev);
217
218         mhdpn->tunnels = NULL;
219 }
220
221 static struct mhdp_tunnel *
222 mhdp_locate_tunnel(struct mhdp_net *mhdpn, int pdn_id)
223 {
224         struct mhdp_tunnel *tunnel;
225
226         for (tunnel = mhdpn->tunnels; tunnel; tunnel = tunnel->next)
227                 if (tunnel->pdn_id == pdn_id)
228                         return tunnel;
229
230         return NULL;
231 }
232
233 static struct net_device *
234 mhdp_add_tunnel(struct net *net, struct mhdp_tunnel_parm *parms)
235 {
236         struct net_device *mhdp_dev, *master_dev;
237
238         DPRINTK("mhdp_add_tunnel: adding a tunnel to %s\n", parms->master);
239
240         master_dev = dev_get_by_name(net, parms->master);
241         if (!master_dev)
242                 goto err_alloc_dev;
243
244         mhdp_dev = alloc_netdev(sizeof(struct mhdp_tunnel),
245                                 MHDP_IFNAME, mhdp_netdev_setup);
246         if (!mhdp_dev)
247                 goto err_alloc_dev;
248
249         dev_net_set(mhdp_dev, net);
250
251         if (dev_alloc_name(mhdp_dev, MHDP_IFNAME) < 0)
252                 goto err_reg_dev;
253
254         strcpy(parms->name, mhdp_dev->name);
255
256         if (register_netdevice(mhdp_dev)) {
257                 printk(KERN_ERR "MHDP: register_netdev failed\n");
258                 goto err_reg_dev;
259         }
260
261         dev_hold(mhdp_dev);
262
263         mhdp_tunnel_init(mhdp_dev, parms, master_dev);
264
265         mhdp_dev->flags    |= IFF_SLAVE;
266         master_dev->flags  |= IFF_MASTER;
267
268         dev_put(master_dev);
269
270         return mhdp_dev;
271
272 err_reg_dev:
273         free_netdev(mhdp_dev);
274 err_alloc_dev:
275         return NULL;
276 }
277
278
279 static int
280 mhdp_netdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
281 {
282         struct net *net = dev_net(dev);
283         struct mhdp_net *mhdpn = mhdp_net_dev(dev);
284         struct mhdp_tunnel *tunnel, *pre_dev;
285         struct mhdp_tunnel_parm __user *u_parms;
286         struct mhdp_tunnel_parm k_parms;
287
288         int err = 0;
289
290         DPRINTK("mhdp tunnel ioctl %X", cmd);
291
292         switch (cmd) {
293
294         case SIOCADDPDNID:
295                 u_parms = (struct mhdp_tunnel_parm *)ifr->ifr_data;
296                 if (copy_from_user(&k_parms, u_parms,
297                                    sizeof(struct mhdp_tunnel_parm))) {
298                         DPRINTK("Error: Failed to copy data from user space");
299                         return -EFAULT;
300                 }
301
302                 DPRINTK("pdn_id:%d master_device:%s", k_parms.pdn_id,
303                                                         k_parms.master);
304
305                 if (!mhdp_locate_tunnel(mhdpn, k_parms.pdn_id)) {
306                         if (mhdp_add_tunnel(net, &k_parms)) {
307                                 if (copy_to_user(u_parms, &k_parms,
308                                          sizeof(struct mhdp_tunnel_parm)))
309                                         err = -EINVAL;
310                         } else {
311                                 err = -EINVAL;
312                         }
313                 } else {
314                         err = -EBUSY;
315                 }
316                 break;
317
318         case SIOCDELPDNID:
319                 u_parms = (struct mhdp_tunnel_parm *)ifr->ifr_data;
320                 if (copy_from_user(&k_parms, u_parms,
321                                         sizeof(struct mhdp_tunnel_parm))) {
322                         DPRINTK("Error: Failed to copy data from user space");
323                         return -EFAULT;
324                 }
325
326                 DPRINTK("pdn_id:%d", k_parms.pdn_id);
327
328                 for (tunnel = mhdpn->tunnels, pre_dev = NULL;
329                         tunnel;
330                         pre_dev = tunnel, tunnel = tunnel->next) {
331                         if (tunnel->pdn_id == k_parms.pdn_id) {
332                                 if (!pre_dev)
333                                         mhdpn->tunnels = mhdpn->tunnels->next;
334                                 else
335                                         pre_dev->next = tunnel->next;
336
337                                 mhdp_tunnel_destroy(tunnel->dev);
338                         }
339                 }
340                 break;
341
342         case SIOCRESETMHDP:
343                 mhdp_destroy_tunnels(mhdpn);
344                 break;
345
346         default:
347                 err = -EINVAL;
348         }
349
350         return err;
351 }
352
353 static int
354 mhdp_netdev_change_mtu(struct net_device *dev, int new_mtu)
355 {
356         if (new_mtu < MHDP_MTU_MIN || new_mtu > MHDP_MTU_MAX)
357                 return -EINVAL;
358
359         dev->mtu = new_mtu;
360
361         return 0;
362 }
363
/*
 * ndo_uninit callback: drops the reference taken with dev_hold() when
 * the device was created (mhdp_add_tunnel / mhdp_init_net).
 */
static void
mhdp_netdev_uninit(struct net_device *dev)
{
        dev_put(dev);
}
369
370
371 static void
372 mhdp_submit_queued_skb(struct mhdp_tunnel *tunnel)
373 {
374         struct sk_buff *skb = tunnel->skb;
375         struct l2muxhdr *l2hdr;
376         struct mhdp_hdr *mhdpHdr;
377         int i, nb_frags;
378
379         BUG_ON(!tunnel->master_dev);
380
381         if (skb) {
382                 mhdpHdr = (struct mhdp_hdr *)tunnel->skb->data;
383                 nb_frags = mhdpHdr->packet_count;
384
385                 skb->protocol = htons(ETH_P_MHDP);
386                 skb->priority = 1;
387
388                 skb->dev = tunnel->master_dev;
389
390                 skb_reset_network_header(skb);
391
392                 skb_push(skb, L2MUX_HDR_SIZE);
393                 skb_reset_mac_header(skb);
394
395                 l2hdr = l2mux_hdr(skb);
396                 l2mux_set_proto(l2hdr, MHI_L3_MHDP_UL);
397                 l2mux_set_length(l2hdr, skb->len - L2MUX_HDR_SIZE);
398
399                 SKBPRINT(skb, "MHDP: TX");
400
401                 tunnel->dev->stats.tx_packets++;
402                 tunnel->skb = NULL;
403
404                 dev_queue_xmit(skb);
405
406                 for (i = 0; i < nb_frags; i++)
407                         dev_kfree_skb(tunnel->skb_to_free[i]);
408         }
409 }
410
411 static int
412 mhdp_netdev_rx(struct sk_buff *skb, struct net_device *dev)
413 {
414         skb_frag_t *frag = NULL;
415         struct page *page = NULL;
416         struct sk_buff *newskb;
417         struct mhdp_hdr *mhdpHdr;
418         int offset, length;
419         int err = 0, i, pdn_id;
420         int mhdp_header_len;
421         struct mhdp_tunnel *tunnel = NULL;
422         int start = 0;
423         int has_frag = skb_shinfo(skb)->nr_frags;
424         uint32_t packet_count;
425         unsigned char ip_ver;
426
427         if (has_frag) {
428                 frag = &skb_shinfo(skb)->frags[0];
429                 page = skb_frag_page(frag);
430         }
431
432         if (skb_headlen(skb) > L2MUX_HDR_SIZE)
433                 skb_pull(skb, L2MUX_HDR_SIZE);
434         else if (has_frag)
435                 frag->page_offset += L2MUX_HDR_SIZE;
436
437         packet_count = *((unsigned char *)skb->data);
438
439         mhdp_header_len = sizeof(packet_count) +
440                 (packet_count * sizeof(struct packet_info));
441
442         if (mhdp_header_len > skb_headlen(skb)) {
443                 int skbheadlen = skb_headlen(skb);
444
445                 DPRINTK("mhdp header length: %d, skb_headerlen: %d",
446                                 mhdp_header_len, skbheadlen);
447
448                 mhdpHdr = kmalloc(mhdp_header_len, GFP_ATOMIC);
449                 if (mhdpHdr == NULL) {
450                         printk(KERN_ERR "%s: kmalloc failed.\n", __func__);
451                         return err;
452                 }
453
454                 if (skbheadlen == 0) {
455                         memcpy((__u8 *)mhdpHdr, page_address(page) +
456                                                 frag->page_offset,
457                                                 mhdp_header_len);
458
459                 } else {
460                         memcpy((__u8 *)mhdpHdr, skb->data, skbheadlen);
461
462                         memcpy((__u8 *)mhdpHdr + skbheadlen,
463                                page_address(page) +
464                                frag->page_offset,
465                                mhdp_header_len - skbheadlen);
466
467                         start = mhdp_header_len - skbheadlen;
468                 }
469
470                 DPRINTK("page start: %d", start);
471         } else {
472                 DPRINTK("skb->data has whole mhdp header");
473                 mhdpHdr = (struct mhdp_hdr *)(((__u8 *)skb->data));
474         }
475
476         DPRINTK("MHDP PACKET COUNT : %d",  mhdpHdr->packet_count);
477
478         rcu_read_lock();
479
480         for (i = 0; i < mhdpHdr->packet_count; i++) {
481
482                 DPRINTK(" packet_info[%d] - PDNID:%d, packet_offset: %d,
483                         packet_length: %d\n", i, mhdpHdr->info[i].pdn_id,
484                         mhdpHdr->info[i].packet_offset,
485                         mhdpHdr->info[i].packet_length);
486
487                 pdn_id = mhdpHdr->info[i].pdn_id;
488                 offset = mhdpHdr->info[i].packet_offset;
489                 length = mhdpHdr->info[i].packet_length;
490
491                 if (skb_headlen(skb) > (mhdp_header_len + offset)) {
492
493                         newskb = skb_clone(skb, GFP_ATOMIC);
494                         if (unlikely(!newskb))
495                                 goto error;
496
497                         skb_pull(newskb, mhdp_header_len + offset);
498                         ip_ver = (u8)*newskb->data;
499
500                 } else if (has_frag) {
501
502                         newskb = netdev_alloc_skb(dev, skb_headlen(skb));
503
504                         if (unlikely(!newskb))
505                                 goto error;
506
507                         get_page(page);
508                         skb_add_rx_frag(newskb, skb_shinfo(newskb)->nr_frags,
509                         page,
510                         frag->page_offset +
511                         ((mhdp_header_len - skb_headlen(skb)) + offset),
512                         length, PAGE_SIZE);
513
514                         ip_ver = *((unsigned long *)page_address(page) +
515                         (frag->page_offset +
516                         ((mhdp_header_len - skb_headlen(skb)) + offset)));
517
518                         if ((ip_ver>>4) != VER_IPv4 &&
519                                 (ip_ver>>4) != VER_IPv6)
520                                 goto error;
521
522                 } else {
523                         DPRINTK("Error in the data received");
524                         goto error;
525                 }
526
527                 skb_reset_network_header(newskb);
528
529                 /* IPv6 Support - Check the IP version and set
530                 ETH_P_IP or ETH_P_IPv6 for received packets */
531                 newskb->protocol = htons(ETH_IP_TYPE(ip_ver));
532
533                 newskb->pkt_type = PACKET_HOST;
534
535                 skb_tunnel_rx(newskb, dev);
536
537                 tunnel = mhdp_locate_tunnel(mhdp_net_dev(dev), pdn_id);
538                 if (tunnel) {
539                         struct net_device_stats *stats = &tunnel->dev->stats;
540                         stats->rx_packets++;
541                         newskb->dev = tunnel->dev;
542                         SKBPRINT(newskb, "NEWSKB: RX");
543                         netif_rx(newskb);
544                 }
545         }
546         rcu_read_unlock();
547
548 error:
549         if (mhdp_header_len > skb_headlen(skb))
550                 kfree(mhdpHdr);
551
552         dev_kfree_skb(skb);
553
554         return err;
555 }
556
557 static void tx_timer_timeout(unsigned long arg)
558 {
559         struct mhdp_tunnel *tunnel = (struct mhdp_tunnel *) arg;
560
561         spin_lock(&tunnel->timer_lock);
562
563         mhdp_submit_queued_skb(tunnel);
564
565         spin_unlock(&tunnel->timer_lock);
566 }
567
568
569 static int
570 mhdp_netdev_xmit(struct sk_buff *skb, struct net_device *dev)
571 {
572         struct mhdp_hdr *mhdpHdr;
573         struct mhdp_tunnel *tunnel = netdev_priv(dev);
574         struct net_device_stats *stats = &tunnel->dev->stats;
575         struct page *page = NULL;
576         int i;
577         int packet_count, offset, len;
578
579         spin_lock(&tunnel->timer_lock);
580
581         SKBPRINT(skb, "SKB: TX");
582
583         if (timer_pending(&tunnel->tx_timer))
584                 del_timer(&tunnel->tx_timer);
585
586         if (tunnel->skb == NULL) {
587                 tunnel->skb = netdev_alloc_skb(dev,
588                         L2MUX_HDR_SIZE + sizeof(struct mhdp_hdr) + ETH_HLEN);
589
590                 if (!tunnel->skb) {
591                         EPRINTK("mhdp_netdev_xmit error1");
592                         BUG();
593                 }
594
595                 /* Place holder for the mhdp packet count */
596                 len = skb_headroom(tunnel->skb) - L2MUX_HDR_SIZE - ETH_HLEN;
597
598                 skb_push(tunnel->skb, len);
599                 len -= 4;
600
601                 memset(tunnel->skb->data, 0, len);
602
603                 /*
604                  * Need to replace following logic, with something better like
605                  * __pskb_pull_tail or pskb_may_pull(tunnel->skb, len);
606                  */
607                 {
608                         tunnel->skb->tail -= len;
609                         tunnel->skb->len  -= len;
610                 }
611
612
613                 mhdpHdr = (struct mhdp_hdr *)tunnel->skb->data;
614                 mhdpHdr->packet_count = 0;
615         }
616
617         /*
618          * skb_put cannot be called as the (data_len != 0)
619          */
620         {
621                 tunnel->skb->tail += sizeof(struct packet_info);
622                 tunnel->skb->len  += sizeof(struct packet_info);
623
624                 DPRINTK("new - skb->tail:%lu skb->end:%lu skb->data_len:%lu",
625                                 (unsigned long)tunnel->skb->tail,
626                                 (unsigned long)tunnel->skb->end,
627                                 (unsigned long)tunnel->skb->data_len);
628         }
629
630         mhdpHdr = (struct mhdp_hdr *)tunnel->skb->data;
631
632         tunnel->skb_to_free[mhdpHdr->packet_count] = skb;
633
634         packet_count = mhdpHdr->packet_count;
635         mhdpHdr->info[packet_count].pdn_id = tunnel->pdn_id;
636         if (packet_count == 0) {
637                 mhdpHdr->info[packet_count].packet_offset = 0;
638         } else {
639                 mhdpHdr->info[packet_count].packet_offset =
640                         mhdpHdr->info[packet_count - 1].packet_offset +
641                         mhdpHdr->info[packet_count - 1].packet_length;
642         }
643
644         mhdpHdr->info[packet_count].packet_length = skb->len;
645         mhdpHdr->packet_count++;
646
647         page = virt_to_page(skb->data);
648
649         if (page == NULL) {
650                 EPRINTK("kmap_atomic_to_page returns NULL");
651                 goto tx_error;
652         }
653
654         get_page(page);
655
656         offset = ((unsigned long)skb->data -
657                   (unsigned long)page_address(page));
658
659         skb_add_rx_frag(tunnel->skb, skb_shinfo(tunnel->skb)->nr_frags,
660                         page, offset, skb_headlen(skb), PAGE_SIZE);
661
662         if (skb_shinfo(skb)->nr_frags) {
663                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
664                         skb_frag_t *frag = &skb_shinfo(tunnel->skb)->frags[i];
665                         get_page(skb_frag_page(frag));
666                         skb_add_rx_frag(tunnel->skb,
667                                         skb_shinfo(tunnel->skb)->nr_frags,
668                                         skb_frag_page(frag), frag->page_offset,
669                                         frag->size, PAGE_SIZE);
670                 }
671         }
672
673         if (mhdpHdr->packet_count == MAX_MHDPHDR_SIZE) {
674                 mhdp_submit_queued_skb(tunnel);
675         } else {
676             tunnel->tx_timer.function = &tx_timer_timeout;
677             tunnel->tx_timer.data     = (unsigned long) tunnel;
678             tunnel->tx_timer.expires = jiffies + ((HZ + 999) / 1000) ;
679             add_timer(&tunnel->tx_timer);
680         }
681
682         spin_unlock(&tunnel->timer_lock);
683         return NETDEV_TX_OK;
684
685 tx_error:
686         spin_unlock(&tunnel->timer_lock);
687         stats->tx_errors++;
688         dev_kfree_skb(skb);
689         return NETDEV_TX_OK;
690 }
691
692
693 static int
694 mhdp_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
695 {
696         struct net_device *event_dev = (struct net_device *)ptr;
697
698         DPRINTK("event_dev: %s, event: %lx\n",
699                 event_dev ? event_dev->name : "None", event);
700
701         switch (event) {
702         case NETDEV_UNREGISTER:
703         {
704                 struct mhdp_net *mhdpn = mhdp_net_dev(event_dev);
705                 struct mhdp_tunnel *iter, *prev;
706
707                 DPRINTK("event_dev: %s, event: %lx\n",
708                         event_dev ? event_dev->name : "None", event);
709
710                 for (iter = mhdpn->tunnels, prev = NULL;
711                         iter; prev = iter, iter = iter->next) {
712                         if (event_dev == iter->master_dev) {
713                                 if (!prev)
714                                         mhdpn->tunnels = mhdpn->tunnels->next;
715                                 else
716                                         prev->next = iter->next;
717                                 mhdp_tunnel_destroy(iter->dev);
718                         }
719                 }
720         }
721         break;
722         }
723
724         return NOTIFY_DONE;
725 }
726
727 static const struct net_device_ops mhdp_netdev_ops = {
728         .ndo_uninit     = mhdp_netdev_uninit,
729         .ndo_start_xmit = mhdp_netdev_xmit,
730         .ndo_do_ioctl   = mhdp_netdev_ioctl,
731         .ndo_change_mtu = mhdp_netdev_change_mtu,
732 };
733
734 static void mhdp_netdev_setup(struct net_device *dev)
735 {
736         dev->netdev_ops         = &mhdp_netdev_ops;
737         dev->destructor         = free_netdev;
738
739         dev->type               = ARPHRD_TUNNEL;
740         dev->hard_header_len    = L2MUX_HDR_SIZE + sizeof(struct mhdp_hdr);
741         dev->mtu                = ETH_DATA_LEN;
742         dev->flags              = IFF_NOARP;
743         dev->iflink             = 0;
744         dev->addr_len           = 4;
745         dev->features          |= (NETIF_F_NETNS_LOCAL | NETIF_F_FRAGLIST);
746 }
747
748 static int __net_init mhdp_init_net(struct net *net)
749 {
750         struct mhdp_net *mhdpn = net_generic(net, mhdp_net_id);
751         int err;
752
753         mhdpn->tunnels = NULL;
754
755         mhdpn->ctl_dev = alloc_netdev(sizeof(struct mhdp_tunnel),
756                                       MHDP_CTL_IFNAME,
757                                       mhdp_netdev_setup);
758         if (!mhdpn->ctl_dev)
759                 return -ENOMEM;
760
761         dev_net_set(mhdpn->ctl_dev, net);
762         dev_hold(mhdpn->ctl_dev);
763
764         err = register_netdev(mhdpn->ctl_dev);
765         if (err) {
766                 printk(KERN_ERR MHDP_CTL_IFNAME " register failed");
767                 free_netdev(mhdpn->ctl_dev);
768                 return err;
769         }
770
771         return 0;
772 }
773
774 static void __net_exit mhdp_exit_net(struct net *net)
775 {
776         struct mhdp_net *mhdpn = net_generic(net, mhdp_net_id);
777
778         rtnl_lock();
779         mhdp_destroy_tunnels(mhdpn);
780         unregister_netdevice(mhdpn->ctl_dev);
781         rtnl_unlock();
782 }
783
784 static struct pernet_operations mhdp_net_ops = {
785         .init = mhdp_init_net,
786         .exit = mhdp_exit_net,
787         .id   = &mhdp_net_id,
788         .size = sizeof(struct mhdp_net),
789 };
790
791
792 static int __init mhdp_init(void)
793 {
794         int err;
795
796         err = l2mux_netif_rx_register(MHI_L3_MHDP_DL, mhdp_netdev_rx);
797         if (err)
798                 goto rollback0;
799
800         err = register_pernet_device(&mhdp_net_ops);
801         if (err < 0)
802                 goto rollback1;
803
804         err = register_netdevice_notifier(&mhdp_netdev_notifier);
805         if (err < 0)
806                 goto rollback2;
807
808         return 0;
809
810 rollback2:
811         unregister_pernet_device(&mhdp_net_ops);
812 rollback1:
813         l2mux_netif_rx_unregister(MHI_L3_MHDP_DL);
814 rollback0:
815         return err;
816 }
817
818 static void __exit mhdp_exit(void)
819 {
820         l2mux_netif_rx_unregister(MHI_L3_MHDP_DL);
821         unregister_netdevice_notifier(&mhdp_netdev_notifier);
822         unregister_pernet_device(&mhdp_net_ops);
823 }
824
825
/* Module registration and metadata. */
module_init(mhdp_init);
module_exit(mhdp_exit);

MODULE_AUTHOR("Sugnan Prabhu S <sugnan.prabhu@renesasmobile.com>");
MODULE_DESCRIPTION("Modem Host Data Protocol for MHI");
MODULE_LICENSE("GPL");
832