/*
 * File: l3mhdp.c
 *
 * MHDP - Modem Host Data Protocol for MHI protocol family.
 *
 * Copyright (C) 2011 Renesas Mobile Corporation. All rights reserved.
 *
 * Author: Sugnan Prabhu S <sugnan.prabhu@renesasmobile.com>
 *         Petri Mattila <petri.to.mattila@renesasmobile.com>
 *
 * Based on work by: Sam Lantinga (slouken@cs.ucdavis.edu)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/version.h>

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/l2mux.h>
#include <linux/etherdevice.h>
#include <linux/pkt_sched.h>

#ifdef CONFIG_MHDP_BONDING_SUPPORT
#define MHDP_BONDING_SUPPORT
#endif

/*#define MHDP_USE_NAPI*/

#ifdef MHDP_BONDING_SUPPORT
#include <linux/etherdevice.h>
#endif /* MHDP_BONDING_SUPPORT */

#include <net/netns/generic.h>
#include <net/mhi/mhdp.h>


/* MHDP device MTU limits */
#define MHDP_MTU_MAX 0x2400
#define MHDP_MTU_MIN 0x44
#define MAX_MHDP_FRAME_SIZE 16000

/* MHDP device names */
#define MHDP_IFNAME "rmnet%d"
#define MHDP_CTL_IFNAME "rmnetctl"

/* Print every MHDP SKB content */
/*#define MHDP_DEBUG_SKB*/


#define EPRINTK(...) printk(KERN_DEBUG "MHI/MHDP: " __VA_ARGS__)

#ifdef CONFIG_MHI_DEBUG
# define DPRINTK(...) printk(KERN_DEBUG "MHI/MHDP: " __VA_ARGS__)
#else
# define DPRINTK(...)
#endif

#ifdef MHDP_DEBUG_SKB
# define SKBPRINT(a, b) __print_skb_content(a, b)
#else
# define SKBPRINT(a, b)
#endif

/* IPv6 support */
#define VER_IPv4 0x04
#define VER_IPv6 0x06
#define ETH_IP_TYPE(x) ((((x) >> 4) == VER_IPv4) ? ETH_P_IP : ETH_P_IPV6)

int sysctl_mhdp_concat_nb_pkt __read_mostly;
EXPORT_SYMBOL(sysctl_mhdp_concat_nb_pkt);

/*** Type definitions ***/
#ifdef MHDP_USE_NAPI
#define NAPI_WEIGHT 64
#endif /*MHDP_USE_NAPI*/

#define MAX_MHDPHDR_SIZE 18

struct mhdp_tunnel {
	struct mhdp_tunnel *next;
	struct net_device *dev;
	struct net_device *master_dev;
	struct sk_buff *skb;
	int pdn_id;
	int free_pdn;
	struct timer_list tx_timer;
	struct sk_buff *skb_to_free[MAX_MHDPHDR_SIZE];
	spinlock_t timer_lock;
};

struct mhdp_net {
	struct mhdp_tunnel *tunnels;
	struct net_device *ctl_dev;
#ifdef MHDP_USE_NAPI
	struct net_device *dev;
	struct napi_struct napi;
	struct sk_buff_head skb_list;
#endif /*#ifdef MHDP_USE_NAPI*/
};

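/*
 * MHDP frame layout, as built in mhdp_netdev_xmit() and parsed in
 * mhdp_netdev_rx(): a 32-bit packet_count followed by packet_count
 * packet_info descriptors, followed by the concatenated IP packets.
 * Each descriptor carries the owning PDN id plus the offset and length
 * of its packet, counted from the end of the MHDP header.
 */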
struct packet_info {
	uint32_t pdn_id;
	uint32_t packet_offset;
	uint32_t packet_length;
};

struct mhdp_hdr {
	uint32_t packet_count;
	struct packet_info info[MAX_MHDPHDR_SIZE];
};


/*** Prototypes ***/

static void mhdp_netdev_setup(struct net_device *dev);

static void mhdp_submit_queued_skb(struct mhdp_tunnel *tunnel, int force_send);

static int mhdp_netdev_event(struct notifier_block *this,
			unsigned long event, void *ptr);

static void tx_timer_timeout(unsigned long arg);

#ifdef MHDP_USE_NAPI

static int mhdp_poll(struct napi_struct *napi, int budget);

#endif /*MHDP_USE_NAPI*/

/*** Global Variables ***/

static int mhdp_net_id __read_mostly;

static struct notifier_block mhdp_netdev_notifier = {
	.notifier_call = mhdp_netdev_event,
};

/*** Functions ***/

#ifdef MHDP_DEBUG_SKB
static void
__print_skb_content(struct sk_buff *skb, const char *tag)
{
	struct page *page;
	skb_frag_t *frag;
	int len;
	int i, j;
	u8 *ptr;

	/* Main SKB buffer */
	ptr = (u8 *)skb->data;
	len = skb_headlen(skb);

	printk(KERN_DEBUG "MHDP: SKB buffer length %02u\n", len);
	for (i = 0; i < len; i++) {
		if (i%8 == 0)
			printk(KERN_DEBUG "%s DATA: ", tag);
		printk(" 0x%02X", ptr[i]);
		if (i%8 == 7 || i == len - 1)
			printk("\n");
	}

	/* SKB fragments */
	for (i = 0; i < (skb_shinfo(skb)->nr_frags); i++) {
		frag = &skb_shinfo(skb)->frags[i];
		page = skb_frag_page(frag);

		ptr = page_address(page);

		for (j = 0; j < frag->size; j++) {
			if (j%8 == 0)
				printk(KERN_DEBUG "%s FRAG[%d]: ", tag, i);
			printk(" 0x%02X", ptr[frag->page_offset + j]);
			if (j%8 == 7 || j == frag->size - 1)
				printk("\n");
		}
	}
}
#endif


static inline struct mhdp_net *
mhdp_net_dev(struct net_device *dev)
{
	return net_generic(dev_net(dev), mhdp_net_id);
}

static void
mhdp_tunnel_init(struct net_device *dev,
		struct mhdp_tunnel_parm *parms,
		struct net_device *master_dev)
{
	struct mhdp_net *mhdpn = mhdp_net_dev(dev);
	struct mhdp_tunnel *tunnel = netdev_priv(dev);

	DPRINTK("mhdp_tunnel_init: dev:%s", dev->name);

	tunnel->next = mhdpn->tunnels;
	mhdpn->tunnels = tunnel;

	tunnel->dev = dev;
	tunnel->master_dev = master_dev;
	tunnel->skb = NULL;
	tunnel->pdn_id = parms->pdn_id;
	tunnel->free_pdn = 0;

	init_timer(&tunnel->tx_timer);
	spin_lock_init(&tunnel->timer_lock);
}

static void
mhdp_tunnel_destroy(struct net_device *dev)
{
	DPRINTK("mhdp_tunnel_destroy: dev:%s", dev->name);

	unregister_netdevice(dev);
}

static void
mhdp_destroy_tunnels(struct mhdp_net *mhdpn)
{
	struct mhdp_tunnel *tunnel;

	for (tunnel = mhdpn->tunnels; (tunnel); tunnel = tunnel->next)
		mhdp_tunnel_destroy(tunnel->dev);

	mhdpn->tunnels = NULL;
}

static struct mhdp_tunnel *
mhdp_locate_tunnel(struct mhdp_net *mhdpn, int pdn_id)
{
	struct mhdp_tunnel *tunnel;

	for (tunnel = mhdpn->tunnels; tunnel; tunnel = tunnel->next)
		if (tunnel->pdn_id == pdn_id)
			return tunnel;

	return NULL;
}

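/*
 * Allocate and register a new "rmnet%d" tunnel device bound to the
 * given master device, and copy the chosen interface name back into
 * the caller's mhdp_tunnel_parm.
 */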
static struct net_device *
mhdp_add_tunnel(struct net *net, struct mhdp_tunnel_parm *parms)
{
	struct net_device *mhdp_dev, *master_dev;

	DPRINTK("mhdp_add_tunnel: adding a tunnel to %s\n", parms->master);

	master_dev = dev_get_by_name(net, parms->master);
	if (!master_dev)
		goto err_alloc_dev;

	mhdp_dev = alloc_netdev(sizeof(struct mhdp_tunnel),
				MHDP_IFNAME, mhdp_netdev_setup);
	if (!mhdp_dev)
		goto err_alloc_dev;

	dev_net_set(mhdp_dev, net);

	if (dev_alloc_name(mhdp_dev, MHDP_IFNAME) < 0)
		goto err_reg_dev;

	strcpy(parms->name, mhdp_dev->name);

	if (register_netdevice(mhdp_dev)) {
		printk(KERN_ERR "MHDP: register_netdev failed\n");
		goto err_reg_dev;
	}

	dev_hold(mhdp_dev);

	mhdp_tunnel_init(mhdp_dev, parms, master_dev);

	dev_put(master_dev);

	return mhdp_dev;

err_reg_dev:
	free_netdev(mhdp_dev);
err_alloc_dev:
	return NULL;
}


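/*
 * Private ioctl handler (reached through ndo_do_ioctl):
 *   SIOCADDPDNID  - create a tunnel for the given PDN id, or reuse an
 *                   existing one marked free by SIOCDELPDNID;
 *   SIOCDELPDNID  - mark the tunnel of the given PDN id as reusable;
 *   SIOCRESETMHDP - tear down every tunnel.
 */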
static int
mhdp_netdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct net *net = dev_net(dev);
	struct mhdp_net *mhdpn = mhdp_net_dev(dev);
	struct mhdp_tunnel *tunnel, *pre_dev;
	struct mhdp_tunnel_parm __user *u_parms;
	struct mhdp_tunnel_parm k_parms;

	int err = 0;

	DPRINTK("mhdp tunnel ioctl %X", cmd);

	switch (cmd) {

	case SIOCADDPDNID:
		u_parms = (struct mhdp_tunnel_parm *)ifr->ifr_data;
		if (copy_from_user(&k_parms, u_parms,
				sizeof(struct mhdp_tunnel_parm))) {
			DPRINTK("Error: Failed to copy data from user space");
			return -EFAULT;
		}

		DPRINTK("pdn_id:%d master_device:%s",
			k_parms.pdn_id,
			k_parms.master);
		tunnel = mhdp_locate_tunnel(mhdpn, k_parms.pdn_id);

		if (NULL == tunnel) {
			if (mhdp_add_tunnel(net, &k_parms)) {
				if (copy_to_user(u_parms, &k_parms,
						sizeof(struct mhdp_tunnel_parm)))
					err = -EINVAL;
			} else {
				err = -EINVAL;
			}

		} else if (1 == tunnel->free_pdn) {

			tunnel->free_pdn = 0;

			strcpy(k_parms.name, tunnel->dev->name);

			if (copy_to_user(u_parms, &k_parms,
					sizeof(struct mhdp_tunnel_parm)))
				err = -EINVAL;
		} else {
			err = -EBUSY;
		}
		break;

	case SIOCDELPDNID:
		u_parms = (struct mhdp_tunnel_parm *)ifr->ifr_data;
		if (copy_from_user(&k_parms, u_parms,
				sizeof(struct mhdp_tunnel_parm))) {
			DPRINTK("Error: Failed to copy data from user space");
			return -EFAULT;
		}

		DPRINTK("pdn_id:%d", k_parms.pdn_id);

		for (tunnel = mhdpn->tunnels, pre_dev = NULL;
		     tunnel;
		     pre_dev = tunnel, tunnel = tunnel->next) {
			if (tunnel->pdn_id == k_parms.pdn_id)
				tunnel->free_pdn = 1;
		}
		break;

	case SIOCRESETMHDP:
		mhdp_destroy_tunnels(mhdpn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}
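
/*
 * Minimal user-space sketch of creating a tunnel through the handler
 * above (illustrative only: it assumes SIOCADDPDNID and struct
 * mhdp_tunnel_parm are visible to user space and that "mhi0" is the
 * name of the master interface):
 *
 *	struct mhdp_tunnel_parm parms = { .pdn_id = 1 };
 *	struct ifreq ifr = { 0 };
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strcpy(parms.master, "mhi0");
 *	strcpy(ifr.ifr_name, MHDP_CTL_IFNAME);
 *	ifr.ifr_data = (void *)&parms;
 *	ioctl(fd, SIOCADDPDNID, &ifr);
 *
 * On success, parms.name holds the name of the created rmnet device.
 */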

static int
mhdp_netdev_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < MHDP_MTU_MIN || new_mtu > MHDP_MTU_MAX)
		return -EINVAL;

	dev->mtu = new_mtu;

	return 0;
}

static void
mhdp_netdev_uninit(struct net_device *dev)
{
	dev_put(dev);
}


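/*
 * Flush the tunnel's pending aggregate frame: prepend the L2MUX header,
 * hand the frame to the master device via dev_queue_xmit(), and free
 * the original per-packet SKBs remembered in skb_to_free[].
 */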
static void
mhdp_submit_queued_skb(struct mhdp_tunnel *tunnel, int force_send)
{
	struct sk_buff *skb = tunnel->skb;
	struct l2muxhdr *l2hdr;
	struct mhdp_hdr *mhdpHdr;
	int i, nb_frags;

	BUG_ON(!tunnel->master_dev);

	if (skb) {
		mhdpHdr = (struct mhdp_hdr *)tunnel->skb->data;
		nb_frags = mhdpHdr->packet_count;

		skb->protocol = htons(ETH_P_MHDP);
		skb->priority = 1;

		skb->dev = tunnel->master_dev;

		skb_reset_network_header(skb);

		skb_push(skb, L2MUX_HDR_SIZE);
		skb_reset_mac_header(skb);

		l2hdr = l2mux_hdr(skb);
		l2mux_set_proto(l2hdr, MHI_L3_MHDP_UL);
		l2mux_set_length(l2hdr, skb->len - L2MUX_HDR_SIZE);

		SKBPRINT(skb, "MHDP: TX");

		tunnel->dev->stats.tx_packets++;
		tunnel->skb = NULL;

		dev_queue_xmit(skb);

		for (i = 0; i < nb_frags; i++)
			if (tunnel->skb_to_free[i])
				dev_kfree_skb(tunnel->skb_to_free[i]);
			else
				EPRINTK("submit_q_skb: error no skb to free\n");
	}
}

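/*
 * Downlink receive path (called directly as the l2mux callback, or from
 * mhdp_poll() in the NAPI build). Parses the MHDP header, which may be
 * split between the SKB's linear area and its first page fragment, then
 * builds one new SKB per packet_info entry, either by cloning the
 * original SKB or by attaching the payload as a page fragment, and
 * delivers it to the tunnel device matching the PDN id.
 */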
static int
mhdp_netdev_rx(struct sk_buff *skb, struct net_device *dev)
{
	skb_frag_t *frag = NULL;
	struct page *page = NULL;
	struct sk_buff *newskb;
	struct mhdp_hdr *mhdpHdr;
	int offset, length;
	int err = 0, i, pdn_id;
	int mhdp_header_len;
	struct mhdp_tunnel *tunnel = NULL;
	int start = 0;
	int has_frag = skb_shinfo(skb)->nr_frags;
	uint32_t packet_count;
	unsigned char ip_ver;

	if (has_frag) {
		frag = &skb_shinfo(skb)->frags[0];
		page = skb_frag_page(frag);
	}

	if (skb_headlen(skb) > L2MUX_HDR_SIZE)
		skb_pull(skb, L2MUX_HDR_SIZE);
	else if (has_frag)
		frag->page_offset += L2MUX_HDR_SIZE;

	packet_count = *((unsigned char *)skb->data);

	mhdp_header_len = sizeof(packet_count) +
		(packet_count * sizeof(struct packet_info));

	if (mhdp_header_len > skb_headlen(skb)) {
		int skbheadlen = skb_headlen(skb);

		DPRINTK("mhdp header length: %d, skb_headerlen: %d",
			mhdp_header_len, skbheadlen);

		mhdpHdr = kmalloc(mhdp_header_len, GFP_ATOMIC);
		if (mhdpHdr == NULL) {
			printk(KERN_ERR "%s: kmalloc failed.\n", __func__);
			return err;
		}

		if (skbheadlen == 0) {
			memcpy((__u8 *)mhdpHdr, page_address(page) +
				frag->page_offset,
				mhdp_header_len);

		} else {
			memcpy((__u8 *)mhdpHdr, skb->data, skbheadlen);

			memcpy((__u8 *)mhdpHdr + skbheadlen,
				page_address(page) +
				frag->page_offset,
				mhdp_header_len - skbheadlen);

			start = mhdp_header_len - skbheadlen;
		}

		DPRINTK("page start: %d", start);
	} else {
		DPRINTK("skb->data has whole mhdp header");
		mhdpHdr = (struct mhdp_hdr *)(((__u8 *)skb->data));
	}

	DPRINTK("MHDP PACKET COUNT : %d", mhdpHdr->packet_count);

	rcu_read_lock();

	for (i = 0; i < mhdpHdr->packet_count; i++) {

		DPRINTK(" packet_info[%d] - PDNID:%d, packet_offset: %d, "
			"packet_length: %d\n", i, mhdpHdr->info[i].pdn_id,
			mhdpHdr->info[i].packet_offset,
			mhdpHdr->info[i].packet_length);

		pdn_id = mhdpHdr->info[i].pdn_id;
		offset = mhdpHdr->info[i].packet_offset;
		length = mhdpHdr->info[i].packet_length;

		if (skb_headlen(skb) > (mhdp_header_len + offset)) {

			newskb = skb_clone(skb, GFP_ATOMIC);
			if (unlikely(!newskb))
				goto error;

			skb_pull(newskb, mhdp_header_len + offset);
			ip_ver = (u8)*newskb->data;

		} else if (has_frag) {

			newskb = netdev_alloc_skb(dev, skb_headlen(skb));

			if (unlikely(!newskb))
				goto error;

			get_page(page);
			skb_add_rx_frag(newskb,
				skb_shinfo(newskb)->nr_frags,
				page,
				frag->page_offset +
				((mhdp_header_len - skb_headlen(skb)) + offset),
				length,
				PAGE_SIZE);

			ip_ver = *((u8 *)page_address(page) +
				frag->page_offset +
				(mhdp_header_len - skb_headlen(skb)) +
				offset);
			if ((ip_ver>>4) != VER_IPv4 &&
			    (ip_ver>>4) != VER_IPv6)
				goto error;

		} else {
			DPRINTK("Error in the data received");
			goto error;
		}

		skb_reset_network_header(newskb);

		/* IPv6 Support - Check the IP version */
		/* and set ETH_P_IP or ETH_P_IPv6 for received packets */

		newskb->protocol = htons(ETH_IP_TYPE(ip_ver));

		newskb->pkt_type = PACKET_HOST;

		skb_tunnel_rx(newskb, dev);

		tunnel = mhdp_locate_tunnel(mhdp_net_dev(dev), pdn_id);
		if (tunnel) {
			struct net_device_stats *stats = &tunnel->dev->stats;
			stats->rx_packets++;
			newskb->dev = tunnel->dev;
			SKBPRINT(newskb, "NEWSKB: RX");
#ifdef MHDP_USE_NAPI
			netif_receive_skb(newskb);
#else
			netif_rx(newskb);
#endif /*#ifdef MHDP_USE_NAPI*/
		}
	}
	rcu_read_unlock();

	if (mhdp_header_len > skb_headlen(skb))
		kfree(mhdpHdr);

	dev_kfree_skb(skb);

	return err;

error:
	if (mhdp_header_len > skb_headlen(skb))
		kfree(mhdpHdr);

	dev_kfree_skb(skb);

	return err;
}

#ifdef MHDP_USE_NAPI
/*
 * mhdp_poll() - NAPI poll callback: dequeue every pending SKB from
 * mhdp_net.skb_list and feed it to mhdp_netdev_rx().
 */
static int mhdp_poll(struct napi_struct *napi, int budget)
{
	struct mhdp_net *mhdpn = container_of(napi, struct mhdp_net, napi);
	int err = 0;
	struct sk_buff *skb;

	while (!skb_queue_empty(&mhdpn->skb_list)) {

		skb = skb_dequeue(&mhdpn->skb_list);
		err = mhdp_netdev_rx(skb, mhdpn->dev);
	}

	napi_complete(napi);

	return err;
}

/* l2mux callback */
static int
mhdp_netdev_rx_napi(struct sk_buff *skb, struct net_device *dev)
{
	struct mhdp_net *mhdpn = mhdp_net_dev(dev);


	if (mhdpn) {

		mhdpn->dev = dev;
		skb_queue_tail(&mhdpn->skb_list, skb);

		napi_schedule(&mhdpn->napi);

	} else {
		EPRINTK("mhdp_netdev_rx_napi-MHDP driver init not correct\n");
	}

	return 0;
}
#endif /*MHDP_USE_NAPI*/

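/*
 * Uplink packets are not sent one by one: mhdp_netdev_xmit() accumulates
 * them as fragments of a single MHDP frame, which is flushed when it
 * holds MAX_MHDPHDR_SIZE packets, when adding another packet would push
 * it past MAX_MHDP_FRAME_SIZE, or when the per-tunnel tx_timer (armed
 * for roughly 1 ms, rounded up to a whole jiffy) expires.
 */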
static void tx_timer_timeout(unsigned long arg)
{
	struct mhdp_tunnel *tunnel = (struct mhdp_tunnel *) arg;

	spin_lock(&tunnel->timer_lock);

	mhdp_submit_queued_skb(tunnel, 1);

	spin_unlock(&tunnel->timer_lock);
}

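/*
 * Uplink transmit handler for the rmnet%d devices. The outgoing SKB is
 * not copied: its data is attached to the pending aggregate frame as
 * page fragments, its descriptor is appended to the MHDP header, and
 * the original SKB is remembered in skb_to_free[] until the aggregate
 * is actually transmitted.
 */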
static int
mhdp_netdev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mhdp_hdr *mhdpHdr;
	struct mhdp_tunnel *tunnel = netdev_priv(dev);
	struct net_device_stats *stats = &tunnel->dev->stats;
	struct page *page = NULL;
	int i;
	int packet_count, offset, len;

	spin_lock(&tunnel->timer_lock);

	SKBPRINT(skb, "SKB: TX");

	if (timer_pending(&tunnel->tx_timer))
		del_timer(&tunnel->tx_timer);

#if 0
	{
		int i;
		int len = skb->len;
		u8 *ptr = skb->data;

		for (i = 0; i < len; i++) {
			if (i%8 == 0)
				printk(KERN_DEBUG
					"MHDP mhdp_netdev_xmit: TX [%04X] ", i);
			printk(" 0x%02X", ptr[i]);
			if (i%8 == 7 || i == len-1)
				printk("\n");
		}
	}
#endif
xmit_again:

	if (tunnel->skb == NULL) {
		tunnel->skb = netdev_alloc_skb(dev,
				L2MUX_HDR_SIZE + sizeof(struct mhdp_hdr));

		if (!tunnel->skb) {
			EPRINTK("mhdp_netdev_xmit error1");
			BUG();
		}

		/* Place holder for the mhdp packet count */
		len = skb_headroom(tunnel->skb) - L2MUX_HDR_SIZE - ETH_HLEN;

		skb_push(tunnel->skb, len);
		len -= 4;

		memset(tunnel->skb->data, 0, len);

		/*
		 * Need to replace following logic, with something better like
		 * __pskb_pull_tail or pskb_may_pull(tunnel->skb, len);
		 */
		{
			tunnel->skb->tail -= len;
			tunnel->skb->len -= len;
		}


		mhdpHdr = (struct mhdp_hdr *)tunnel->skb->data;
		mhdpHdr->packet_count = 0;
	}

	/*
	 * This new frame is too big for the current MHDP frame;
	 * send the queued frame first.
	 */
	if (tunnel->skb->len + skb->len >= MAX_MHDP_FRAME_SIZE) {

		mhdp_submit_queued_skb(tunnel, 1);

		goto xmit_again;

	} else {

		/*
		 * skb_put cannot be called as the (data_len != 0)
		 */
		tunnel->skb->tail += sizeof(struct packet_info);
		tunnel->skb->len += sizeof(struct packet_info);

		DPRINTK("new - skb->tail:%lu skb->end:%lu skb->data_len:%lu",
			(unsigned long)tunnel->skb->tail,
			(unsigned long)tunnel->skb->end,
			(unsigned long)tunnel->skb->data_len);

		mhdpHdr = (struct mhdp_hdr *)tunnel->skb->data;

		tunnel->skb_to_free[mhdpHdr->packet_count] = skb;

		packet_count = mhdpHdr->packet_count;
		mhdpHdr->info[packet_count].pdn_id = tunnel->pdn_id;
		if (packet_count == 0) {
			mhdpHdr->info[packet_count].packet_offset = 0;
		} else {
			mhdpHdr->info[packet_count].packet_offset =
				mhdpHdr->info[packet_count - 1].packet_offset +
				mhdpHdr->info[packet_count - 1].packet_length;
		}

		mhdpHdr->info[packet_count].packet_length = skb->len;
		mhdpHdr->packet_count++;

		page = virt_to_page(skb->data);

		if (page == NULL) {
			EPRINTK("virt_to_page returned NULL");
			goto tx_error;
		}

		get_page(page);

		offset = ((unsigned long)skb->data -
			(unsigned long)page_address(page));

		skb_add_rx_frag(tunnel->skb, skb_shinfo(tunnel->skb)->nr_frags,
				page, offset, skb_headlen(skb), PAGE_SIZE);

		if (skb_shinfo(skb)->nr_frags) {
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
				skb_frag_t *frag =
					&skb_shinfo(skb)->frags[i];

				get_page(skb_frag_page(frag));
				skb_add_rx_frag(tunnel->skb,
						skb_shinfo(tunnel->skb)->nr_frags,
						skb_frag_page(frag),
						frag->page_offset,
						frag->size,
						PAGE_SIZE);
			}
		}

		if (mhdpHdr->packet_count >= MAX_MHDPHDR_SIZE) {
			mhdp_submit_queued_skb(tunnel, 1);
		} else {
			tunnel->tx_timer.function = &tx_timer_timeout;
			tunnel->tx_timer.data = (unsigned long) tunnel;
			tunnel->tx_timer.expires =
				jiffies + ((HZ + 999) / 1000);
			add_timer(&tunnel->tx_timer);
		}
	}

	spin_unlock(&tunnel->timer_lock);
	return NETDEV_TX_OK;

tx_error:
	spin_unlock(&tunnel->timer_lock);
	stats->tx_errors++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}


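/*
 * Netdevice notifier: when a master device is unregistered, unlink and
 * destroy every tunnel that was bound to it.
 */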
static int
mhdp_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *event_dev = (struct net_device *)ptr;

	DPRINTK("event_dev: %s, event: %lx\n",
		event_dev ? event_dev->name : "None", event);

	switch (event) {
	case NETDEV_UNREGISTER:
	{
		struct mhdp_net *mhdpn = mhdp_net_dev(event_dev);
		struct mhdp_tunnel *iter, *prev;

		DPRINTK("event_dev: %s, event: %lx\n",
			event_dev ? event_dev->name : "None", event);

		for (iter = mhdpn->tunnels, prev = NULL;
		     iter; prev = iter, iter = iter->next) {
			if (event_dev == iter->master_dev) {
				if (!prev)
					mhdpn->tunnels =
						mhdpn->tunnels->next;
				else
					prev->next = iter->next;
				mhdp_tunnel_destroy(iter->dev);
			}
		}
	}
	break;
	}

	return NOTIFY_DONE;
}

#ifdef MHDP_BONDING_SUPPORT
static void
cdma_netdev_uninit(struct net_device *dev)
{
	dev_put(dev);
}

static void mhdp_ethtool_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *drvinfo)
{
	strncpy(drvinfo->driver, dev->name, 32);
}

static const struct ethtool_ops mhdp_ethtool_ops = {
	.get_drvinfo = mhdp_ethtool_get_drvinfo,
	.get_link = ethtool_op_get_link,
};
#endif /* MHDP_BONDING_SUPPORT */

static const struct net_device_ops mhdp_netdev_ops = {
	.ndo_uninit = mhdp_netdev_uninit,
	.ndo_start_xmit = mhdp_netdev_xmit,
	.ndo_do_ioctl = mhdp_netdev_ioctl,
	.ndo_change_mtu = mhdp_netdev_change_mtu,
};

static void mhdp_netdev_setup(struct net_device *dev)
{
	dev->netdev_ops = &mhdp_netdev_ops;
#ifdef MHDP_BONDING_SUPPORT
	dev->ethtool_ops = &mhdp_ethtool_ops;
#endif /* MHDP_BONDING_SUPPORT */

	dev->destructor = free_netdev;

#ifdef truc /* MHDP_BONDING_SUPPORT */
	ether_setup(dev);
	dev->flags |= IFF_NOARP;
	dev->iflink = 0;
	dev->features |= (NETIF_F_NETNS_LOCAL | NETIF_F_FRAGLIST);
#else
	dev->type = ARPHRD_TUNNEL;
	dev->hard_header_len = L2MUX_HDR_SIZE + sizeof(struct mhdp_hdr);
	dev->mtu = ETH_DATA_LEN;
	dev->flags = IFF_NOARP;
	dev->iflink = 0;
	dev->addr_len = 4;
	dev->features |= (NETIF_F_NETNS_LOCAL | NETIF_F_FRAGLIST);
#endif /* MHDP_BONDING_SUPPORT */
}

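/*
 * Per-network-namespace init: register the "rmnetctl" control device
 * and, when MHDP_USE_NAPI is enabled, attach the NAPI context and the
 * downlink SKB queue to it.
 */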
static int __net_init mhdp_init_net(struct net *net)
{
	struct mhdp_net *mhdpn = net_generic(net, mhdp_net_id);
	int err;

	mhdpn->tunnels = NULL;

	mhdpn->ctl_dev = alloc_netdev(sizeof(struct mhdp_tunnel),
				      MHDP_CTL_IFNAME,
				      mhdp_netdev_setup);
	if (!mhdpn->ctl_dev)
		return -ENOMEM;

	dev_net_set(mhdpn->ctl_dev, net);
	dev_hold(mhdpn->ctl_dev);

	err = register_netdev(mhdpn->ctl_dev);
	if (err) {
		printk(KERN_ERR MHDP_CTL_IFNAME " register failed");
		free_netdev(mhdpn->ctl_dev);
		return err;
	}

#ifdef MHDP_USE_NAPI
	netif_napi_add(mhdpn->ctl_dev, &mhdpn->napi, mhdp_poll, NAPI_WEIGHT);
	napi_enable(&mhdpn->napi);
	skb_queue_head_init(&mhdpn->skb_list);
#endif /*#ifdef MHDP_USE_NAPI*/
	return 0;
}

static void __net_exit mhdp_exit_net(struct net *net)
{
	struct mhdp_net *mhdpn = net_generic(net, mhdp_net_id);

	rtnl_lock();
	mhdp_destroy_tunnels(mhdpn);
	unregister_netdevice(mhdpn->ctl_dev);
	rtnl_unlock();
}

static struct pernet_operations mhdp_net_ops = {
	.init = mhdp_init_net,
	.exit = mhdp_exit_net,
	.id = &mhdp_net_id,
	.size = sizeof(struct mhdp_net),
};


static int __init mhdp_init(void)
{
	int err;

#ifdef MHDP_USE_NAPI
	err = l2mux_netif_rx_register(MHI_L3_MHDP_DL, mhdp_netdev_rx_napi);
#else
	err = l2mux_netif_rx_register(MHI_L3_MHDP_DL, mhdp_netdev_rx);
#endif /* MHDP_USE_NAPI */
	if (err)
		goto rollback0;

	err = register_pernet_device(&mhdp_net_ops);
	if (err < 0)
		goto rollback1;

	err = register_netdevice_notifier(&mhdp_netdev_notifier);
	if (err < 0)
		goto rollback2;

	return 0;

rollback2:
	unregister_pernet_device(&mhdp_net_ops);
rollback1:
	l2mux_netif_rx_unregister(MHI_L3_MHDP_DL);
rollback0:
	return err;
}

static void __exit mhdp_exit(void)
{
	l2mux_netif_rx_unregister(MHI_L3_MHDP_DL);
	unregister_netdevice_notifier(&mhdp_netdev_notifier);
	unregister_pernet_device(&mhdp_net_ops);
}


module_init(mhdp_init);
module_exit(mhdp_exit);

MODULE_AUTHOR("Sugnan Prabhu S <sugnan.prabhu@renesasmobile.com>");
MODULE_DESCRIPTION("Modem Host Data Protocol for MHI");
MODULE_LICENSE("GPL");