net: mhi: mhdp optimizations and typo corrections
Xavier LANGELLIER [Fri, 12 Oct 2012 15:05:18 +0000 (17:05 +0200)]
mhdp driver optimizations and typo corrections:

- Add an optional NAPI receive path (MHDP_USE_NAPI, left disabled):
  received skbs are queued on a per-net skb_list and drained from
  the mhdp_poll() callback registered with netif_napi_add().
- Concatenate more packets per transmitted MHDP frame: raise
  MAX_MHDPHDR_SIZE from 10 to 18 and flush the pending frame before
  it would exceed MAX_MHDP_FRAME_SIZE (16000 bytes); see the sketch
  below.
- Add a force_send argument to mhdp_submit_queued_skb() and check
  the skb_to_free[] entries before freeing them.
- Indentation, line-length and comment cleanups.
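
A minimal sketch of the transmit flush decision implied by the new
constants (illustration only, not code from this patch; the helper
name and parameters are hypothetical, the real checks sit inline in
mhdp_netdev_xmit()):

    #define MAX_MHDP_FRAME_SIZE  16000  /* as added by this patch */
    #define MAX_MHDPHDR_SIZE     18     /* raised from 10 by this patch */

    /* Hypothetical helper mirroring the flush checks added to
     * mhdp_netdev_xmit(). */
    static int mhdp_tx_should_flush(unsigned int pending_len,
                                    unsigned int new_pkt_len,
                                    unsigned int packet_count)
    {
            /* Appending this packet would exceed the frame limit:
             * submit the pending frame first (force_send = 1) and
             * start a new one for this packet. */
            if (pending_len + new_pkt_len >= MAX_MHDP_FRAME_SIZE)
                    return 1;

            /* The MHDP header cannot describe any more packets. */
            if (packet_count >= MAX_MHDPHDR_SIZE)
                    return 1;

            /* Otherwise keep concatenating; the pending tx_timer
             * forces the send if no further packet arrives. */
            return 0;
    }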

Bug 1167018

Change-Id: Icc829b4c42bee941ceb361cb0c04d01ece6cbf14
Signed-off-by: wtsai <wtsai@nvidia.com>
Reviewed-on: http://git-master/r/165099
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: Bharat Nihalani <bnihalani@nvidia.com>

net/mhi/l3mhdp.c

index 6acf1f6..1771050 100644 (file)
@@ -42,6 +42,8 @@
 #define MHDP_BONDING_SUPPORT
 #endif
 
+/*#define MHDP_USE_NAPI*/
+
 #ifdef MHDP_BONDING_SUPPORT
 #include <linux/etherdevice.h>
 #endif /* MHDP_BONDING_SUPPORT */
@@ -53,6 +55,7 @@
 /* MHDP device MTU limits */
 #define MHDP_MTU_MAX           0x2400
 #define MHDP_MTU_MIN           0x44
+#define MAX_MHDP_FRAME_SIZE    16000   /* max concatenated frame size */
 
 /* MHDP device names */
 #define MHDP_IFNAME            "rmnet%d"
@@ -85,8 +88,11 @@ int sysctl_mhdp_concat_nb_pkt __read_mostly;
 EXPORT_SYMBOL(sysctl_mhdp_concat_nb_pkt);
 
 /*** Type definitions ***/
+#ifdef MHDP_USE_NAPI
+#define NAPI_WEIGHT 64
+#endif /*MHDP_USE_NAPI*/
 
-#define MAX_MHDPHDR_SIZE 10
+#define MAX_MHDPHDR_SIZE 18
 
 struct mhdp_tunnel {
        struct mhdp_tunnel      *next;
@@ -103,6 +109,11 @@ struct mhdp_tunnel {
 struct mhdp_net {
        struct mhdp_tunnel      *tunnels;
        struct net_device       *ctl_dev;
+#ifdef MHDP_USE_NAPI
+       struct net_device       *dev;
+       struct napi_struct      napi;
+       struct sk_buff_head     skb_list;
+#endif /*#ifdef MHDP_USE_NAPI*/
 };
 
 struct packet_info {
@@ -121,13 +132,19 @@ struct mhdp_hdr {
 
 static void mhdp_netdev_setup(struct net_device *dev);
 
-static void mhdp_submit_queued_skb(struct mhdp_tunnel *tunnel);
+static void mhdp_submit_queued_skb(struct mhdp_tunnel *tunnel, int force_send);
 
 static int mhdp_netdev_event(struct notifier_block *this,
                             unsigned long event, void *ptr);
 
 static void tx_timer_timeout(unsigned long arg);
 
+#ifdef MHDP_USE_NAPI
+
+static int mhdp_poll(struct napi_struct *napi, int budget);
+
+#endif /*MHDP_USE_NAPI*/
+
 /*** Global Variables ***/
 
 static int  mhdp_net_id __read_mostly;
@@ -319,6 +336,7 @@ mhdp_netdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                        } else {
                                err = -EINVAL;
                        }
+
                } else if (1 == tunnel->free_pdn) {
 
                        tunnel->free_pdn = 0;
@@ -381,7 +399,7 @@ mhdp_netdev_uninit(struct net_device *dev)
 
 
 static void
-mhdp_submit_queued_skb(struct mhdp_tunnel *tunnel)
+mhdp_submit_queued_skb(struct mhdp_tunnel *tunnel, int force_send)
 {
        struct sk_buff *skb = tunnel->skb;
        struct l2muxhdr *l2hdr;
@@ -416,7 +434,10 @@ mhdp_submit_queued_skb(struct mhdp_tunnel *tunnel)
                dev_queue_xmit(skb);
 
                for (i = 0; i < nb_frags; i++)
-                       dev_kfree_skb(tunnel->skb_to_free[i]);
+                       if (tunnel->skb_to_free[i])
+                               dev_kfree_skb(tunnel->skb_to_free[i]);
+                       else
+                               EPRINTK("submit_q_skb: no skb to free\n");
        }
 }
 
@@ -517,16 +538,18 @@ mhdp_netdev_rx(struct sk_buff *skb, struct net_device *dev)
                                goto error;
 
                        get_page(page);
-                       skb_add_rx_frag(newskb, skb_shinfo(newskb)->nr_frags,
-                       page,
-                       frag->page_offset +
-                       ((mhdp_header_len - skb_headlen(skb)) + offset),
-                       length, PAGE_SIZE);
+                       skb_add_rx_frag(newskb,
+                               skb_shinfo(newskb)->nr_frags,
+                               page,
+                               frag->page_offset +
+                               ((mhdp_header_len - skb_headlen(skb)) + offset),
+                               length,
+                               PAGE_SIZE);
 
                        ip_ver = *((unsigned long *)page_address(page) +
-                       (frag->page_offset +
-                       ((mhdp_header_len - skb_headlen(skb)) + offset)));
-
+                                       (frag->page_offset +
+                                       ((mhdp_header_len - skb_headlen(skb)) +
+                                       offset)));
                        if ((ip_ver>>4) != VER_IPv4 &&
                                (ip_ver>>4) != VER_IPv6)
                                goto error;
@@ -538,8 +561,9 @@ mhdp_netdev_rx(struct sk_buff *skb, struct net_device *dev)
 
                skb_reset_network_header(newskb);
 
-               /* IPv6 Support - Check the IP version and set
-               ETH_P_IP or ETH_P_IPv6 for received packets */
+               /* IPv6 Support - Check the IP version */
+               /* and set ETH_P_IP or ETH_P_IPv6 for received packets */
+
                newskb->protocol = htons(ETH_IP_TYPE(ip_ver));
 
                newskb->pkt_type = PACKET_HOST;
@@ -552,7 +576,11 @@ mhdp_netdev_rx(struct sk_buff *skb, struct net_device *dev)
                        stats->rx_packets++;
                        newskb->dev = tunnel->dev;
                        SKBPRINT(newskb, "NEWSKB: RX");
+#ifdef MHDP_USE_NAPI
+                       netif_receive_skb(newskb);
+#else
                        netif_rx(newskb);
+#endif /*#ifdef MHDP_USE_NAPI*/
                }
        }
        rcu_read_unlock();
@@ -573,18 +601,60 @@ error:
        return err;
 }
 
+#ifdef MHDP_USE_NAPI
+/*
+ * mhdp_poll() - NAPI poll callback: drains the queued skbs and
+ * hands each received IP frame to mhdp_netdev_rx().
+ */
+static int mhdp_poll(struct napi_struct *napi, int budget)
+{
+       struct mhdp_net *mhdpn = container_of(napi, struct mhdp_net, napi);
+       int err = 0;
+       struct sk_buff *skb;
+
+       while (!skb_queue_empty(&mhdpn->skb_list)) {
+
+               skb = skb_dequeue(&mhdpn->skb_list);
+               err = mhdp_netdev_rx(skb, mhdpn->dev);
+       }
+
+       napi_complete(napi);
+
+       return err;
+}
+/* l2mux RX callback: queue the skb and schedule the NAPI poll */
+static int
+mhdp_netdev_rx_napi(struct sk_buff *skb, struct net_device *dev)
+{
+       struct mhdp_net *mhdpn = mhdp_net_dev(dev);
+
+
+       if (mhdpn) {
+
+               mhdpn->dev = dev;
+               skb_queue_tail(&mhdpn->skb_list, skb);
+
+               napi_schedule(&mhdpn->napi);
+
+       } else {
+               EPRINTK("mhdp_netdev_rx_napi: MHDP driver not initialized\n");
+       }
+
+       return 0;
+}
+#endif /*MHDP_USE_NAPI*/
+
 static void tx_timer_timeout(unsigned long arg)
 {
        struct mhdp_tunnel *tunnel = (struct mhdp_tunnel *) arg;
 
        spin_lock(&tunnel->timer_lock);
 
-       mhdp_submit_queued_skb(tunnel);
+       mhdp_submit_queued_skb(tunnel, 1);
 
        spin_unlock(&tunnel->timer_lock);
 }
 
-
 static int
 mhdp_netdev_xmit(struct sk_buff *skb, struct net_device *dev)
 {
@@ -602,9 +672,27 @@ mhdp_netdev_xmit(struct sk_buff *skb, struct net_device *dev)
        if (timer_pending(&tunnel->tx_timer))
                del_timer(&tunnel->tx_timer);
 
+#if 0
+       {
+               int i;
+               int len = skb->len;
+               u8 *ptr = skb->data;
+
+               for (i = 0; i < len; i++) {
+                       if (i%8 == 0)
+                               printk(KERN_DEBUG
+                                       "MHDP mhdp_netdev_xmit: TX [%04X] ", i);
+                       printk(" 0x%02X", ptr[i]);
+                       if (i%8 == 7 || i == len-1)
+                               printk("\n");
+               }
+       }
+#endif
+xmit_again:
+
        if (tunnel->skb == NULL) {
                tunnel->skb = netdev_alloc_skb(dev,
-                       L2MUX_HDR_SIZE + sizeof(struct mhdp_hdr) + ETH_HLEN);
+                       L2MUX_HDR_SIZE + sizeof(struct mhdp_hdr));
 
                if (!tunnel->skb) {
                        EPRINTK("mhdp_netdev_xmit error1");
@@ -633,10 +721,19 @@ mhdp_netdev_xmit(struct sk_buff *skb, struct net_device *dev)
                mhdpHdr->packet_count = 0;
        }
 
+       /* This new frame is too big for the current mhdp frame,
+        * so send the current frame first. */
+       if (tunnel->skb->len + skb->len >= MAX_MHDP_FRAME_SIZE) {
+
+               mhdp_submit_queued_skb(tunnel, 1);
+
+               goto xmit_again;
+
+       } else {
+
        /*
         * skb_put cannot be called as the (data_len != 0)
         */
-       {
                tunnel->skb->tail += sizeof(struct packet_info);
                tunnel->skb->len  += sizeof(struct packet_info);
 
@@ -644,58 +741,63 @@ mhdp_netdev_xmit(struct sk_buff *skb, struct net_device *dev)
                                (unsigned long)tunnel->skb->tail,
                                (unsigned long)tunnel->skb->end,
                                (unsigned long)tunnel->skb->data_len);
-       }
 
-       mhdpHdr = (struct mhdp_hdr *)tunnel->skb->data;
+               mhdpHdr = (struct mhdp_hdr *)tunnel->skb->data;
 
-       tunnel->skb_to_free[mhdpHdr->packet_count] = skb;
+               tunnel->skb_to_free[mhdpHdr->packet_count] = skb;
 
-       packet_count = mhdpHdr->packet_count;
-       mhdpHdr->info[packet_count].pdn_id = tunnel->pdn_id;
-       if (packet_count == 0) {
-               mhdpHdr->info[packet_count].packet_offset = 0;
-       } else {
-               mhdpHdr->info[packet_count].packet_offset =
-                       mhdpHdr->info[packet_count - 1].packet_offset +
-                       mhdpHdr->info[packet_count - 1].packet_length;
-       }
+               packet_count = mhdpHdr->packet_count;
+               mhdpHdr->info[packet_count].pdn_id = tunnel->pdn_id;
+               if (packet_count == 0) {
+                       mhdpHdr->info[packet_count].packet_offset = 0;
+               } else {
+                       mhdpHdr->info[packet_count].packet_offset =
+                               mhdpHdr->info[packet_count - 1].packet_offset +
+                               mhdpHdr->info[packet_count - 1].packet_length;
+               }
 
-       mhdpHdr->info[packet_count].packet_length = skb->len;
-       mhdpHdr->packet_count++;
+               mhdpHdr->info[packet_count].packet_length = skb->len;
+               mhdpHdr->packet_count++;
 
-       page = virt_to_page(skb->data);
+               page = virt_to_page(skb->data);
 
-       if (page == NULL) {
-               EPRINTK("kmap_atomic_to_page returns NULL");
-               goto tx_error;
-       }
+               if (page == NULL) {
+                       EPRINTK("kmap_atomic_to_page returns NULL");
+                       goto tx_error;
+               }
 
-       get_page(page);
+               get_page(page);
 
-       offset = ((unsigned long)skb->data -
-                 (unsigned long)page_address(page));
+               offset = ((unsigned long)skb->data -
+                         (unsigned long)page_address(page));
 
-       skb_add_rx_frag(tunnel->skb, skb_shinfo(tunnel->skb)->nr_frags,
+               skb_add_rx_frag(tunnel->skb, skb_shinfo(tunnel->skb)->nr_frags,
                        page, offset, skb_headlen(skb), PAGE_SIZE);
 
-       if (skb_shinfo(skb)->nr_frags) {
-               for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-                       skb_frag_t *frag = &skb_shinfo(tunnel->skb)->frags[i];
-                       get_page(skb_frag_page(frag));
-                       skb_add_rx_frag(tunnel->skb,
+               if (skb_shinfo(skb)->nr_frags) {
+                       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+                               skb_frag_t *frag =
+                                       &skb_shinfo(tunnel->skb)->frags[i];
+
+                               get_page(skb_frag_page(frag));
+                               skb_add_rx_frag(tunnel->skb,
                                        skb_shinfo(tunnel->skb)->nr_frags,
-                                       skb_frag_page(frag), frag->page_offset,
-                                       frag->size, PAGE_SIZE);
+                                       skb_frag_page(frag),
+                                       frag->page_offset,
+                                       frag->size,
+                                       PAGE_SIZE);
+                       }
                }
-       }
 
-       if (mhdpHdr->packet_count == MAX_MHDPHDR_SIZE) {
-               mhdp_submit_queued_skb(tunnel);
-       } else {
-           tunnel->tx_timer.function = &tx_timer_timeout;
-           tunnel->tx_timer.data     = (unsigned long) tunnel;
-           tunnel->tx_timer.expires = jiffies + ((HZ + 999) / 1000) ;
-           add_timer(&tunnel->tx_timer);
+               if (mhdpHdr->packet_count >= MAX_MHDPHDR_SIZE) {
+                       mhdp_submit_queued_skb(tunnel, 1);
+               } else {
+                       tunnel->tx_timer.function = &tx_timer_timeout;
+                       tunnel->tx_timer.data     = (unsigned long) tunnel;
+                       tunnel->tx_timer.expires =
+                               jiffies + ((HZ + 999) / 1000);
+                       add_timer(&tunnel->tx_timer);
+               }
        }
 
        spin_unlock(&tunnel->timer_lock);
@@ -730,7 +832,8 @@ mhdp_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
                        iter; prev = iter, iter = iter->next) {
                        if (event_dev == iter->master_dev) {
                                if (!prev)
-                                       mhdpn->tunnels = mhdpn->tunnels->next;
+                                       mhdpn->tunnels =
+                                               mhdpn->tunnels->next;
                                else
                                        prev->next = iter->next;
                                mhdp_tunnel_destroy(iter->dev);
@@ -817,6 +920,11 @@ static int __net_init mhdp_init_net(struct net *net)
                return err;
        }
 
+#ifdef MHDP_USE_NAPI
+       netif_napi_add(mhdpn->ctl_dev, &mhdpn->napi, mhdp_poll, NAPI_WEIGHT);
+       napi_enable(&mhdpn->napi);
+       skb_queue_head_init(&mhdpn->skb_list);
+#endif /*#ifdef MHDP_USE_NAPI*/
        return 0;
 }
 
@@ -842,7 +950,11 @@ static int __init mhdp_init(void)
 {
        int err;
 
+#ifdef MHDP_USE_NAPI
+       err = l2mux_netif_rx_register(MHI_L3_MHDP_DL, mhdp_netdev_rx_napi);
+#else
        err = l2mux_netif_rx_register(MHI_L3_MHDP_DL, mhdp_netdev_rx);
+#endif /* MHDP_USE_NAPI */
        if (err)
                goto rollback0;