vmxnet3: use dev_dbg, fix build for CONFIG_BLOCK=n
drivers/net/vmxnet3/vmxnet3_drv.c (linux-3.10.git)
/*
 * Linux driver for VMware's vmxnet3 ethernet NIC.
 *
 * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
 *
 */

#include "vmxnet3_int.h"

char vmxnet3_driver_name[] = "vmxnet3";
#define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"


/*
 * PCI Device ID Table
 * Last entry must be all 0s
 */
static const struct pci_device_id vmxnet3_pciid_table[] = {
        {PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
        {0}
};

MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);

static atomic_t devices_found;


/*
 *    Enable/Disable the given intr
 */
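/* writing 0 to an IMR entry unmasks that intr; writing 1 masks it */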
static void
vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
        VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
}


static void
vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
        VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
}


/*
 *    Enable/Disable all intrs used by the device
 */
static void
vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
{
        int i;

        for (i = 0; i < adapter->intr.num_intrs; i++)
                vmxnet3_enable_intr(adapter, i);
}


static void
vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
{
        int i;

        for (i = 0; i < adapter->intr.num_intrs; i++)
                vmxnet3_disable_intr(adapter, i);
}


static void
vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
{
        VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events);
}


static bool
vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
        return netif_queue_stopped(adapter->netdev);
}


static void
vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
        tq->stopped = false;
        netif_start_queue(adapter->netdev);
}


static void
vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
        tq->stopped = false;
        netif_wake_queue(adapter->netdev);
}


static void
vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
        tq->stopped = true;
        tq->num_stop++;
        netif_stop_queue(adapter->netdev);
}


/*
 * Check the link state. This may start or stop the tx queue.
 */
static void
vmxnet3_check_link(struct vmxnet3_adapter *adapter)
{
        u32 ret;

        VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
        ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
        adapter->link_speed = ret >> 16;
        if (ret & 1) { /* Link is up. */
                printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n",
                       adapter->netdev->name, adapter->link_speed);
                if (!netif_carrier_ok(adapter->netdev))
                        netif_carrier_on(adapter->netdev);

                vmxnet3_tq_start(&adapter->tx_queue, adapter);
        } else {
                printk(KERN_INFO "%s: NIC Link is Down\n",
                       adapter->netdev->name);
                if (netif_carrier_ok(adapter->netdev))
                        netif_carrier_off(adapter->netdev);

                vmxnet3_tq_stop(&adapter->tx_queue, adapter);
        }
}


static void
vmxnet3_process_events(struct vmxnet3_adapter *adapter)
{
        u32 events = adapter->shared->ecr;
        if (!events)
                return;

        vmxnet3_ack_events(adapter, events);

        /* Check if link state has changed */
        if (events & VMXNET3_ECR_LINK)
                vmxnet3_check_link(adapter);

        /* Check if there is an error on xmit/recv queues */
        if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
                VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                                       VMXNET3_CMD_GET_QUEUE_STATUS);

                if (adapter->tqd_start->status.stopped) {
                        printk(KERN_ERR "%s: tq error 0x%x\n",
                               adapter->netdev->name,
                               adapter->tqd_start->status.error);
                }
                if (adapter->rqd_start->status.stopped) {
                        printk(KERN_ERR "%s: rq error 0x%x\n",
                               adapter->netdev->name,
                               adapter->rqd_start->status.error);
                }

                schedule_work(&adapter->work);
        }
}


static void
vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
                     struct pci_dev *pdev)
{
        if (tbi->map_type == VMXNET3_MAP_SINGLE)
                pci_unmap_single(pdev, tbi->dma_addr, tbi->len,
                                 PCI_DMA_TODEVICE);
        else if (tbi->map_type == VMXNET3_MAP_PAGE)
                pci_unmap_page(pdev, tbi->dma_addr, tbi->len,
                               PCI_DMA_TODEVICE);
        else
                BUG_ON(tbi->map_type != VMXNET3_MAP_NONE);

        tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */
}


static int
vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
                  struct pci_dev *pdev, struct vmxnet3_adapter *adapter)
{
        struct sk_buff *skb;
        int entries = 0;

        /* no out of order completion */
        BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
        BUG_ON(tq->tx_ring.base[eop_idx].txd.eop != 1);

        skb = tq->buf_info[eop_idx].skb;
        BUG_ON(skb == NULL);
        tq->buf_info[eop_idx].skb = NULL;

        VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);

        while (tq->tx_ring.next2comp != eop_idx) {
                vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
                                     pdev);

                /* update next2comp w/o tx_lock. Since we are marking more,
                 * instead of less, tx ring entries avail, the worst case is
                 * that the tx routine incorrectly re-queues a pkt due to
                 * insufficient tx ring entries.
                 */
                vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
                entries++;
        }

        dev_kfree_skb_any(skb);
        return entries;
}


static int
vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
                        struct vmxnet3_adapter *adapter)
{
        int completed = 0;
        union Vmxnet3_GenericDesc *gdesc;

        gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
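        /* a completion desc whose gen bit matches the ring's current gen
         * was written by the device and is ready to be processed
         */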
        while (gdesc->tcd.gen == tq->comp_ring.gen) {
                completed += vmxnet3_unmap_pkt(gdesc->tcd.txdIdx, tq,
                                               adapter->pdev, adapter);

                vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
                gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
        }

        if (completed) {
                spin_lock(&tq->tx_lock);
                if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
                             vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
                             VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
                             netif_carrier_ok(adapter->netdev))) {
                        vmxnet3_tq_wake(tq, adapter);
                }
                spin_unlock(&tq->tx_lock);
        }
        return completed;
}


static void
vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
                   struct vmxnet3_adapter *adapter)
{
        int i;

        while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
                struct vmxnet3_tx_buf_info *tbi;
                union Vmxnet3_GenericDesc *gdesc;

                tbi = tq->buf_info + tq->tx_ring.next2comp;
                gdesc = tq->tx_ring.base + tq->tx_ring.next2comp;

                vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
                if (tbi->skb) {
                        dev_kfree_skb_any(tbi->skb);
                        tbi->skb = NULL;
                }
                vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
        }

        /* sanity check, verify all buffers are indeed unmapped and freed */
        for (i = 0; i < tq->tx_ring.size; i++) {
                BUG_ON(tq->buf_info[i].skb != NULL ||
                       tq->buf_info[i].map_type != VMXNET3_MAP_NONE);
        }

        tq->tx_ring.gen = VMXNET3_INIT_GEN;
        tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;

        tq->comp_ring.gen = VMXNET3_INIT_GEN;
        tq->comp_ring.next2proc = 0;
}


void
vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
                   struct vmxnet3_adapter *adapter)
{
        if (tq->tx_ring.base) {
                pci_free_consistent(adapter->pdev, tq->tx_ring.size *
                                    sizeof(struct Vmxnet3_TxDesc),
                                    tq->tx_ring.base, tq->tx_ring.basePA);
                tq->tx_ring.base = NULL;
        }
        if (tq->data_ring.base) {
                pci_free_consistent(adapter->pdev, tq->data_ring.size *
                                    sizeof(struct Vmxnet3_TxDataDesc),
                                    tq->data_ring.base, tq->data_ring.basePA);
                tq->data_ring.base = NULL;
        }
        if (tq->comp_ring.base) {
                pci_free_consistent(adapter->pdev, tq->comp_ring.size *
                                    sizeof(struct Vmxnet3_TxCompDesc),
                                    tq->comp_ring.base, tq->comp_ring.basePA);
                tq->comp_ring.base = NULL;
        }
        kfree(tq->buf_info);
        tq->buf_info = NULL;
}


static void
vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
                struct vmxnet3_adapter *adapter)
{
        int i;

        /* reset the tx ring contents to 0 and reset the tx ring states */
        memset(tq->tx_ring.base, 0, tq->tx_ring.size *
               sizeof(struct Vmxnet3_TxDesc));
        tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
        tq->tx_ring.gen = VMXNET3_INIT_GEN;

        memset(tq->data_ring.base, 0, tq->data_ring.size *
               sizeof(struct Vmxnet3_TxDataDesc));

        /* reset the tx comp ring contents to 0 and reset comp ring states */
        memset(tq->comp_ring.base, 0, tq->comp_ring.size *
               sizeof(struct Vmxnet3_TxCompDesc));
        tq->comp_ring.next2proc = 0;
        tq->comp_ring.gen = VMXNET3_INIT_GEN;

        /* reset the bookkeeping data */
        memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
        for (i = 0; i < tq->tx_ring.size; i++)
                tq->buf_info[i].map_type = VMXNET3_MAP_NONE;

        /* stats are not reset */
}


static int
vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
                  struct vmxnet3_adapter *adapter)
{
        BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
               tq->comp_ring.base || tq->buf_info);

        tq->tx_ring.base = pci_alloc_consistent(adapter->pdev, tq->tx_ring.size
                           * sizeof(struct Vmxnet3_TxDesc),
                           &tq->tx_ring.basePA);
        if (!tq->tx_ring.base) {
                printk(KERN_ERR "%s: failed to allocate tx ring\n",
                       adapter->netdev->name);
                goto err;
        }

        tq->data_ring.base = pci_alloc_consistent(adapter->pdev,
                             tq->data_ring.size *
                             sizeof(struct Vmxnet3_TxDataDesc),
                             &tq->data_ring.basePA);
        if (!tq->data_ring.base) {
                printk(KERN_ERR "%s: failed to allocate data ring\n",
                       adapter->netdev->name);
                goto err;
        }

        tq->comp_ring.base = pci_alloc_consistent(adapter->pdev,
                             tq->comp_ring.size *
                             sizeof(struct Vmxnet3_TxCompDesc),
                             &tq->comp_ring.basePA);
        if (!tq->comp_ring.base) {
                printk(KERN_ERR "%s: failed to allocate tx comp ring\n",
                       adapter->netdev->name);
                goto err;
        }

        tq->buf_info = kcalloc(tq->tx_ring.size, sizeof(tq->buf_info[0]),
                               GFP_KERNEL);
        if (!tq->buf_info) {
                printk(KERN_ERR "%s: failed to allocate tx bufinfo\n",
                       adapter->netdev->name);
                goto err;
        }

        return 0;

err:
        vmxnet3_tq_destroy(tq, adapter);
        return -ENOMEM;
}


/*
 *    starting from ring->next2fill, allocate rx buffers for the given ring
 *    of the rx queue and update the rx desc. stop after @num_to_alloc buffers
 *    are allocated or allocation fails
 */

static int
vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
                        int num_to_alloc, struct vmxnet3_adapter *adapter)
{
        int num_allocated = 0;
        struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
        struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
        u32 val;

        while (num_allocated < num_to_alloc) {
                struct vmxnet3_rx_buf_info *rbi;
                union Vmxnet3_GenericDesc *gd;

                rbi = rbi_base + ring->next2fill;
                gd = ring->base + ring->next2fill;

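                /* HEAD buffers take the first part of a pkt into an skb;
                 * BODY buffers take the rest into full pages
                 */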
                if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
                        if (rbi->skb == NULL) {
                                rbi->skb = dev_alloc_skb(rbi->len +
                                                         NET_IP_ALIGN);
                                if (unlikely(rbi->skb == NULL)) {
                                        rq->stats.rx_buf_alloc_failure++;
                                        break;
                                }
                                rbi->skb->dev = adapter->netdev;

                                skb_reserve(rbi->skb, NET_IP_ALIGN);
                                rbi->dma_addr = pci_map_single(adapter->pdev,
                                                rbi->skb->data, rbi->len,
                                                PCI_DMA_FROMDEVICE);
                        } else {
                                /* rx buffer skipped by the device */
                        }
                        val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
                } else {
                        BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE ||
                               rbi->len  != PAGE_SIZE);

                        if (rbi->page == NULL) {
                                rbi->page = alloc_page(GFP_ATOMIC);
                                if (unlikely(rbi->page == NULL)) {
                                        rq->stats.rx_buf_alloc_failure++;
                                        break;
                                }
                                rbi->dma_addr = pci_map_page(adapter->pdev,
                                                rbi->page, 0, PAGE_SIZE,
                                                PCI_DMA_FROMDEVICE);
                        } else {
                                /* rx buffers skipped by the device */
                        }
                        val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
                }

                BUG_ON(rbi->dma_addr == 0);
                gd->rxd.addr = rbi->dma_addr;
                gd->dword[2] = (ring->gen << VMXNET3_RXD_GEN_SHIFT) | val |
                                rbi->len;

                num_allocated++;
                vmxnet3_cmd_ring_adv_next2fill(ring);
        }
        rq->uncommitted[ring_idx] += num_allocated;

        dev_dbg(&adapter->netdev->dev,
                "alloc_rx_buf: %d allocated, next2fill %u, next2comp "
                "%u, uncommitted %u\n", num_allocated, ring->next2fill,
                ring->next2comp, rq->uncommitted[ring_idx]);

        /* so that the device can distinguish a full ring and an empty ring */
        BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);

        return num_allocated;
}


static void
vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
                    struct vmxnet3_rx_buf_info *rbi)
{
        struct skb_frag_struct *frag = skb_shinfo(skb)->frags +
                skb_shinfo(skb)->nr_frags;

        BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);

        frag->page = rbi->page;
        frag->page_offset = 0;
        frag->size = rcd->len;
        skb->data_len += frag->size;
        skb_shinfo(skb)->nr_frags++;
}


static void
vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
                struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
                struct vmxnet3_adapter *adapter)
{
        u32 dw2, len;
        unsigned long buf_offset;
        int i;
        union Vmxnet3_GenericDesc *gdesc;
        struct vmxnet3_tx_buf_info *tbi = NULL;

        BUG_ON(ctx->copy_size > skb_headlen(skb));

        /* use the previous gen bit for the SOP desc */
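        /* the SOP gen bit is deliberately left stale; it is flipped only
         * after all descs of the pkt are written (see the wmb() and gen
         * flip in vmxnet3_tq_xmit), so the device never sees a partial pkt
         */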
        dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;

        ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
        gdesc = ctx->sop_txd; /* both loops below can be skipped */

        /* no need to map the buffer if headers are copied */
        if (ctx->copy_size) {
                ctx->sop_txd->txd.addr = tq->data_ring.basePA +
                                        tq->tx_ring.next2fill *
                                        sizeof(struct Vmxnet3_TxDataDesc);
                ctx->sop_txd->dword[2] = dw2 | ctx->copy_size;
                ctx->sop_txd->dword[3] = 0;

                tbi = tq->buf_info + tq->tx_ring.next2fill;
                tbi->map_type = VMXNET3_MAP_NONE;

                dev_dbg(&adapter->netdev->dev,
                        "txd[%u]: 0x%Lx 0x%x 0x%x\n",
                        tq->tx_ring.next2fill, ctx->sop_txd->txd.addr,
                        ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
                vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);

                /* use the right gen for non-SOP desc */
                dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
        }

        /* linear part can use multiple tx desc if it's big */
        len = skb_headlen(skb) - ctx->copy_size;
        buf_offset = ctx->copy_size;
        while (len) {
                u32 buf_size;

                buf_size = len > VMXNET3_MAX_TX_BUF_SIZE ?
                           VMXNET3_MAX_TX_BUF_SIZE : len;

                tbi = tq->buf_info + tq->tx_ring.next2fill;
                tbi->map_type = VMXNET3_MAP_SINGLE;
                tbi->dma_addr = pci_map_single(adapter->pdev,
                                skb->data + buf_offset, buf_size,
                                PCI_DMA_TODEVICE);

                tbi->len = buf_size; /* this automatically converts 2^14 to 0 */

                gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
                BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);

                gdesc->txd.addr = tbi->dma_addr;
                gdesc->dword[2] = dw2 | buf_size;
                gdesc->dword[3] = 0;

                dev_dbg(&adapter->netdev->dev,
                        "txd[%u]: 0x%Lx 0x%x 0x%x\n",
                        tq->tx_ring.next2fill, gdesc->txd.addr,
                        gdesc->dword[2], gdesc->dword[3]);
                vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
                dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;

                len -= buf_size;
                buf_offset += buf_size;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

                tbi = tq->buf_info + tq->tx_ring.next2fill;
                tbi->map_type = VMXNET3_MAP_PAGE;
                tbi->dma_addr = pci_map_page(adapter->pdev, frag->page,
                                             frag->page_offset, frag->size,
                                             PCI_DMA_TODEVICE);

                tbi->len = frag->size;

                gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
                BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);

                gdesc->txd.addr = tbi->dma_addr;
                gdesc->dword[2] = dw2 | frag->size;
                gdesc->dword[3] = 0;

                dev_dbg(&adapter->netdev->dev,
                        "txd[%u]: 0x%llx %u %u\n",
                        tq->tx_ring.next2fill, gdesc->txd.addr,
                        gdesc->dword[2], gdesc->dword[3]);
                vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
                dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
        }

        ctx->eop_txd = gdesc;

        /* set the last buf_info for the pkt */
        tbi->skb = skb;
        tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
}


/*
 *    parse and copy relevant protocol headers:
 *      For a tso pkt, relevant headers are L2/3/4 including options
 *      For a pkt requesting csum offloading, they are L2/3 and may include L4
 *      if it's a TCP/UDP pkt
 *
 * Returns:
 *    -1:  error happens during parsing
 *     0:  protocol headers parsed, but too big to be copied
 *     1:  protocol headers parsed and copied
 *
 * Other effects:
 *    1. related *ctx fields are updated.
 *    2. ctx->copy_size is # of bytes copied
 *    3. the portion copied is guaranteed to be in the linear part
 *
 */
static int
vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
                           struct vmxnet3_tx_ctx *ctx,
                           struct vmxnet3_adapter *adapter)
{
        struct Vmxnet3_TxDataDesc *tdd;

        if (ctx->mss) {
                ctx->eth_ip_hdr_size = skb_transport_offset(skb);
                ctx->l4_hdr_size = ((struct tcphdr *)
                                   skb_transport_header(skb))->doff * 4;
                ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
        } else {
                unsigned int pull_size;

                if (skb->ip_summed == CHECKSUM_PARTIAL) {
                        ctx->eth_ip_hdr_size = skb_transport_offset(skb);

                        if (ctx->ipv4) {
                                struct iphdr *iph = (struct iphdr *)
                                                    skb_network_header(skb);
                                if (iph->protocol == IPPROTO_TCP) {
                                        pull_size = ctx->eth_ip_hdr_size +
                                                    sizeof(struct tcphdr);

                                        if (unlikely(!pskb_may_pull(skb,
                                                                pull_size))) {
                                                goto err;
                                        }
                                        ctx->l4_hdr_size = ((struct tcphdr *)
                                           skb_transport_header(skb))->doff * 4;
                                } else if (iph->protocol == IPPROTO_UDP) {
                                        ctx->l4_hdr_size =
                                                        sizeof(struct udphdr);
                                } else {
                                        ctx->l4_hdr_size = 0;
                                }
                        } else {
                                /* for simplicity, don't copy L4 headers */
                                ctx->l4_hdr_size = 0;
                        }
                        ctx->copy_size = ctx->eth_ip_hdr_size +
                                         ctx->l4_hdr_size;
                } else {
                        ctx->eth_ip_hdr_size = 0;
                        ctx->l4_hdr_size = 0;
                        /* copy as much as allowed */
                        ctx->copy_size = min((unsigned int)VMXNET3_HDR_COPY_SIZE
                                             , skb_headlen(skb));
                }

                /* make sure headers are accessible directly */
                if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
                        goto err;
        }

        if (unlikely(ctx->copy_size > VMXNET3_HDR_COPY_SIZE)) {
                tq->stats.oversized_hdr++;
                ctx->copy_size = 0;
                return 0;
        }

        tdd = tq->data_ring.base + tq->tx_ring.next2fill;
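        /* the copied headers land in the data ring slot indexed by
         * next2fill; vmxnet3_map_pkt() points the SOP desc at this slot
         */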

        memcpy(tdd->data, skb->data, ctx->copy_size);
        dev_dbg(&adapter->netdev->dev,
                "copy %u bytes to dataRing[%u]\n",
                ctx->copy_size, tq->tx_ring.next2fill);
        return 1;

err:
        return -1;
}


static void
vmxnet3_prepare_tso(struct sk_buff *skb,
                    struct vmxnet3_tx_ctx *ctx)
{
        struct tcphdr *tcph = (struct tcphdr *)skb_transport_header(skb);
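
        /* preload tcph->check with the pseudo-header checksum, computed
         * with a zero length field, as expected for TSO
         */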
        if (ctx->ipv4) {
                struct iphdr *iph = (struct iphdr *)skb_network_header(skb);
                iph->check = 0;
                tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
                                                 IPPROTO_TCP, 0);
        } else {
                struct ipv6hdr *iph = (struct ipv6hdr *)skb_network_header(skb);
                tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
                                               IPPROTO_TCP, 0);
        }
}


/*
 * Transmits a pkt thru a given tq
 * Returns:
 *    NETDEV_TX_OK:      descriptors are set up successfully
 *    NETDEV_TX_OK:      error occurred, the pkt is dropped
 *    NETDEV_TX_BUSY:    tx ring is full, queue is stopped
 *
 * Side-effects:
 *    1. tx ring may be changed
 *    2. tq stats may be updated accordingly
 *    3. shared->txNumDeferred may be updated
 */

static int
vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
                struct vmxnet3_adapter *adapter, struct net_device *netdev)
{
        int ret;
        u32 count;
        unsigned long flags;
        struct vmxnet3_tx_ctx ctx;
        union Vmxnet3_GenericDesc *gdesc;

        /* conservatively estimate # of descriptors to use */
        count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) +
                skb_shinfo(skb)->nr_frags + 1;

        ctx.ipv4 = (skb->protocol == __constant_ntohs(ETH_P_IP));

        ctx.mss = skb_shinfo(skb)->gso_size;
        if (ctx.mss) {
                if (skb_header_cloned(skb)) {
                        if (unlikely(pskb_expand_head(skb, 0, 0,
                                                      GFP_ATOMIC) != 0)) {
                                tq->stats.drop_tso++;
                                goto drop_pkt;
                        }
                        tq->stats.copy_skb_header++;
                }
                vmxnet3_prepare_tso(skb, &ctx);
        } else {
                if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {

                        /* non-tso pkts must not use more than
                         * VMXNET3_MAX_TXD_PER_PKT entries
                         */
                        if (skb_linearize(skb) != 0) {
                                tq->stats.drop_too_many_frags++;
                                goto drop_pkt;
                        }
                        tq->stats.linearized++;

                        /* recalculate the # of descriptors to use */
                        count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
                }
        }

        ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter);
        if (ret >= 0) {
                BUG_ON(ret <= 0 && ctx.copy_size != 0);
                /* hdrs parsed, check against other limits */
                if (ctx.mss) {
                        if (unlikely(ctx.eth_ip_hdr_size + ctx.l4_hdr_size >
                                     VMXNET3_MAX_TX_BUF_SIZE)) {
                                goto hdr_too_big;
                        }
                } else {
                        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                                if (unlikely(ctx.eth_ip_hdr_size +
                                             skb->csum_offset >
                                             VMXNET3_MAX_CSUM_OFFSET)) {
                                        goto hdr_too_big;
                                }
                        }
                }
        } else {
                tq->stats.drop_hdr_inspect_err++;
                goto drop_pkt;
        }

        spin_lock_irqsave(&tq->tx_lock, flags);

        if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
                tq->stats.tx_ring_full++;
                dev_dbg(&adapter->netdev->dev,
                        "tx queue stopped on %s, next2comp %u"
                        " next2fill %u\n", adapter->netdev->name,
                        tq->tx_ring.next2comp, tq->tx_ring.next2fill);

                vmxnet3_tq_stop(tq, adapter);
                spin_unlock_irqrestore(&tq->tx_lock, flags);
                return NETDEV_TX_BUSY;
        }

        /* fill tx descs related to addr & len */
        vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter);

        /* setup the EOP desc */
        ctx.eop_txd->dword[3] = VMXNET3_TXD_CQ | VMXNET3_TXD_EOP;

        /* setup the SOP desc */
        gdesc = ctx.sop_txd;
        if (ctx.mss) {
                gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
                gdesc->txd.om = VMXNET3_OM_TSO;
                gdesc->txd.msscof = ctx.mss;
                tq->shared->txNumDeferred += (skb->len - gdesc->txd.hlen +
                                             ctx.mss - 1) / ctx.mss;
        } else {
                if (skb->ip_summed == CHECKSUM_PARTIAL) {
                        gdesc->txd.hlen = ctx.eth_ip_hdr_size;
                        gdesc->txd.om = VMXNET3_OM_CSUM;
                        gdesc->txd.msscof = ctx.eth_ip_hdr_size +
                                            skb->csum_offset;
                } else {
                        gdesc->txd.om = 0;
                        gdesc->txd.msscof = 0;
                }
                tq->shared->txNumDeferred++;
        }

        if (vlan_tx_tag_present(skb)) {
                gdesc->txd.ti = 1;
                gdesc->txd.tci = vlan_tx_tag_get(skb);
        }

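        /* ensure all desc writes are visible before ownership of the SOP
         * desc is handed to the device below
         */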
        wmb();

        /* finally flips the GEN bit of the SOP desc */
        gdesc->dword[2] ^= VMXNET3_TXD_GEN;
        dev_dbg(&adapter->netdev->dev,
                "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
                (u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd -
                tq->tx_ring.base), gdesc->txd.addr, gdesc->dword[2],
                gdesc->dword[3]);

        spin_unlock_irqrestore(&tq->tx_lock, flags);

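        /* ring the TXPROD doorbell only after txThreshold sends have been
         * deferred; batching the register writes keeps device traps down
         */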
        if (tq->shared->txNumDeferred >= tq->shared->txThreshold) {
                tq->shared->txNumDeferred = 0;
                VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_TXPROD,
                                       tq->tx_ring.next2fill);
        }
        netdev->trans_start = jiffies;

        return NETDEV_TX_OK;

hdr_too_big:
        tq->stats.drop_oversized_hdr++;
drop_pkt:
        tq->stats.drop_total++;
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}


static netdev_tx_t
vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
        struct vmxnet3_adapter *adapter = netdev_priv(netdev);
        struct vmxnet3_tx_queue *tq = &adapter->tx_queue;

        return vmxnet3_tq_xmit(skb, tq, adapter, netdev);
}


static void
vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
                struct sk_buff *skb,
                union Vmxnet3_GenericDesc *gdesc)
{
        if (!gdesc->rcd.cnc && adapter->rxcsum) {
                /* typical case: TCP/UDP over IP and both csums are correct */
                if ((gdesc->dword[3] & VMXNET3_RCD_CSUM_OK) ==
                                                        VMXNET3_RCD_CSUM_OK) {
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                        BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
                        BUG_ON(!(gdesc->rcd.v4  || gdesc->rcd.v6));
                        BUG_ON(gdesc->rcd.frg);
                } else {
                        if (gdesc->rcd.csum) {
                                skb->csum = htons(gdesc->rcd.csum);
                                skb->ip_summed = CHECKSUM_PARTIAL;
                        } else {
                                skb->ip_summed = CHECKSUM_NONE;
                        }
                }
        } else {
                skb->ip_summed = CHECKSUM_NONE;
        }
}


static void
vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
                 struct vmxnet3_rx_ctx *ctx,  struct vmxnet3_adapter *adapter)
{
        rq->stats.drop_err++;
        if (!rcd->fcs)
                rq->stats.drop_fcs++;

        rq->stats.drop_total++;

        /*
         * We do not unmap and chain the rx buffer to the skb.
         * We basically pretend this buffer is not used and will be recycled
         * by vmxnet3_rq_alloc_rx_buf()
         */

        /*
         * ctx->skb may be NULL if this is the first and the only one
         * desc for the pkt
         */
        if (ctx->skb)
                dev_kfree_skb_irq(ctx->skb);

        ctx->skb = NULL;
}


static int
vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
                       struct vmxnet3_adapter *adapter, int quota)
{
        static u32 rxprod_reg[2] = {VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2};
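        /* producer registers of the two rx rings; each queue's registers
         * are spaced 8 bytes apart, hence the qid * 8 offset below
         */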
        u32 num_rxd = 0;
        struct Vmxnet3_RxCompDesc *rcd;
        struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;

        rcd = &rq->comp_ring.base[rq->comp_ring.next2proc].rcd;
        while (rcd->gen == rq->comp_ring.gen) {
                struct vmxnet3_rx_buf_info *rbi;
                struct sk_buff *skb;
                int num_to_alloc;
                struct Vmxnet3_RxDesc *rxd;
                u32 idx, ring_idx;

                if (num_rxd >= quota) {
                        /* we may stop even before we see the EOP desc of
                         * the current pkt
                         */
                        break;
                }
                num_rxd++;

                idx = rcd->rxdIdx;
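                /* rqID tells which of the two rx rings this completion
                 * belongs to
                 */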
                ring_idx = rcd->rqID == rq->qid ? 0 : 1;

                rxd = &rq->rx_ring[ring_idx].base[idx].rxd;
                rbi = rq->buf_info[ring_idx] + idx;

                BUG_ON(rxd->addr != rbi->dma_addr || rxd->len != rbi->len);

                if (unlikely(rcd->eop && rcd->err)) {
                        vmxnet3_rx_error(rq, rcd, ctx, adapter);
                        goto rcd_done;
                }

                if (rcd->sop) { /* first buf of the pkt */
                        BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
                               rcd->rqID != rq->qid);

                        BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB);
                        BUG_ON(ctx->skb != NULL || rbi->skb == NULL);

                        if (unlikely(rcd->len == 0)) {
                                /* Pretend the rx buffer is skipped. */
                                BUG_ON(!(rcd->sop && rcd->eop));
                                dev_dbg(&adapter->netdev->dev,
                                        "rxRing[%u][%u] 0 length\n",
                                        ring_idx, idx);
                                goto rcd_done;
                        }

                        ctx->skb = rbi->skb;
                        rbi->skb = NULL;

                        pci_unmap_single(adapter->pdev, rbi->dma_addr, rbi->len,
                                         PCI_DMA_FROMDEVICE);

                        skb_put(ctx->skb, rcd->len);
                } else {
                        BUG_ON(ctx->skb == NULL);
                        /* non SOP buffer must be type 1 in most cases */
                        if (rbi->buf_type == VMXNET3_RX_BUF_PAGE) {
                                BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);

                                if (rcd->len) {
                                        pci_unmap_page(adapter->pdev,
                                                       rbi->dma_addr, rbi->len,
                                                       PCI_DMA_FROMDEVICE);

                                        vmxnet3_append_frag(ctx->skb, rcd, rbi);
                                        rbi->page = NULL;
                                }
                        } else {
                                /*
                                 * The only time a non-SOP buffer is type 0 is
                                 * when it's EOP and error flag is raised, which
                                 * has already been handled.
                                 */
                                BUG_ON(true);
                        }
                }

                skb = ctx->skb;
                if (rcd->eop) {
                        skb->len += skb->data_len;
                        skb->truesize += skb->data_len;

                        vmxnet3_rx_csum(adapter, skb,
                                        (union Vmxnet3_GenericDesc *)rcd);
                        skb->protocol = eth_type_trans(skb, adapter->netdev);

                        if (unlikely(adapter->vlan_grp && rcd->ts)) {
                                vlan_hwaccel_receive_skb(skb,
                                                adapter->vlan_grp, rcd->tci);
                        } else {
                                netif_receive_skb(skb);
                        }

                        adapter->netdev->last_rx = jiffies;
                        ctx->skb = NULL;
                }

rcd_done:
                /* device may skip some rx descs */
                rq->rx_ring[ring_idx].next2comp = idx;
                VMXNET3_INC_RING_IDX_ONLY(rq->rx_ring[ring_idx].next2comp,
                                          rq->rx_ring[ring_idx].size);

                /* refill rx buffers frequently to avoid starving the h/w */
                num_to_alloc = vmxnet3_cmd_ring_desc_avail(rq->rx_ring +
                                                           ring_idx);
                if (unlikely(num_to_alloc > VMXNET3_RX_ALLOC_THRESHOLD(rq,
                                                        ring_idx, adapter))) {
                        vmxnet3_rq_alloc_rx_buf(rq, ring_idx, num_to_alloc,
                                                adapter);

                        /* if needed, update the register */
                        if (unlikely(rq->shared->updateRxProd)) {
                                VMXNET3_WRITE_BAR0_REG(adapter,
                                        rxprod_reg[ring_idx] + rq->qid * 8,
                                        rq->rx_ring[ring_idx].next2fill);
                                rq->uncommitted[ring_idx] = 0;
                        }
                }

                vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
                rcd = &rq->comp_ring.base[rq->comp_ring.next2proc].rcd;
        }

        return num_rxd;
}


static void
vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
                   struct vmxnet3_adapter *adapter)
{
        u32 i, ring_idx;
        struct Vmxnet3_RxDesc *rxd;

        for (ring_idx = 0; ring_idx < 2; ring_idx++) {
                for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
                        rxd = &rq->rx_ring[ring_idx].base[i].rxd;

                        if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
                                        rq->buf_info[ring_idx][i].skb) {
                                pci_unmap_single(adapter->pdev, rxd->addr,
                                                 rxd->len, PCI_DMA_FROMDEVICE);
                                dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
                                rq->buf_info[ring_idx][i].skb = NULL;
                        } else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
                                        rq->buf_info[ring_idx][i].page) {
                                pci_unmap_page(adapter->pdev, rxd->addr,
                                               rxd->len, PCI_DMA_FROMDEVICE);
                                put_page(rq->buf_info[ring_idx][i].page);
                                rq->buf_info[ring_idx][i].page = NULL;
                        }
                }

                rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
                rq->rx_ring[ring_idx].next2fill =
                                        rq->rx_ring[ring_idx].next2comp = 0;
                rq->uncommitted[ring_idx] = 0;
        }

        rq->comp_ring.gen = VMXNET3_INIT_GEN;
        rq->comp_ring.next2proc = 0;
}


void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
                        struct vmxnet3_adapter *adapter)
{
        int i;
        int j;

        /* all rx buffers must have already been freed */
        for (i = 0; i < 2; i++) {
                if (rq->buf_info[i]) {
                        for (j = 0; j < rq->rx_ring[i].size; j++)
                                BUG_ON(rq->buf_info[i][j].page != NULL);
                }
        }


        kfree(rq->buf_info[0]);

        for (i = 0; i < 2; i++) {
                if (rq->rx_ring[i].base) {
                        pci_free_consistent(adapter->pdev, rq->rx_ring[i].size
                                            * sizeof(struct Vmxnet3_RxDesc),
                                            rq->rx_ring[i].base,
                                            rq->rx_ring[i].basePA);
                        rq->rx_ring[i].base = NULL;
                }
                rq->buf_info[i] = NULL;
        }

        if (rq->comp_ring.base) {
                pci_free_consistent(adapter->pdev, rq->comp_ring.size *
                                    sizeof(struct Vmxnet3_RxCompDesc),
                                    rq->comp_ring.base, rq->comp_ring.basePA);
                rq->comp_ring.base = NULL;
        }
}


static int
vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
                struct vmxnet3_adapter  *adapter)
{
        int i;

        /* initialize buf_info */
        for (i = 0; i < rq->rx_ring[0].size; i++) {

                /* 1st buf for a pkt is skbuff */
                if (i % adapter->rx_buf_per_pkt == 0) {
                        rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB;
                        rq->buf_info[0][i].len = adapter->skb_buf_size;
                } else { /* subsequent bufs for a pkt are frags */
                        rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
                        rq->buf_info[0][i].len = PAGE_SIZE;
                }
        }
        for (i = 0; i < rq->rx_ring[1].size; i++) {
                rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
                rq->buf_info[1][i].len = PAGE_SIZE;
        }

        /* reset internal state and allocate buffers for both rings */
        for (i = 0; i < 2; i++) {
                rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;
                rq->uncommitted[i] = 0;

                memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
                       sizeof(struct Vmxnet3_RxDesc));
                rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
        }
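        /* fill all but one slot: a completely full ring would look the
         * same as an empty one (next2fill == next2comp in both cases)
         */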
        if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
                                    adapter) == 0) {
                /* need at least 1 rx buffer for the 1st ring */
                return -ENOMEM;
        }
        vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);

        /* reset the comp ring */
        rq->comp_ring.next2proc = 0;
        memset(rq->comp_ring.base, 0, rq->comp_ring.size *
               sizeof(struct Vmxnet3_RxCompDesc));
        rq->comp_ring.gen = VMXNET3_INIT_GEN;

        /* reset rxctx */
        rq->rx_ctx.skb = NULL;

        /* stats are not reset */
        return 0;
}


static int
vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
{
        int i;
        size_t sz;
        struct vmxnet3_rx_buf_info *bi;

        for (i = 0; i < 2; i++) {

                sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
                rq->rx_ring[i].base = pci_alloc_consistent(adapter->pdev, sz,
                                                        &rq->rx_ring[i].basePA);
                if (!rq->rx_ring[i].base) {
                        printk(KERN_ERR "%s: failed to allocate rx ring %d\n",
                               adapter->netdev->name, i);
                        goto err;
                }
        }

        sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
        rq->comp_ring.base = pci_alloc_consistent(adapter->pdev, sz,
                                                  &rq->comp_ring.basePA);
        if (!rq->comp_ring.base) {
                printk(KERN_ERR "%s: failed to allocate rx comp ring\n",
                       adapter->netdev->name);
                goto err;
        }

        sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
                                                   rq->rx_ring[1].size);
        bi = kmalloc(sz, GFP_KERNEL);
        if (!bi) {
                printk(KERN_ERR "%s: failed to allocate rx bufinfo\n",
                       adapter->netdev->name);
                goto err;
        }
        memset(bi, 0, sz);
        rq->buf_info[0] = bi;
        rq->buf_info[1] = bi + rq->rx_ring[0].size;

        return 0;

err:
        vmxnet3_rq_destroy(rq, adapter);
        return -ENOMEM;
}


static int
vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
{
        if (unlikely(adapter->shared->ecr))
                vmxnet3_process_events(adapter);

        vmxnet3_tq_tx_complete(&adapter->tx_queue, adapter);
        return vmxnet3_rq_rx_complete(&adapter->rx_queue, adapter, budget);
}


static int
vmxnet3_poll(struct napi_struct *napi, int budget)
{
        struct vmxnet3_adapter *adapter = container_of(napi,
                                          struct vmxnet3_adapter, napi);
        int rxd_done;

        rxd_done = vmxnet3_do_poll(adapter, budget);

        if (rxd_done < budget) {
                napi_complete(napi);
                vmxnet3_enable_intr(adapter, 0);
        }
        return rxd_done;
}


/* Interrupt handler for vmxnet3  */
static irqreturn_t
vmxnet3_intr(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct vmxnet3_adapter *adapter = netdev_priv(dev);

        if (unlikely(adapter->intr.type == VMXNET3_IT_INTX)) {
                u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
                if (unlikely(icr == 0))
                        /* not ours */
                        return IRQ_NONE;
        }


        /* disable intr if needed */
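        /* (with VMXNET3_IMM_ACTIVE the driver, not the device, masks the
         * vector while the NAPI handler runs)
         */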
1307         if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1308                 vmxnet3_disable_intr(adapter, 0);
1309
1310         napi_schedule(&adapter->napi);
1311
1312         return IRQ_HANDLED;
1313 }
1314
1315 #ifdef CONFIG_NET_POLL_CONTROLLER
1316
1317
1318 /* netpoll callback. */
1319 static void
1320 vmxnet3_netpoll(struct net_device *netdev)
1321 {
1322         struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1323         int irq;
1324
1325 #ifdef CONFIG_PCI_MSI
1326         if (adapter->intr.type == VMXNET3_IT_MSIX)
1327                 irq = adapter->intr.msix_entries[0].vector;
1328         else
1329 #endif
1330                 irq = adapter->pdev->irq;
1331
1332         disable_irq(irq);
1333         vmxnet3_intr(irq, netdev);
1334         enable_irq(irq);
1335 }
1336 #endif
1337
1338 static int
1339 vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
1340 {
1341         int err;
1342
1343 #ifdef CONFIG_PCI_MSI
1344         if (adapter->intr.type == VMXNET3_IT_MSIX) {
1345                 /* we only use 1 MSI-X vector */
1346                 err = request_irq(adapter->intr.msix_entries[0].vector,
1347                                   vmxnet3_intr, 0, adapter->netdev->name,
1348                                   adapter->netdev);
1349         } else
1350 #endif
1351         if (adapter->intr.type == VMXNET3_IT_MSI) {
1352                 err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
1353                                   adapter->netdev->name, adapter->netdev);
1354         } else {
1355                 err = request_irq(adapter->pdev->irq, vmxnet3_intr,
1356                                   IRQF_SHARED, adapter->netdev->name,
1357                                   adapter->netdev);
1358         }
1359
1360         if (err)
1361                 printk(KERN_ERR "Failed to request irq %s (intr type:%d), error"
1362                        ":%d\n", adapter->netdev->name, adapter->intr.type, err);
1363
1364
1365         if (!err) {
1366                 int i;
1367                 /* init our intr settings */
1368                 for (i = 0; i < adapter->intr.num_intrs; i++)
1369                         adapter->intr.mod_levels[i] = UPT1_IML_ADAPTIVE;
1370
1371                 /* next setup intr index for all intr sources */
1372                 adapter->tx_queue.comp_ring.intr_idx = 0;
1373                 adapter->rx_queue.comp_ring.intr_idx = 0;
1374                 adapter->intr.event_intr_idx = 0;
1375
1376                 printk(KERN_INFO "%s: intr type %u, mode %u, %u vectors "
1377                        "allocated\n", adapter->netdev->name, adapter->intr.type,
1378                        adapter->intr.mask_mode, adapter->intr.num_intrs);
1379         }
1380
1381         return err;
1382 }
1383
1384
1385 static void
1386 vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
1387 {
1388         BUG_ON(adapter->intr.type == VMXNET3_IT_AUTO ||
1389                adapter->intr.num_intrs <= 0);
1390
1391         switch (adapter->intr.type) {
1392 #ifdef CONFIG_PCI_MSI
1393         case VMXNET3_IT_MSIX:
1394         {
1395                 int i;
1396
1397                 for (i = 0; i < adapter->intr.num_intrs; i++)
1398                         free_irq(adapter->intr.msix_entries[i].vector,
1399                                  adapter->netdev);
1400                 break;
1401         }
1402 #endif
1403         case VMXNET3_IT_MSI:
1404                 free_irq(adapter->pdev->irq, adapter->netdev);
1405                 break;
1406         case VMXNET3_IT_INTX:
1407                 free_irq(adapter->pdev->irq, adapter->netdev);
1408                 break;
1409         default:
1410                 BUG_ON(true);
1411         }
1412 }
1413
1414
1415 static void
1416 vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
1417 {
1418         struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1419         struct Vmxnet3_DriverShared *shared = adapter->shared;
1420         u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
1421
1422         if (grp) {
1423                 /* add vlan rx stripping. */
1424                 if (adapter->netdev->features & NETIF_F_HW_VLAN_RX) {
1425                         int i;
1426                         struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
1427                         adapter->vlan_grp = grp;
1428
1429                         /* update FEATURES to device */
1430                         devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
1431                         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1432                                                VMXNET3_CMD_UPDATE_FEATURE);
1433                         /*
1434                          *  Clear entire vfTable; then enable untagged pkts.
1435                          *  Note: setting one entry in vfTable to non-zero turns
1436                          *  on VLAN rx filtering.
1437                          */
1438                         for (i = 0; i < VMXNET3_VFT_SIZE; i++)
1439                                 vfTable[i] = 0;
1440
1441                         VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
1442                         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1443                                                VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1444                 } else {
1445                         printk(KERN_ERR "%s: vlan_rx_register when device has "
1446                                "no NETIF_F_HW_VLAN_RX\n", netdev->name);
1447                 }
1448         } else {
1449                 /* remove vlan rx stripping. */
1450                 struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
1451                 adapter->vlan_grp = NULL;
1452
1453                 if (devRead->misc.uptFeatures & UPT1_F_RXVLAN) {
1454                         int i;
1455
1456                         for (i = 0; i < VMXNET3_VFT_SIZE; i++) {
1457                                 /* clear entire vfTable; this also disables
1458                                  * VLAN rx filtering
1459                                  */
1460                                 vfTable[i] = 0;
1461                         }
1462                         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1463                                                VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1464
1465                         /* update FEATURES to device */
1466                         devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
1467                         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1468                                                VMXNET3_CMD_UPDATE_FEATURE);
1469                 }
1470         }
1471 }
1472
1473
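/*
 * Replay the VLAN IDs of the current vlan_grp into vfTable.  This is
 * needed because vmxnet3_setup_driver_shared() zeroes the whole shared
 * area, wiping the filter table the device saw before a reset.
 */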
1474 static void
1475 vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
1476 {
1477         if (adapter->vlan_grp) {
1478                 u16 vid;
1479                 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
1480                 bool activeVlan = false;
1481
1482                 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
1483                         if (vlan_group_get_device(adapter->vlan_grp, vid)) {
1484                                 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
1485                                 activeVlan = true;
1486                         }
1487                 }
1488                 if (activeVlan) {
1489                         /* continue to allow untagged pkts */
1490                         VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
1491                 }
1492         }
1493 }
1494
1495
1496 static void
1497 vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1498 {
1499         struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1500         u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
1501
1502         VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
1503         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1504                                VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1505 }
1506
1507
1508 static void
1509 vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1510 {
1511         struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1512         u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
1513
1514         VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
1515         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1516                                VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1517 }
1518
1519
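/*
 * Flatten the netdev's multicast list into a newly allocated array of
 * ETH_ALEN-byte addresses for the device's mfTable.  GFP_ATOMIC because
 * we may be called with BHs disabled; returns NULL if the allocation
 * fails or the list would overflow the u16 mfTableLen.  The caller owns
 * (and must kfree) the buffer.
 */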
1520 static u8 *
1521 vmxnet3_copy_mc(struct net_device *netdev)
1522 {
1523         u8 *buf = NULL;
1524         u32 sz = netdev->mc_count * ETH_ALEN;
1525
1526         /* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
1527         if (sz <= 0xffff) {
1528                 /* We may be called with BH disabled */
1529                 buf = kmalloc(sz, GFP_ATOMIC);
1530                 if (buf) {
1531                         int i;
1532                         struct dev_mc_list *mc = netdev->mc_list;
1533
1534                         for (i = 0; i < netdev->mc_count; i++) {
1535                                 BUG_ON(!mc);
1536                                 memcpy(buf + i * ETH_ALEN, mc->dmi_addr,
1537                                        ETH_ALEN);
1538                                 mc = mc->next;
1539                         }
1540                 }
1541         }
1542         return buf;
1543 }
1544
1545
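/*
 * Translate netdev->flags and the multicast list into VMXNET3_RXM_*
 * mode bits.  If copying the multicast list fails we fall back to
 * ALL_MULTI rather than drop traffic.  UPDATE_RX_MODE is issued only
 * when the mode actually changed; the MAC filters are always refreshed.
 */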
1546 static void
1547 vmxnet3_set_mc(struct net_device *netdev)
1548 {
1549         struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1550         struct Vmxnet3_RxFilterConf *rxConf =
1551                                         &adapter->shared->devRead.rxFilterConf;
1552         u8 *new_table = NULL;
1553         u32 new_mode = VMXNET3_RXM_UCAST;
1554
1555         if (netdev->flags & IFF_PROMISC)
1556                 new_mode |= VMXNET3_RXM_PROMISC;
1557
1558         if (netdev->flags & IFF_BROADCAST)
1559                 new_mode |= VMXNET3_RXM_BCAST;
1560
1561         if (netdev->flags & IFF_ALLMULTI)
1562                 new_mode |= VMXNET3_RXM_ALL_MULTI;
1563         else if (netdev->mc_count > 0) {
1564                 new_table = vmxnet3_copy_mc(netdev);
1565                 if (new_table) {
1566                         new_mode |= VMXNET3_RXM_MCAST;
1567                         rxConf->mfTableLen = netdev->mc_count * ETH_ALEN;
1568                         rxConf->mfTablePA = virt_to_phys(new_table);
1569                 } else {
1570                         printk(KERN_INFO "%s: failed to copy mcast list, "
1571                                "setting ALL_MULTI\n", netdev->name);
1572                         new_mode |= VMXNET3_RXM_ALL_MULTI;
1573                 }
1574         }
1575
1579         if (!(new_mode & VMXNET3_RXM_MCAST)) {
1580                 rxConf->mfTableLen = 0;
1581                 rxConf->mfTablePA = 0;
1582         }
1583
1584         if (new_mode != rxConf->rxMode) {
1585                 rxConf->rxMode = new_mode;
1586                 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1587                                        VMXNET3_CMD_UPDATE_RX_MODE);
1588         }
1589
1590         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1591                                VMXNET3_CMD_UPDATE_MAC_FILTERS);
1592
1593         kfree(new_table);
1594 }
1595
1596
1597 /*
1598  *   Set up driver_shared based on settings in adapter.
1599  */
1600
1601 static void
1602 vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
1603 {
1604         struct Vmxnet3_DriverShared *shared = adapter->shared;
1605         struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
1606         struct Vmxnet3_TxQueueConf *tqc;
1607         struct Vmxnet3_RxQueueConf *rqc;
1608         int i;
1609
1610         memset(shared, 0, sizeof(*shared));
1611
1612         /* driver settings */
1613         shared->magic = VMXNET3_REV1_MAGIC;
1614         devRead->misc.driverInfo.version = VMXNET3_DRIVER_VERSION_NUM;
1615         devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
1616                                 VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
1617         devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
1618         devRead->misc.driverInfo.vmxnet3RevSpt = 1;
1619         devRead->misc.driverInfo.uptVerSpt = 1;
1620
1621         devRead->misc.ddPA = virt_to_phys(adapter);
1622         devRead->misc.ddLen = sizeof(struct vmxnet3_adapter);
1623
1624         /* set up feature flags */
1625         if (adapter->rxcsum)
1626                 devRead->misc.uptFeatures |= UPT1_F_RXCSUM;
1627
1628         if (adapter->lro) {
1629                 devRead->misc.uptFeatures |= UPT1_F_LRO;
1630                 devRead->misc.maxNumRxSG = 1 + MAX_SKB_FRAGS;
1631         }
1632         if ((adapter->netdev->features & NETIF_F_HW_VLAN_RX)
1633                         && adapter->vlan_grp) {
1634                 devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
1635         }
1636
1637         devRead->misc.mtu = adapter->netdev->mtu;
1638         devRead->misc.queueDescPA = adapter->queue_desc_pa;
1639         devRead->misc.queueDescLen = sizeof(struct Vmxnet3_TxQueueDesc) +
1640                                      sizeof(struct Vmxnet3_RxQueueDesc);
1641
1642         /* tx queue settings */
1643         BUG_ON(adapter->tx_queue.tx_ring.base == NULL);
1644
1645         devRead->misc.numTxQueues = 1;
1646         tqc = &adapter->tqd_start->conf;
1647         tqc->txRingBasePA   = adapter->tx_queue.tx_ring.basePA;
1648         tqc->dataRingBasePA = adapter->tx_queue.data_ring.basePA;
1649         tqc->compRingBasePA = adapter->tx_queue.comp_ring.basePA;
1650         tqc->ddPA           = virt_to_phys(adapter->tx_queue.buf_info);
1651         tqc->txRingSize     = adapter->tx_queue.tx_ring.size;
1652         tqc->dataRingSize   = adapter->tx_queue.data_ring.size;
1653         tqc->compRingSize   = adapter->tx_queue.comp_ring.size;
1654         tqc->ddLen          = sizeof(struct vmxnet3_tx_buf_info) *
1655                               tqc->txRingSize;
1656         tqc->intrIdx        = adapter->tx_queue.comp_ring.intr_idx;
1657
1658         /* rx queue settings */
1659         devRead->misc.numRxQueues = 1;
1660         rqc = &adapter->rqd_start->conf;
1661         rqc->rxRingBasePA[0] = adapter->rx_queue.rx_ring[0].basePA;
1662         rqc->rxRingBasePA[1] = adapter->rx_queue.rx_ring[1].basePA;
1663         rqc->compRingBasePA  = adapter->rx_queue.comp_ring.basePA;
1664         rqc->ddPA            = virt_to_phys(adapter->rx_queue.buf_info);
1665         rqc->rxRingSize[0]   = adapter->rx_queue.rx_ring[0].size;
1666         rqc->rxRingSize[1]   = adapter->rx_queue.rx_ring[1].size;
1667         rqc->compRingSize    = adapter->rx_queue.comp_ring.size;
1668         rqc->ddLen           = sizeof(struct vmxnet3_rx_buf_info) *
1669                                (rqc->rxRingSize[0] + rqc->rxRingSize[1]);
1670         rqc->intrIdx         = adapter->rx_queue.comp_ring.intr_idx;
1671
1672         /* intr settings */
1673         devRead->intrConf.autoMask = adapter->intr.mask_mode ==
1674                                      VMXNET3_IMM_AUTO;
1675         devRead->intrConf.numIntrs = adapter->intr.num_intrs;
1676         for (i = 0; i < adapter->intr.num_intrs; i++)
1677                 devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];
1678
1679         devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
1680
1681         /* rx filter settings */
1682         devRead->rxFilterConf.rxMode = 0;
1683         vmxnet3_restore_vlan(adapter);
1684         /* the rest are already zeroed */
1685 }
1686
1687
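/*
 * Bring the device to the activated state: initialize the rings, request
 * the IRQ, fill in driver_shared and hand its physical address to the
 * device through DSAL/DSAH, then issue ACTIVATE_DEV.  A non-zero readback
 * of the CMD register means activation failed.  Finally the RXPROD
 * registers are primed so the device sees the rx buffers already posted.
 */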
1688 int
1689 vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
1690 {
1691         int err;
1692         u32 ret;
1693
1694         dev_dbg(&adapter->netdev->dev,
1695                 "%s: skb_buf_size %d, rx_buf_per_pkt %d, ring sizes"
1696                 " %u %u %u\n", adapter->netdev->name, adapter->skb_buf_size,
1697                 adapter->rx_buf_per_pkt, adapter->tx_queue.tx_ring.size,
1698                 adapter->rx_queue.rx_ring[0].size,
1699                 adapter->rx_queue.rx_ring[1].size);
1700
1701         vmxnet3_tq_init(&adapter->tx_queue, adapter);
1702         err = vmxnet3_rq_init(&adapter->rx_queue, adapter);
1703         if (err) {
1704                 printk(KERN_ERR "Failed to init rx queue for %s: error %d\n",
1705                        adapter->netdev->name, err);
1706                 goto rq_err;
1707         }
1708
1709         err = vmxnet3_request_irqs(adapter);
1710         if (err) {
1711                 printk(KERN_ERR "Failed to setup irq for %s: error %d\n",
1712                        adapter->netdev->name, err);
1713                 goto irq_err;
1714         }
1715
1716         vmxnet3_setup_driver_shared(adapter);
1717
1718         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL,
1719                                VMXNET3_GET_ADDR_LO(adapter->shared_pa));
1720         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH,
1721                                VMXNET3_GET_ADDR_HI(adapter->shared_pa));
1722
1723         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1724                                VMXNET3_CMD_ACTIVATE_DEV);
1725         ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
1726
1727         if (ret != 0) {
1728                 printk(KERN_ERR "Failed to activate dev %s: error %u\n",
1729                        adapter->netdev->name, ret);
1730                 err = -EINVAL;
1731                 goto activate_err;
1732         }
1733         VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_RXPROD,
1734                                adapter->rx_queue.rx_ring[0].next2fill);
1735         VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_RXPROD2,
1736                                adapter->rx_queue.rx_ring[1].next2fill);
1737
1738         /* Apply the rx filter settings last. */
1739         vmxnet3_set_mc(adapter->netdev);
1740
1741         /*
1742          * Check link state when first activating device. It will start the
1743          * tx queue if the link is up.
1744          */
1745         vmxnet3_check_link(adapter);
1746
1747         napi_enable(&adapter->napi);
1748         vmxnet3_enable_all_intrs(adapter);
1749         clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
1750         return 0;
1751
1752 activate_err:
1753         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0);
1754         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0);
1755         vmxnet3_free_irqs(adapter);
1756 irq_err:
1757 rq_err:
1758         /* free up buffers we allocated */
1759         vmxnet3_rq_cleanup(&adapter->rx_queue, adapter);
1760         return err;
1761 }
1762
1763
1764 void
1765 vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
1766 {
1767         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
1768 }
1769
1770
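/*
 * Idempotent thanks to the QUIESCED bit: a second caller returns early
 * instead of double-freeing the IRQ and the rings.
 */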
1771 int
1772 vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
1773 {
1774         if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
1775                 return 0;
1776
1777
1778         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1779                                VMXNET3_CMD_QUIESCE_DEV);
1780         vmxnet3_disable_all_intrs(adapter);
1781
1782         napi_disable(&adapter->napi);
1783         netif_tx_disable(adapter->netdev);
1784         adapter->link_speed = 0;
1785         netif_carrier_off(adapter->netdev);
1786
1787         vmxnet3_tq_cleanup(&adapter->tx_queue, adapter);
1788         vmxnet3_rq_cleanup(&adapter->rx_queue, adapter);
1789         vmxnet3_free_irqs(adapter);
1790         return 0;
1791 }
1792
1793
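/*
 * The MAC address is split across two registers: bytes 0-3 go to MACL,
 * bytes 4-5 to MACH.  The unaligned little-endian u32 load is harmless
 * in practice, as the device only exists on VMware's x86 (little-endian)
 * virtual hardware.
 */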
1794 static void
1795 vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
1796 {
1797         u32 tmp;
1798
1799         tmp = *(u32 *)mac;
1800         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);
1801
1802         tmp = (mac[5] << 8) | mac[4];
1803         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
1804 }
1805
1806
1807 static int
1808 vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
1809 {
1810         struct sockaddr *addr = p;
1811         struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1812
1813         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1814         vmxnet3_write_mac_addr(adapter, addr->sa_data);
1815
1816         return 0;
1817 }
1818
1819
1820 /* ==================== initialization and cleanup routines ============ */
1821
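/*
 * Enable the PCI device, negotiate a 64-bit DMA mask (falling back to
 * 32-bit), claim BARs 0 and 1 (the (1 << 2) - 1 mask selects the first
 * two regions) and map both: BAR0 holds the hot-path registers (IMR,
 * RXPROD), BAR1 the configuration registers (CMD, MACL/MACH, ...).
 */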
1822 static int
1823 vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
1824 {
1825         int err;
1826         unsigned long mmio_start, mmio_len;
1827         struct pci_dev *pdev = adapter->pdev;
1828
1829         err = pci_enable_device(pdev);
1830         if (err) {
1831                 printk(KERN_ERR "Failed to enable adapter %s: error %d\n",
1832                        pci_name(pdev), err);
1833                 return err;
1834         }
1835
1836         if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
1837                 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
1838                         printk(KERN_ERR "pci_set_consistent_dma_mask failed "
1839                                "for adapter %s\n", pci_name(pdev));
1840                         err = -EIO;
1841                         goto err_set_mask;
1842                 }
1843                 *dma64 = true;
1844         } else {
1845                 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
1846                         printk(KERN_ERR "pci_set_dma_mask failed for adapter "
1847                                "%s\n",  pci_name(pdev));
1848                         err = -EIO;
1849                         goto err_set_mask;
1850                 }
1851                 *dma64 = false;
1852         }
1853
1854         err = pci_request_selected_regions(pdev, (1 << 2) - 1,
1855                                            vmxnet3_driver_name);
1856         if (err) {
1857                 printk(KERN_ERR "Failed to request region for adapter %s: "
1858                        "error %d\n", pci_name(pdev), err);
1859                 goto err_set_mask;
1860         }
1861
1862         pci_set_master(pdev);
1863
1864         mmio_start = pci_resource_start(pdev, 0);
1865         mmio_len = pci_resource_len(pdev, 0);
1866         adapter->hw_addr0 = ioremap(mmio_start, mmio_len);
1867         if (!adapter->hw_addr0) {
1868                 printk(KERN_ERR "Failed to map bar0 for adapter %s\n",
1869                        pci_name(pdev));
1870                 err = -EIO;
1871                 goto err_ioremap;
1872         }
1873
1874         mmio_start = pci_resource_start(pdev, 1);
1875         mmio_len = pci_resource_len(pdev, 1);
1876         adapter->hw_addr1 = ioremap(mmio_start, mmio_len);
1877         if (!adapter->hw_addr1) {
1878                 printk(KERN_ERR "Failed to map bar1 for adapter %s\n",
1879                        pci_name(pdev));
1880                 err = -EIO;
1881                 goto err_bar1;
1882         }
1883         return 0;
1884
1885 err_bar1:
1886         iounmap(adapter->hw_addr0);
1887 err_ioremap:
1888         pci_release_selected_regions(pdev, (1 << 2) - 1);
1889 err_set_mask:
1890         pci_disable_device(pdev);
1891         return err;
1892 }
1893
1894
1895 static void
1896 vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
1897 {
1898         BUG_ON(!adapter->pdev);
1899
1900         iounmap(adapter->hw_addr0);
1901         iounmap(adapter->hw_addr1);
1902         pci_release_selected_regions(adapter->pdev, (1 << 2) - 1);
1903         pci_disable_device(adapter->pdev);
1904 }
1905
1906
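/*
 * Pick rx buffer sizes for the current MTU.  A small MTU fits entirely
 * in one skb buffer of mtu + VMXNET3_MAX_ETH_HDR_SIZE bytes; a larger
 * MTU uses one max-sized skb buffer plus page-sized buffers for the
 * rest, i.e. rx_buf_per_pkt = 1 + DIV_ROUND_UP(sz, PAGE_SIZE).  Ring 0
 * is then rounded up as described below and capped at the largest
 * multiple of sz that does not exceed VMXNET3_RX_RING_MAX_SIZE.
 */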
1907 static void
1908 vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
1909 {
1910         size_t sz;
1911
1912         if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
1913                                     VMXNET3_MAX_ETH_HDR_SIZE) {
1914                 adapter->skb_buf_size = adapter->netdev->mtu +
1915                                         VMXNET3_MAX_ETH_HDR_SIZE;
1916                 if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
1917                         adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;
1918
1919                 adapter->rx_buf_per_pkt = 1;
1920         } else {
1921                 adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
1922                 sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
1923                                             VMXNET3_MAX_ETH_HDR_SIZE;
1924                 adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
1925         }
1926
1927         /*
1928          * for simplicity, force the ring0 size to be a multiple of
1929          * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
1930          */
1931         sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
1932         adapter->rx_queue.rx_ring[0].size = (adapter->rx_queue.rx_ring[0].size +
1933                                              sz - 1) / sz * sz;
1934         adapter->rx_queue.rx_ring[0].size = min_t(u32,
1935                                             adapter->rx_queue.rx_ring[0].size,
1936                                             VMXNET3_RX_RING_MAX_SIZE / sz * sz);
1937 }
1938
1939
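/*
 * Size both queues and create them.  The rx completion ring is sized to
 * rx_ring[0].size + rx_ring[1].size, presumably so that every posted rx
 * buffer has a completion slot; qid/qid2 name ring 0 and ring 1 to the
 * device.
 */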
1940 int
1941 vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
1942                       u32 rx_ring_size, u32 rx_ring2_size)
1943 {
1944         int err;
1945
1946         adapter->tx_queue.tx_ring.size   = tx_ring_size;
1947         adapter->tx_queue.data_ring.size = tx_ring_size;
1948         adapter->tx_queue.comp_ring.size = tx_ring_size;
1949         adapter->tx_queue.shared = &adapter->tqd_start->ctrl;
1950         adapter->tx_queue.stopped = true;
1951         err = vmxnet3_tq_create(&adapter->tx_queue, adapter);
1952         if (err)
1953                 return err;
1954
1955         adapter->rx_queue.rx_ring[0].size = rx_ring_size;
1956         adapter->rx_queue.rx_ring[1].size = rx_ring2_size;
1957         vmxnet3_adjust_rx_ring_size(adapter);
1958         adapter->rx_queue.comp_ring.size  = adapter->rx_queue.rx_ring[0].size +
1959                                             adapter->rx_queue.rx_ring[1].size;
1960         adapter->rx_queue.qid  = 0;
1961         adapter->rx_queue.qid2 = 1;
1962         adapter->rx_queue.shared = &adapter->rqd_start->ctrl;
1963         err = vmxnet3_rq_create(&adapter->rx_queue, adapter);
1964         if (err)
1965                 vmxnet3_tq_destroy(&adapter->tx_queue, adapter);
1966
1967         return err;
1968 }
1969
1970 static int
1971 vmxnet3_open(struct net_device *netdev)
1972 {
1973         struct vmxnet3_adapter *adapter;
1974         int err;
1975
1976         adapter = netdev_priv(netdev);
1977
1978         spin_lock_init(&adapter->tx_queue.tx_lock);
1979
1980         err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE,
1981                                     VMXNET3_DEF_RX_RING_SIZE,
1982                                     VMXNET3_DEF_RX_RING_SIZE);
1983         if (err)
1984                 goto queue_err;
1985
1986         err = vmxnet3_activate_dev(adapter);
1987         if (err)
1988                 goto activate_err;
1989
1990         return 0;
1991
1992 activate_err:
1993         vmxnet3_rq_destroy(&adapter->rx_queue, adapter);
1994         vmxnet3_tq_destroy(&adapter->tx_queue, adapter);
1995 queue_err:
1996         return err;
1997 }
1998
1999
2000 static int
2001 vmxnet3_close(struct net_device *netdev)
2002 {
2003         struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2004
2005         /*
2006          * Reset_work may be in the middle of resetting the device; wait for
2007          * it to complete.
2008          */
2009         while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
2010                 msleep(1);
2011
2012         vmxnet3_quiesce_dev(adapter);
2013
2014         vmxnet3_rq_destroy(&adapter->rx_queue, adapter);
2015         vmxnet3_tq_destroy(&adapter->tx_queue, adapter);
2016
2017         clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
2018
2019
2020         return 0;
2021 }
2022
2023
2024 void
2025 vmxnet3_force_close(struct vmxnet3_adapter *adapter)
2026 {
2027         /*
2028          * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
2029          * vmxnet3_close() will deadlock.
2030          */
2031         BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));
2032
2033         /* we need to enable NAPI, otherwise dev_close will deadlock */
2034         napi_enable(&adapter->napi);
2035         dev_close(adapter->netdev);
2036 }
2037
2038
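/*
 * Changing the MTU changes the rx buffer layout, so if the interface is
 * running we quiesce and reset the device, destroy and re-create the rx
 * queue, and re-activate.  Any failure along the way force-closes the
 * device rather than leave it half-configured.
 */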
2039 static int
2040 vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
2041 {
2042         struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2043         int err = 0;
2044
2045         if (new_mtu < VMXNET3_MIN_MTU || new_mtu > VMXNET3_MAX_MTU)
2046                 return -EINVAL;
2047
2048         if (new_mtu > 1500 && !adapter->jumbo_frame)
2049                 return -EINVAL;
2050
2051         netdev->mtu = new_mtu;
2052
2053         /*
2054          * Reset_work may be in the middle of resetting the device; wait for
2055          * it to complete.
2056          */
2057         while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
2058                 msleep(1);
2059
2060         if (netif_running(netdev)) {
2061                 vmxnet3_quiesce_dev(adapter);
2062                 vmxnet3_reset_dev(adapter);
2063
2064                 /* we need to re-create the rx queue based on the new mtu */
2065                 vmxnet3_rq_destroy(&adapter->rx_queue, adapter);
2066                 vmxnet3_adjust_rx_ring_size(adapter);
2067                 adapter->rx_queue.comp_ring.size  =
2068                                         adapter->rx_queue.rx_ring[0].size +
2069                                         adapter->rx_queue.rx_ring[1].size;
2070                 err = vmxnet3_rq_create(&adapter->rx_queue, adapter);
2071                 if (err) {
2072                         printk(KERN_ERR "%s: failed to re-create rx queue,"
2073                                 " error %d. Closing it.\n", netdev->name, err);
2074                         goto out;
2075                 }
2076
2077                 err = vmxnet3_activate_dev(adapter);
2078                 if (err) {
2079                         printk(KERN_ERR "%s: failed to re-activate, error %d. "
2080                                 "Closing it.\n", netdev->name, err);
2081                         goto out;
2082                 }
2083         }
2084
2085 out:
2086         clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
2087         if (err)
2088                 vmxnet3_force_close(adapter);
2089
2090         return err;
2091 }
2092
2093
2094 static void
2095 vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64)
2096 {
2097         struct net_device *netdev = adapter->netdev;
2098
2099         netdev->features = NETIF_F_SG |
2100                 NETIF_F_HW_CSUM |
2101                 NETIF_F_HW_VLAN_TX |
2102                 NETIF_F_HW_VLAN_RX |
2103                 NETIF_F_HW_VLAN_FILTER |
2104                 NETIF_F_TSO |
2105                 NETIF_F_TSO6 |
2106                 NETIF_F_LRO;
2107
2108         printk(KERN_INFO "features: sg csum vlan jf tso tsoIPv6 lro");
2109
2110         adapter->rxcsum = true;
2111         adapter->jumbo_frame = true;
2112         adapter->lro = true;
2113
2114         if (dma64) {
2115                 netdev->features |= NETIF_F_HIGHDMA;
2116                 printk(" highDMA");
2117         }
2118
2119         netdev->vlan_features = netdev->features;
2120         printk("\n");
2121 }
2122
2123
2124 static void
2125 vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
2126 {
2127         u32 tmp;
2128
2129         tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
2130         *(u32 *)mac = tmp;
2131
2132         tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
2133         mac[4] = tmp & 0xff;
2134         mac[5] = (tmp >> 8) & 0xff;
2135 }
2136
2137
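/*
 * Ask the device which interrupt type to use (GET_CONF_INTR): bits 1:0
 * of the readback select the type and bits 3:2 the mask mode.  With
 * VMXNET3_IT_AUTO we try MSI-X first, then MSI, and fall back to INTx.
 */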
2138 static void
2139 vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
2140 {
2141         u32 cfg;
2142
2143         /* intr settings */
2144         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2145                                VMXNET3_CMD_GET_CONF_INTR);
2146         cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
2147         adapter->intr.type = cfg & 0x3;
2148         adapter->intr.mask_mode = (cfg >> 2) & 0x3;
2149
2150         if (adapter->intr.type == VMXNET3_IT_AUTO) {
2151                 int err;
2152
2153 #ifdef CONFIG_PCI_MSI
2154                 adapter->intr.msix_entries[0].entry = 0;
2155                 err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries,
2156                                       VMXNET3_LINUX_MAX_MSIX_VECT);
2157                 if (!err) {
2158                         adapter->intr.num_intrs = 1;
2159                         adapter->intr.type = VMXNET3_IT_MSIX;
2160                         return;
2161                 }
2162 #endif
2163
2164                 err = pci_enable_msi(adapter->pdev);
2165                 if (!err) {
2166                         adapter->intr.num_intrs = 1;
2167                         adapter->intr.type = VMXNET3_IT_MSI;
2168                         return;
2169                 }
2170         }
2171
2172         adapter->intr.type = VMXNET3_IT_INTX;
2173
2174         /* INT-X related setting */
2175         adapter->intr.num_intrs = 1;
2176 }
2177
2178
2179 static void
2180 vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter)
2181 {
2182         if (adapter->intr.type == VMXNET3_IT_MSIX)
2183                 pci_disable_msix(adapter->pdev);
2184         else if (adapter->intr.type == VMXNET3_IT_MSI)
2185                 pci_disable_msi(adapter->pdev);
2186         else
2187                 BUG_ON(adapter->intr.type != VMXNET3_IT_INTX);
2188 }
2189
2190
2191 static void
2192 vmxnet3_tx_timeout(struct net_device *netdev)
2193 {
2194         struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2195         adapter->tx_timeout_count++;
2196
2197         printk(KERN_ERR "%s: tx hang\n", adapter->netdev->name);
2198         schedule_work(&adapter->work);
2199 }
2200
2201
2202 static void
2203 vmxnet3_reset_work(struct work_struct *data)
2204 {
2205         struct vmxnet3_adapter *adapter;
2206
2207         adapter = container_of(data, struct vmxnet3_adapter, work);
2208
2209         /* if another thread is resetting the device, no need to proceed */
2210         if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
2211                 return;
2212
2213         /* if the device is closed, we must leave it alone */
2214         if (netif_running(adapter->netdev)) {
2215                 printk(KERN_INFO "%s: resetting\n", adapter->netdev->name);
2216                 vmxnet3_quiesce_dev(adapter);
2217                 vmxnet3_reset_dev(adapter);
2218                 vmxnet3_activate_dev(adapter);
2219         } else {
2220                 printk(KERN_INFO "%s: already closed\n", adapter->netdev->name);
2221         }
2222
2223         clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
2224 }
2225
2226
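/*
 * Probe path: allocate the netdev plus the DMA-coherent driver_shared
 * and queue descriptor areas, map the BARs, then perform the version
 * handshake (reading VRRS/UVRS and writing back 1 selects hardware and
 * UPT revision 1) before declaring features, reading the MAC and
 * registering the netdev.
 */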
2227 static int __devinit
2228 vmxnet3_probe_device(struct pci_dev *pdev,
2229                      const struct pci_device_id *id)
2230 {
2231         static const struct net_device_ops vmxnet3_netdev_ops = {
2232                 .ndo_open = vmxnet3_open,
2233                 .ndo_stop = vmxnet3_close,
2234                 .ndo_start_xmit = vmxnet3_xmit_frame,
2235                 .ndo_set_mac_address = vmxnet3_set_mac_addr,
2236                 .ndo_change_mtu = vmxnet3_change_mtu,
2237                 .ndo_get_stats = vmxnet3_get_stats,
2238                 .ndo_tx_timeout = vmxnet3_tx_timeout,
2239                 .ndo_set_multicast_list = vmxnet3_set_mc,
2240                 .ndo_vlan_rx_register = vmxnet3_vlan_rx_register,
2241                 .ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid,
2242                 .ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid,
2243 #ifdef CONFIG_NET_POLL_CONTROLLER
2244                 .ndo_poll_controller = vmxnet3_netpoll,
2245 #endif
2246         };
2247         int err;
2248         bool dma64 = false; /* stupid gcc */
2249         u32 ver;
2250         struct net_device *netdev;
2251         struct vmxnet3_adapter *adapter;
2252         u8 mac[ETH_ALEN];
2253
2254         netdev = alloc_etherdev(sizeof(struct vmxnet3_adapter));
2255         if (!netdev) {
2256                 printk(KERN_ERR "Failed to alloc ethernet device for adapter "
2257                         "%s\n", pci_name(pdev));
2258                 return -ENOMEM;
2259         }
2260
2261         pci_set_drvdata(pdev, netdev);
2262         adapter = netdev_priv(netdev);
2263         adapter->netdev = netdev;
2264         adapter->pdev = pdev;
2265
2266         adapter->shared = pci_alloc_consistent(adapter->pdev,
2267                           sizeof(struct Vmxnet3_DriverShared),
2268                           &adapter->shared_pa);
2269         if (!adapter->shared) {
2270                 printk(KERN_ERR "Failed to allocate memory for %s\n",
2271                         pci_name(pdev));
2272                 err = -ENOMEM;
2273                 goto err_alloc_shared;
2274         }
2275
2276         adapter->tqd_start = pci_alloc_consistent(adapter->pdev,
2277                              sizeof(struct Vmxnet3_TxQueueDesc) +
2278                              sizeof(struct Vmxnet3_RxQueueDesc),
2279                              &adapter->queue_desc_pa);
2280
2281         if (!adapter->tqd_start) {
2282                 printk(KERN_ERR "Failed to allocate memory for %s\n",
2283                         pci_name(pdev));
2284                 err = -ENOMEM;
2285                 goto err_alloc_queue_desc;
2286         }
2287         adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start
2288                                                             + 1);
2289
2290         adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL);
2291         if (adapter->pm_conf == NULL) {
2292                 printk(KERN_ERR "Failed to allocate memory for %s\n",
2293                         pci_name(pdev));
2294                 err = -ENOMEM;
2295                 goto err_alloc_pm;
2296         }
2297
2298         err = vmxnet3_alloc_pci_resources(adapter, &dma64);
2299         if (err < 0)
2300                 goto err_alloc_pci;
2301
2302         ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
2303         if (ver & 1) {
2304                 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1);
2305         } else {
2306                 printk(KERN_ERR "Incompatible h/w version (0x%x) for adapter"
2307                        " %s\n", ver, pci_name(pdev));
2308                 err = -EBUSY;
2309                 goto err_ver;
2310         }
2311
2312         ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
2313         if (ver & 1) {
2314                 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
2315         } else {
2316                 printk(KERN_ERR "Incompatible upt version (0x%x) for "
2317                        "adapter %s\n", ver, pci_name(pdev));
2318                 err = -EBUSY;
2319                 goto err_ver;
2320         }
2321
2322         vmxnet3_declare_features(adapter, dma64);
2323
2324         adapter->dev_number = atomic_read(&devices_found);
2325         vmxnet3_alloc_intr_resources(adapter);
2326
2327         vmxnet3_read_mac_addr(adapter, mac);
2328         memcpy(netdev->dev_addr,  mac, netdev->addr_len);
2329
2330         netdev->netdev_ops = &vmxnet3_netdev_ops;
2331         netdev->watchdog_timeo = 5 * HZ;
2332         vmxnet3_set_ethtool_ops(netdev);
2333
2334         INIT_WORK(&adapter->work, vmxnet3_reset_work);
2335
2336         netif_napi_add(netdev, &adapter->napi, vmxnet3_poll, 64);
2337         SET_NETDEV_DEV(netdev, &pdev->dev);
2338         err = register_netdev(netdev);
2339
2340         if (err) {
2341                 printk(KERN_ERR "Failed to register adapter %s\n",
2342                         pci_name(pdev));
2343                 goto err_register;
2344         }
2345
2346         set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
2347         atomic_inc(&devices_found);
2348         return 0;
2349
2350 err_register:
2351         vmxnet3_free_intr_resources(adapter);
2352 err_ver:
2353         vmxnet3_free_pci_resources(adapter);
2354 err_alloc_pci:
2355         kfree(adapter->pm_conf);
2356 err_alloc_pm:
2357         pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_TxQueueDesc) +
2358                             sizeof(struct Vmxnet3_RxQueueDesc),
2359                             adapter->tqd_start, adapter->queue_desc_pa);
2360 err_alloc_queue_desc:
2361         pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
2362                             adapter->shared, adapter->shared_pa);
2363 err_alloc_shared:
2364         pci_set_drvdata(pdev, NULL);
2365         free_netdev(netdev);
2366         return err;
2367 }
2368
2369
2370 static void __devexit
2371 vmxnet3_remove_device(struct pci_dev *pdev)
2372 {
2373         struct net_device *netdev = pci_get_drvdata(pdev);
2374         struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2375
2376         flush_scheduled_work();
2377
2378         unregister_netdev(netdev);
2379
2380         vmxnet3_free_intr_resources(adapter);
2381         vmxnet3_free_pci_resources(adapter);
2382         kfree(adapter->pm_conf);
2383         pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_TxQueueDesc) +
2384                             sizeof(struct Vmxnet3_RxQueueDesc),
2385                             adapter->tqd_start, adapter->queue_desc_pa);
2386         pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
2387                             adapter->shared, adapter->shared_pa);
2388         free_netdev(netdev);
2389 }
2390
2391
2392 #ifdef CONFIG_PM
2393
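/*
 * Build the wake-up patterns requested by adapter->wol (unicast frames
 * to our MAC, ARP requests for our IPv4 address, magic packets) into
 * pm_conf, point pmConfDesc at it and issue UPDATE_PMCFG before cutting
 * power.
 */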
2394 static int
2395 vmxnet3_suspend(struct device *device)
2396 {
2397         struct pci_dev *pdev = to_pci_dev(device);
2398         struct net_device *netdev = pci_get_drvdata(pdev);
2399         struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2400         struct Vmxnet3_PMConf *pmConf;
2401         struct ethhdr *ehdr;
2402         struct arphdr *ahdr;
2403         u8 *arpreq;
2404         struct in_device *in_dev;
2405         struct in_ifaddr *ifa;
2406         int i = 0;
2407
2408         if (!netif_running(netdev))
2409                 return 0;
2410
2411         vmxnet3_disable_all_intrs(adapter);
2412         vmxnet3_free_irqs(adapter);
2413         vmxnet3_free_intr_resources(adapter);
2414
2415         netif_device_detach(netdev);
2416         netif_stop_queue(netdev);
2417
2418         /* Create wake-up filters. */
2419         pmConf = adapter->pm_conf;
2420         memset(pmConf, 0, sizeof(*pmConf));
2421
2422         if (adapter->wol & WAKE_UCAST) {
2423                 pmConf->filters[i].patternSize = ETH_ALEN;
2424                 pmConf->filters[i].maskSize = 1;
2425                 memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
2426                 pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */
2427
2428                 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
2429                 i++;
2430         }
2431
2432         if (adapter->wol & WAKE_ARP) {
2433                 in_dev = in_dev_get(netdev);
2434                 if (!in_dev)
2435                         goto skip_arp;
2436
2437                 ifa = (struct in_ifaddr *)in_dev->ifa_list;
2438                 if (!ifa)
2439                         goto skip_arp;
2440
2441                 pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header */
2442                         sizeof(struct arphdr) +         /* ARP header */
2443                         2 * ETH_ALEN +          /* 2 Ethernet addresses */
2444                         2 * sizeof(u32);        /* 2 IPv4 addresses */
2445                 pmConf->filters[i].maskSize =
2446                         (pmConf->filters[i].patternSize - 1) / 8 + 1;
2447
2448                 /* ETH_P_ARP in Ethernet header. */
2449                 ehdr = (struct ethhdr *)pmConf->filters[i].pattern;
2450                 ehdr->h_proto = htons(ETH_P_ARP);
2451
2452                 /* ARPOP_REQUEST in ARP header. */
2453                 ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN];
2454                 ahdr->ar_op = htons(ARPOP_REQUEST);
2455                 arpreq = (u8 *)(ahdr + 1);
2456
2457                 /* The Unicast IPv4 address in 'tip' field. */
2458                 arpreq += 2 * ETH_ALEN + sizeof(u32);
2459                 *(u32 *)arpreq = ifa->ifa_address;
2460
2461                 /* The mask for the relevant bits. */
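                /*
                 * Each mask byte is a bitmap over eight consecutive pattern
                 * bytes: bit k of mask[n] covers pattern byte 8 * n + k.
                 * So 0x30 in mask[1] matches bytes 12-13 (the EtherType),
                 * 0x30 in mask[2] matches bytes 20-21 (ar_op), and the
                 * 0xC0/0x03 pair matches bytes 38-41, the ARP target IP.
                 */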
2462                 pmConf->filters[i].mask[0] = 0x00;
2463                 pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */
2464                 pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */
2465                 pmConf->filters[i].mask[3] = 0x00;
2466                 pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */
2467                 pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
2468                 in_dev_put(in_dev);
2469
2470                 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
2471                 i++;
2472         }
2473
2474 skip_arp:
2475         if (adapter->wol & WAKE_MAGIC)
2476                 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;
2477
2478         pmConf->numFilters = i;
2479
2480         adapter->shared->devRead.pmConfDesc.confVer = 1;
2481         adapter->shared->devRead.pmConfDesc.confLen = sizeof(*pmConf);
2482         adapter->shared->devRead.pmConfDesc.confPA = virt_to_phys(pmConf);
2483
2484         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2485                                VMXNET3_CMD_UPDATE_PMCFG);
2486
2487         pci_save_state(pdev);
2488         pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
2489                         adapter->wol);
2490         pci_disable_device(pdev);
2491         pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND));
2492
2493         return 0;
2494 }
2495
2496
2497 static int
2498 vmxnet3_resume(struct device *device)
2499 {
2500         int err;
2501         struct pci_dev *pdev = to_pci_dev(device);
2502         struct net_device *netdev = pci_get_drvdata(pdev);
2503         struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2504         struct Vmxnet3_PMConf *pmConf;
2505
2506         if (!netif_running(netdev))
2507                 return 0;
2508
2509         /* Destroy wake-up filters. */
2510         pmConf = adapter->pm_conf;
2511         memset(pmConf, 0, sizeof(*pmConf));
2512
2513         adapter->shared->devRead.pmConfDesc.confVer = 1;
2514         adapter->shared->devRead.pmConfDesc.confLen = sizeof(*pmConf);
2515         adapter->shared->devRead.pmConfDesc.confPA = virt_to_phys(pmConf);
2516
2517         netif_device_attach(netdev);
2518         pci_set_power_state(pdev, PCI_D0);
2519         pci_restore_state(pdev);
2520         err = pci_enable_device_mem(pdev);
2521         if (err != 0)
2522                 return err;
2523
2524         pci_enable_wake(pdev, PCI_D0, 0);
2525
2526         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2527                                VMXNET3_CMD_UPDATE_PMCFG);
2528         vmxnet3_alloc_intr_resources(adapter);
2529         vmxnet3_request_irqs(adapter);
2530         vmxnet3_enable_all_intrs(adapter);
2531
2532         return 0;
2533 }
2534
2535 static struct dev_pm_ops vmxnet3_pm_ops = {
2536         .suspend = vmxnet3_suspend,
2537         .resume = vmxnet3_resume,
2538 };
2539 #endif
2540
2541 static struct pci_driver vmxnet3_driver = {
2542         .name           = vmxnet3_driver_name,
2543         .id_table       = vmxnet3_pciid_table,
2544         .probe          = vmxnet3_probe_device,
2545         .remove         = __devexit_p(vmxnet3_remove_device),
2546 #ifdef CONFIG_PM
2547         .driver.pm      = &vmxnet3_pm_ops,
2548 #endif
2549 };
2550
2551
2552 static int __init
2553 vmxnet3_init_module(void)
2554 {
2555         printk(KERN_INFO "%s - version %s\n", VMXNET3_DRIVER_DESC,
2556                 VMXNET3_DRIVER_VERSION_REPORT);
2557         return pci_register_driver(&vmxnet3_driver);
2558 }
2559
2560 module_init(vmxnet3_init_module);
2561
2562
2563 static void
2564 vmxnet3_exit_module(void)
2565 {
2566         pci_unregister_driver(&vmxnet3_driver);
2567 }
2568
2569 module_exit(vmxnet3_exit_module);
2570
2571 MODULE_AUTHOR("VMware, Inc.");
2572 MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC);
2573 MODULE_LICENSE("GPL v2");
2574 MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING);