/* typhoon: Use request_firmware()
 * From linux-2.6.git: drivers/net/typhoon.c
 */
1 /* typhoon.c: A Linux Ethernet device driver for 3Com 3CR990 family of NICs */
2 /*
3         Written 2002-2004 by David Dillow <dave@thedillows.org>
4         Based on code written 1998-2000 by Donald Becker <becker@scyld.com> and
5         Linux 2.2.x driver by David P. McLean <davidpmclean@yahoo.com>.
6
7         This software may be used and distributed according to the terms of
8         the GNU General Public License (GPL), incorporated herein by reference.
9         Drivers based on or derived from this code fall under the GPL and must
10         retain the authorship, copyright and license notice.  This file is not
11         a complete program and may only be used when the entire operating
12         system is licensed under the GPL.
13
14         This software is available on a public web site. It may enable
15         cryptographic capabilities of the 3Com hardware, and may be
16         exported from the United States under License Exception "TSU"
17         pursuant to 15 C.F.R. Section 740.13(e).
18
19         This work was funded by the National Library of Medicine under
20         the Department of Energy project number 0274DD06D1 and NLM project
21         number Y1-LM-2015-01.
22
23         This driver is designed for the 3Com 3CR990 Family of cards with the
24         3XP Processor. It has been tested on x86 and sparc64.
25
26         KNOWN ISSUES:
27         *) The current firmware always strips the VLAN tag off, even if
28                 we tell it not to. You should filter VLANs at the switch
29                 as a workaround (good practice in any event) until we can
30                 get this fixed.
31         *) Cannot DMA Rx packets to a 2 byte aligned address. Also firmware
32                 issue. Hopefully 3Com will fix it.
33         *) Waiting for a command response takes 8ms due to non-preemptable
34                 polling. Only significant for getting stats and creating
35         SAs, but an ugly wart nevertheless.
36
37         TODO:
38         *) Doesn't do IPSEC offloading. Yet. Keep yer pants on, it's coming.
39         *) Add more support for ethtool (especially for NIC stats)
40         *) Allow disabling of RX checksum offloading
41         *) Fix MAC changing to work while the interface is up
42                 (Need to put commands on the TX ring, which changes
43                 the locking)
44         *) Add in FCS to {rx,tx}_bytes, since the hardware doesn't. See
45                 http://oss.sgi.com/cgi-bin/mesg.cgi?a=netdev&i=20031215152211.7003fe8e.rddunlap%40osdl.org
46 */
47
/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
 * Setting to > 1518 effectively disables this feature.
 * Exposed as a module parameter (see module_param() below).
 */
static int rx_copybreak = 200;

/* Should we use MMIO or Port IO?
 * 0: Port IO
 * 1: MMIO
 * 2: Try MMIO, fallback to Port IO
 * Exposed as a module parameter (see module_param() below).
 */
static unsigned int use_mmio = 2;

/* end user-configurable values */

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
 */
static const int multicast_filter_limit = 32;
65
/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
 * The compiler will convert <unsigned>'%'<2^N> into a bit mask.
 * Making the Tx ring too large decreases the effectiveness of channel
 * bonding and packet priority.
 * There are no ill effects from too-large receive rings.
 *
 * We don't currently use the Hi Tx ring so, don't make it very big.
 *
 * Beware that if we start using the Hi Tx ring, we will need to change
 * typhoon_num_free_tx() and typhoon_tx_complete() to account for that.
 */
#define TXHI_ENTRIES            2
#define TXLO_ENTRIES            128
#define RX_ENTRIES              32
#define COMMAND_ENTRIES         16
#define RESPONSE_ENTRIES        32

/* Ring sizes in bytes, used for ring-wrap arithmetic when copying
 * commands and responses in and out.
 */
#define COMMAND_RING_SIZE       (COMMAND_ENTRIES * sizeof(struct cmd_desc))
#define RESPONSE_RING_SIZE      (RESPONSE_ENTRIES * sizeof(struct resp_desc))

/* The 3XP will preload and remove 64 entries from the free buffer
 * list, and we need one entry to keep the ring from wrapping, so
 * to keep this a power of two, we use 128 entries.
 */
#define RXFREE_ENTRIES          128
#define RXENT_ENTRIES           (RXFREE_ENTRIES - 1)

/* Operational parameters that usually are not changed. */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (2*HZ)

/* Rx buffer size -- larger than the 1518-byte maximum Ethernet frame */
#define PKT_BUF_SZ              1536

/* Driver identification and printk() prefixes */
#define DRV_MODULE_NAME         "typhoon"
#define DRV_MODULE_VERSION      "1.5.8"
#define DRV_MODULE_RELDATE      "06/11/09"
#define PFX                     DRV_MODULE_NAME ": "
#define ERR_PFX                 KERN_ERR PFX
106 #define ERR_PFX                 KERN_ERR PFX
107
108 #include <linux/module.h>
109 #include <linux/kernel.h>
110 #include <linux/string.h>
111 #include <linux/timer.h>
112 #include <linux/errno.h>
113 #include <linux/ioport.h>
114 #include <linux/slab.h>
115 #include <linux/interrupt.h>
116 #include <linux/pci.h>
117 #include <linux/netdevice.h>
118 #include <linux/etherdevice.h>
119 #include <linux/skbuff.h>
120 #include <linux/mm.h>
121 #include <linux/init.h>
122 #include <linux/delay.h>
123 #include <linux/ethtool.h>
124 #include <linux/if_vlan.h>
125 #include <linux/crc32.h>
126 #include <linux/bitops.h>
127 #include <asm/processor.h>
128 #include <asm/io.h>
129 #include <asm/uaccess.h>
130 #include <linux/in6.h>
131 #include <linux/dma-mapping.h>
132 #include <linux/firmware.h>
133
134 #include "typhoon.h"
135
/* Version banner printed once at probe time */
static char version[] __devinitdata =
    "typhoon.c: version " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

/* Runtime firmware image name, loaded via request_firmware() and
 * advertised to userspace tooling through MODULE_FIRMWARE().
 */
#define FIRMWARE_NAME           "3com/typhoon.bin"
MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_NAME);
MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
MODULE_PARM_DESC(rx_copybreak, "Packets smaller than this are copied and "
                               "the buffer given back to the NIC. Default "
                               "is 200.");
MODULE_PARM_DESC(use_mmio, "Use MMIO (1) or PIO(0) to access the NIC. "
                           "Default is to try MMIO and fallback to PIO.");
module_param(rx_copybreak, int, 0);
module_param(use_mmio, int, 0);

/* The 3XP's TSO scatter/gather list holds only 32 entries, so disable
 * TSO entirely if the kernel could hand us more fragments than that.
 */
#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
#undef NETIF_F_TSO
#endif

/* A full worst-case packet (data + all fragments, twice over) must fit */
#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
#error TX ring too small!
#endif
161
/* Per-model marketing name and hardware capabilities */
struct typhoon_card_info {
	char *name;
	int capabilities;	/* bitwise OR of the TYPHOON_* flags below */
};

/* Capability flags for typhoon_card_info.capabilities */
#define TYPHOON_CRYPTO_NONE             0x00
#define TYPHOON_CRYPTO_DES              0x01
#define TYPHOON_CRYPTO_3DES             0x02
#define TYPHOON_CRYPTO_VARIABLE         0x04
#define TYPHOON_FIBER                   0x08
#define TYPHOON_WAKEUP_NEEDS_RESET      0x10

/* Indexes into the typhoon_card_info[] table below */
enum typhoon_cards {
	TYPHOON_TX = 0, TYPHOON_TX95, TYPHOON_TX97, TYPHOON_SVR,
	TYPHOON_SVR95, TYPHOON_SVR97, TYPHOON_TXM, TYPHOON_BSVR,
	TYPHOON_FX95, TYPHOON_FX97, TYPHOON_FX95SVR, TYPHOON_FX97SVR,
	TYPHOON_FXM,
};
180
/* directly indexed by enum typhoon_cards, above -- keep the two in sync */
static struct typhoon_card_info typhoon_card_info[] __devinitdata = {
	{ "3Com Typhoon (3C990-TX)",
		TYPHOON_CRYPTO_NONE},
	{ "3Com Typhoon (3CR990-TX-95)",
		TYPHOON_CRYPTO_DES},
	{ "3Com Typhoon (3CR990-TX-97)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
	{ "3Com Typhoon (3C990SVR)",
		TYPHOON_CRYPTO_NONE},
	{ "3Com Typhoon (3CR990SVR95)",
		TYPHOON_CRYPTO_DES},
	{ "3Com Typhoon (3CR990SVR97)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
	{ "3Com Typhoon2 (3C990B-TX-M)",
		TYPHOON_CRYPTO_VARIABLE},
	{ "3Com Typhoon2 (3C990BSVR)",
		TYPHOON_CRYPTO_VARIABLE},
	{ "3Com Typhoon (3CR990-FX-95)",
		TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
	{ "3Com Typhoon (3CR990-FX-97)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
	{ "3Com Typhoon (3CR990-FX-95 Server)",
		TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
	{ "3Com Typhoon (3CR990-FX-97 Server)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
	{ "3Com Typhoon2 (3C990B-FX-97)",
		TYPHOON_CRYPTO_VARIABLE | TYPHOON_FIBER},
};
210
/* Notes on the new subsystem numbering scheme:
 * bits 0-1 indicate crypto capabilities: (0) variable, (1) DES, or (2) 3DES
 * bit 4 indicates if this card has secured firmware (we don't support it)
 * bit 8 indicates if this is a (0) copper or (1) fiber card
 * bits 12-16 indicate card type: (0) client and (1) server
 */
/* driver_data carries the enum typhoon_cards index for each match */
static struct pci_device_id typhoon_pci_tbl[] = {
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX95 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_97,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX97 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
	  PCI_ANY_ID, 0x1000, 0, 0, TYPHOON_TXM },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
	  PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FXM },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
	  PCI_ANY_ID, 0x2000, 0, 0, TYPHOON_BSVR },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x1101, 0, 0, TYPHOON_FX95 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FX97 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x2101, 0, 0, TYPHOON_FX95SVR },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x2102, 0, 0, TYPHOON_FX97SVR },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR95,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR95 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR97,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR97 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, typhoon_pci_tbl);
247
/* Define the shared memory area
 * Align everything the 3XP will normally be using.
 * We'll need to move/align txHi if we start using that ring.
 *
 * This whole struct is one DMA-coherent allocation shared with the NIC,
 * so it is packed and each ring is cacheline aligned for the card's sake.
 */
#define __3xp_aligned   ____cacheline_aligned
struct typhoon_shared {
	struct typhoon_interface	iface;
	struct typhoon_indexes		indexes			__3xp_aligned;
	struct tx_desc			txLo[TXLO_ENTRIES]	__3xp_aligned;
	struct rx_desc			rxLo[RX_ENTRIES]	__3xp_aligned;
	struct rx_desc			rxHi[RX_ENTRIES]	__3xp_aligned;
	struct cmd_desc			cmd[COMMAND_ENTRIES]	__3xp_aligned;
	struct resp_desc		resp[RESPONSE_ENTRIES]	__3xp_aligned;
	struct rx_free			rxBuff[RXFREE_ENTRIES]	__3xp_aligned;
	u32				zeroWord;
	struct tx_desc			txHi[TXHI_ENTRIES];
} __attribute__ ((packed));
265
/* Host-side bookkeeping for one Rx buffer handed to the NIC */
struct rxbuff_ent {
	struct sk_buff *skb;
	dma_addr_t	dma_addr;	/* DMA mapping of skb's data area */
};
270
/* Per-adapter driver state. Fields are deliberately grouped by cache
 * line to keep the Tx hot path, the Irq/Rx hot path, and the slow
 * (command/config) path from sharing lines.
 */
struct typhoon {
	/* Tx cache line section */
	struct transmit_ring	txLoRing	____cacheline_aligned;
	struct pci_dev *	tx_pdev;
	void __iomem		*tx_ioaddr;
	u32			txlo_dma_addr;

	/* Irq/Rx cache line section */
	void __iomem		*ioaddr		____cacheline_aligned;
	struct typhoon_indexes *indexes;	/* points into shared area */
	u8			awaiting_resp;	/* a command response is pending */
	u8			duplex;
	u8			speed;
	u8			card_state;	/* enum state_values */
	struct basic_ring	rxLoRing;
	struct pci_dev *	pdev;
	struct net_device *	dev;
	struct napi_struct	napi;
	spinlock_t		state_lock;
	struct vlan_group *	vlgrp;
	struct basic_ring	rxHiRing;
	struct basic_ring	rxBuffRing;
	struct rxbuff_ent	rxbuffers[RXENT_ENTRIES];

	/* general section */
	spinlock_t		command_lock	____cacheline_aligned;
	struct basic_ring	cmdRing;
	struct basic_ring	respRing;
	struct net_device_stats	stats;
	struct net_device_stats	stats_saved;
	const char *		name;
	struct typhoon_shared *	shared;
	dma_addr_t		shared_dma;	/* bus address of *shared */
	__le16			xcvr_select;
	__le16			wol_events;
	__le32			offload;	/* TYPHOON_OFFLOAD_* bits */

	/* unused stuff (future use) */
	int			capabilities;
	struct transmit_ring	txHiRing;
};
312
/* wait_type argument for typhoon_reset() and friends: don't wait at all,
 * busy-wait with udelay(), or sleep while waiting.
 */
enum completion_wait_values {
	NoWait = 0, WaitNoSleep, WaitSleep,
};

/* These are the values for the typhoon.card_state variable.
 * These determine where the statistics will come from in get_stats().
 * The sleep image does not support the statistics we need.
 */
enum state_values {
	Sleeping = 0, Running,
};
324
/* PCI writes are not guaranteed to be posted in order, but outstanding writes
 * cannot pass a read, so this forces current writes to post.
 * (Only meaningful for MMIO; port I/O is not posted.)
 */
#define typhoon_post_pci_writes(x) \
	do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)

/* We'll wait up to six seconds for a reset, and half a second normally.
 */
#define TYPHOON_UDELAY			50
#define TYPHOON_RESET_TIMEOUT_SLEEP	(6 * HZ)
#define TYPHOON_RESET_TIMEOUT_NOSLEEP	((6 * 1000000) / TYPHOON_UDELAY)
#define TYPHOON_WAIT_TIMEOUT		((1000000 / 2) / TYPHOON_UDELAY)

/* TSO helpers -- compile to no-ops when the kernel has no TSO support */
#if defined(NETIF_F_TSO)
#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
#define TSO_NUM_DESCRIPTORS	2
#define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
#else
#define NETIF_F_TSO		0
#define skb_tso_size(x)		0
#define TSO_NUM_DESCRIPTORS	0
#define TSO_OFFLOAD_ON		0
#endif
348
349 static inline void
350 typhoon_inc_index(u32 *index, const int count, const int num_entries)
351 {
352         /* Increment a ring index -- we can use this for all rings execept
353          * the Rx rings, as they use different size descriptors
354          * otherwise, everything is the same size as a cmd_desc
355          */
356         *index += count * sizeof(struct cmd_desc);
357         *index %= num_entries * sizeof(struct cmd_desc);
358 }
359
/* Advance a command-ring index by @count entries, with wrap */
static inline void
typhoon_inc_cmd_index(u32 *index, const int count)
{
	typhoon_inc_index(index, count, COMMAND_ENTRIES);
}
365
/* Advance a response-ring index by @count entries, with wrap */
static inline void
typhoon_inc_resp_index(u32 *index, const int count)
{
	typhoon_inc_index(index, count, RESPONSE_ENTRIES);
}
371
/* Advance a free-buffer-ring index by @count entries, with wrap */
static inline void
typhoon_inc_rxfree_index(u32 *index, const int count)
{
	typhoon_inc_index(index, count, RXFREE_ENTRIES);
}
377
/* Advance a Tx-ring index by @count entries, with wrap */
static inline void
typhoon_inc_tx_index(u32 *index, const int count)
{
	/* if we start using the Hi Tx ring, this needs updating */
	typhoon_inc_index(index, count, TXLO_ENTRIES);
}
384
385 static inline void
386 typhoon_inc_rx_index(u32 *index, const int count)
387 {
388         /* sizeof(struct rx_desc) != sizeof(struct cmd_desc) */
389         *index += count * sizeof(struct rx_desc);
390         *index %= RX_ENTRIES * sizeof(struct rx_desc);
391 }
392
/* Soft-reset the 3XP and optionally wait for it to come back up.
 *
 * @ioaddr: mapped register base of the card
 * @wait_type: NoWait -- issue the reset and return immediately;
 *             WaitNoSleep -- busy-wait (udelay) for the boot handshake;
 *             WaitSleep -- may sleep while waiting (jiffies-based timeout).
 *
 * Returns 0 once the card reports TYPHOON_STATUS_WAITING_FOR_HOST (or
 * unconditionally when not waiting), -ETIMEDOUT otherwise.
 */
static int
typhoon_reset(void __iomem *ioaddr, int wait_type)
{
	int i, err = 0;
	int timeout;

	if(wait_type == WaitNoSleep)
		timeout = TYPHOON_RESET_TIMEOUT_NOSLEEP;
	else
		timeout = TYPHOON_RESET_TIMEOUT_SLEEP;

	/* mask and ack all interrupts before yanking the card */
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);

	/* pulse the reset line: assert, let it post, then deassert */
	iowrite32(TYPHOON_RESET_ALL, ioaddr + TYPHOON_REG_SOFT_RESET);
	typhoon_post_pci_writes(ioaddr);
	udelay(1);
	iowrite32(TYPHOON_RESET_NONE, ioaddr + TYPHOON_REG_SOFT_RESET);

	if(wait_type != NoWait) {
		for(i = 0; i < timeout; i++) {
			if(ioread32(ioaddr + TYPHOON_REG_STATUS) ==
			   TYPHOON_STATUS_WAITING_FOR_HOST)
				goto out;

			if(wait_type == WaitSleep)
				schedule_timeout_uninterruptible(1);
			else
				udelay(TYPHOON_UDELAY);
		}

		err = -ETIMEDOUT;
	}

out:
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);

	/* The 3XP seems to need a little extra time to complete the load
	 * of the sleep image before we can reliably boot it. Failure to
	 * do this occasionally results in a hung adapter after boot in
	 * typhoon_init_one() while trying to read the MAC address or
	 * putting the card to sleep. 3Com's driver waits 5ms, but
	 * that seems to be overkill. However, if we can sleep, we might
	 * as well give it that much time. Otherwise, we'll give it 500us,
	 * which should be enough (I've see it work well at 100us, but still
	 * saw occasional problems.)
	 */
	if(wait_type == WaitSleep)
		msleep(5);
	else
		udelay(500);
	return err;
}
447
448 static int
449 typhoon_wait_status(void __iomem *ioaddr, u32 wait_value)
450 {
451         int i, err = 0;
452
453         for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
454                 if(ioread32(ioaddr + TYPHOON_REG_STATUS) == wait_value)
455                         goto out;
456                 udelay(TYPHOON_UDELAY);
457         }
458
459         err = -ETIMEDOUT;
460
461 out:
462         return err;
463 }
464
465 static inline void
466 typhoon_media_status(struct net_device *dev, struct resp_desc *resp)
467 {
468         if(resp->parm1 & TYPHOON_MEDIA_STAT_NO_LINK)
469                 netif_carrier_off(dev);
470         else
471                 netif_carrier_on(dev);
472 }
473
/* Answer the 3XP's "hello" heartbeat by posting a no-response
 * TYPHOON_CMD_HELLO_RESP command on the command ring.
 */
static inline void
typhoon_hello(struct typhoon *tp)
{
	struct basic_ring *ring = &tp->cmdRing;
	struct cmd_desc *cmd;

	/* We only get a hello request if we've not sent anything to the
	 * card in a long while. If the lock is held, then we're in the
	 * process of issuing a command, so we don't need to respond.
	 */
	if(spin_trylock(&tp->command_lock)) {
		cmd = (struct cmd_desc *)(ring->ringBase + ring->lastWrite);
		typhoon_inc_cmd_index(&ring->lastWrite, 1);

		INIT_COMMAND_NO_RESPONSE(cmd, TYPHOON_CMD_HELLO_RESP);
		/* descriptor must be visible before the doorbell write */
		smp_wmb();
		iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
		spin_unlock(&tp->command_lock);
	}
}
494
/* Drain the response ring.
 *
 * If @resp_save is non-NULL, the first sequenced response (resp->seqNo
 * set) is copied into it -- at most @resp_size descriptors, handling
 * ring wrap -- and an over-long response is flagged with
 * TYPHOON_RESP_ERROR instead. Unsolicited responses (media status,
 * hello) are handled inline; anything else is dumped to the log.
 *
 * Returns non-zero once the awaited response has been captured (or if
 * none was awaited to begin with).
 */
static int
typhoon_process_response(struct typhoon *tp, int resp_size,
				struct resp_desc *resp_save)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct resp_desc *resp;
	u8 *base = tp->respRing.ringBase;
	int count, len, wrap_len;
	u32 cleared;
	u32 ready;

	cleared = le32_to_cpu(indexes->respCleared);
	ready = le32_to_cpu(indexes->respReady);
	while(cleared != ready) {
		resp = (struct resp_desc *)(base + cleared);
		count = resp->numDesc + 1;
		if(resp_save && resp->seqNo) {
			if(count > resp_size) {
				resp_save->flags = TYPHOON_RESP_ERROR;
				goto cleanup;
			}

			/* copy out, splitting at the end of the ring */
			wrap_len = 0;
			len = count * sizeof(*resp);
			if(unlikely(cleared + len > RESPONSE_RING_SIZE)) {
				wrap_len = cleared + len - RESPONSE_RING_SIZE;
				len = RESPONSE_RING_SIZE - cleared;
			}

			memcpy(resp_save, resp, len);
			if(unlikely(wrap_len)) {
				resp_save += len / sizeof(*resp);
				memcpy(resp_save, base, wrap_len);
			}

			/* mark the awaited response as consumed */
			resp_save = NULL;
		} else if(resp->cmd == TYPHOON_CMD_READ_MEDIA_STATUS) {
			typhoon_media_status(tp->dev, resp);
		} else if(resp->cmd == TYPHOON_CMD_HELLO_RESP) {
			typhoon_hello(tp);
		} else {
			printk(KERN_ERR "%s: dumping unexpected response "
			       "0x%04x:%d:0x%02x:0x%04x:%08x:%08x\n",
			       tp->name, le16_to_cpu(resp->cmd),
			       resp->numDesc, resp->flags,
			       le16_to_cpu(resp->parm1),
			       le32_to_cpu(resp->parm2),
			       le32_to_cpu(resp->parm3));
		}

cleanup:
		typhoon_inc_resp_index(&cleared, count);
	}

	indexes->respCleared = cpu_to_le32(cleared);
	wmb();
	return (resp_save == NULL);
}
553
554 static inline int
555 typhoon_num_free(int lastWrite, int lastRead, int ringSize)
556 {
557         /* this works for all descriptors but rx_desc, as they are a
558          * different size than the cmd_desc -- everyone else is the same
559          */
560         lastWrite /= sizeof(struct cmd_desc);
561         lastRead /= sizeof(struct cmd_desc);
562         return (ringSize + lastRead - lastWrite - 1) % ringSize;
563 }
564
/* Free entries remaining on the command ring */
static inline int
typhoon_num_free_cmd(struct typhoon *tp)
{
	int lastWrite = tp->cmdRing.lastWrite;
	int cmdCleared = le32_to_cpu(tp->indexes->cmdCleared);

	return typhoon_num_free(lastWrite, cmdCleared, COMMAND_ENTRIES);
}
573
/* Free entries remaining on the response ring */
static inline int
typhoon_num_free_resp(struct typhoon *tp)
{
	int respReady = le32_to_cpu(tp->indexes->respReady);
	int respCleared = le32_to_cpu(tp->indexes->respCleared);

	return typhoon_num_free(respReady, respCleared, RESPONSE_ENTRIES);
}
582
/* Free entries remaining on the low-priority Tx ring */
static inline int
typhoon_num_free_tx(struct transmit_ring *ring)
{
	/* if we start using the Hi Tx ring, this needs updating */
	return typhoon_num_free(ring->lastWrite, ring->lastRead, TXLO_ENTRIES);
}
589
/* Post @num_cmd descriptors from @cmd onto the command ring and, when
 * the command expects a response, busy-wait for it and copy up to
 * @num_resp descriptors into @resp (a local buffer is substituted when
 * the caller passes NULL).
 *
 * Returns 0 on success, -ENOMEM if the rings lack space, -ETIMEDOUT if
 * no response arrived, or -EIO if the card flagged an error response.
 * Takes tp->command_lock; callable from contexts that cannot sleep.
 */
static int
typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
		      int num_resp, struct resp_desc *resp)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct basic_ring *ring = &tp->cmdRing;
	struct resp_desc local_resp;
	int i, err = 0;
	int got_resp;
	int freeCmd, freeResp;
	int len, wrap_len;

	spin_lock(&tp->command_lock);

	freeCmd = typhoon_num_free_cmd(tp);
	freeResp = typhoon_num_free_resp(tp);

	if(freeCmd < num_cmd || freeResp < num_resp) {
		printk("%s: no descs for cmd, had (needed) %d (%d) cmd, "
			"%d (%d) resp\n", tp->name, freeCmd, num_cmd,
			freeResp, num_resp);
		err = -ENOMEM;
		goto out;
	}

	if(cmd->flags & TYPHOON_CMD_RESPOND) {
		/* If we're expecting a response, but the caller hasn't given
		 * us a place to put it, we'll provide one.
		 */
		tp->awaiting_resp = 1;
		if(resp == NULL) {
			resp = &local_resp;
			num_resp = 1;
		}
	}

	/* copy the command(s) in, splitting at the end of the ring */
	wrap_len = 0;
	len = num_cmd * sizeof(*cmd);
	if(unlikely(ring->lastWrite + len > COMMAND_RING_SIZE)) {
		wrap_len = ring->lastWrite + len - COMMAND_RING_SIZE;
		len = COMMAND_RING_SIZE - ring->lastWrite;
	}

	memcpy(ring->ringBase + ring->lastWrite, cmd, len);
	if(unlikely(wrap_len)) {
		struct cmd_desc *wrap_ptr = cmd;
		wrap_ptr += len / sizeof(*cmd);
		memcpy(ring->ringBase, wrap_ptr, wrap_len);
	}

	typhoon_inc_cmd_index(&ring->lastWrite, num_cmd);

	/* "I feel a presence... another warrior is on the mesa."
	 */
	wmb();
	iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
	typhoon_post_pci_writes(tp->ioaddr);

	if((cmd->flags & TYPHOON_CMD_RESPOND) == 0)
		goto out;

	/* Ugh. We'll be here about 8ms, spinning our thumbs, unable to
	 * preempt or do anything other than take interrupts. So, don't
	 * wait for a response unless you have to.
	 *
	 * I've thought about trying to sleep here, but we're called
	 * from many contexts that don't allow that. Also, given the way
	 * 3Com has implemented irq coalescing, we would likely timeout --
	 * this has been observed in real life!
	 *
	 * The big killer is we have to wait to get stats from the card,
	 * though we could go to a periodic refresh of those if we don't
	 * mind them getting somewhat stale. The rest of the waiting
	 * commands occur during open/close/suspend/resume, so they aren't
	 * time critical. Creating SAs in the future will also have to
	 * wait here.
	 */
	got_resp = 0;
	for(i = 0; i < TYPHOON_WAIT_TIMEOUT && !got_resp; i++) {
		if(indexes->respCleared != indexes->respReady)
			got_resp = typhoon_process_response(tp, num_resp,
								resp);
		udelay(TYPHOON_UDELAY);
	}

	if(!got_resp) {
		err = -ETIMEDOUT;
		goto out;
	}

	/* Collect the error response even if we don't care about the
	 * rest of the response
	 */
	if(resp->flags & TYPHOON_RESP_ERROR)
		err = -EIO;

out:
	if(tp->awaiting_resp) {
		tp->awaiting_resp = 0;
		smp_wmb();

		/* Ugh. If a response was added to the ring between
		 * the call to typhoon_process_response() and the clearing
		 * of tp->awaiting_resp, we could have missed the interrupt
		 * and it could hang in the ring an indeterminate amount of
		 * time. So, check for it, and interrupt ourselves if this
		 * is the case.
		 */
		if(indexes->respCleared != indexes->respReady)
			iowrite32(1, tp->ioaddr + TYPHOON_REG_SELF_INTERRUPT);
	}

	spin_unlock(&tp->command_lock);
	return err;
}
705
/* vlan_rx_register hook: record the new vlan_group and, when VLAN use
 * is being turned on for the first time or turned off, update the 3XP's
 * offload task mask to match.
 */
static void
typhoon_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct typhoon *tp = netdev_priv(dev);
	struct cmd_desc xp_cmd;
	int err;

	spin_lock_bh(&tp->state_lock);
	/* !a != !b: did the group change between NULL and non-NULL? */
	if(!tp->vlgrp != !grp) {
		/* We've either been turned on for the first time, or we've
		 * been turned off. Update the 3XP.
		 */
		if(grp)
			tp->offload |= TYPHOON_OFFLOAD_VLAN;
		else
			tp->offload &= ~TYPHOON_OFFLOAD_VLAN;

		/* If the interface is up, the runtime is running -- and we
		 * must be up for the vlan core to call us.
		 *
		 * Do the command outside of the spin lock, as it is slow.
		 */
		INIT_COMMAND_WITH_RESPONSE(&xp_cmd,
					TYPHOON_CMD_SET_OFFLOAD_TASKS);
		xp_cmd.parm2 = tp->offload;
		xp_cmd.parm3 = tp->offload;
		spin_unlock_bh(&tp->state_lock);
		err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
		if(err < 0)
			printk("%s: vlan offload error %d\n", tp->name, -err);
		spin_lock_bh(&tp->state_lock);
	}

	/* now make the change visible */
	tp->vlgrp = grp;
	spin_unlock_bh(&tp->state_lock);
}
743
/* Write a TCP-segmentation (TSO) option descriptor for @skb into
 * @txRing. @ring_dma is added to the in-ring offset to form the bus
 * address (respAddrLo) at which the card writes back bytesTx --
 * presumably the DMA base of this ring; confirm against the caller.
 */
static inline void
typhoon_tso_fill(struct sk_buff *skb, struct transmit_ring *txRing,
			u32 ring_dma)
{
	struct tcpopt_desc *tcpd;
	u32 tcpd_offset = ring_dma;

	tcpd = (struct tcpopt_desc *) (txRing->ringBase + txRing->lastWrite);
	tcpd_offset += txRing->lastWrite;
	tcpd_offset += offsetof(struct tcpopt_desc, bytesTx);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	tcpd->flags = TYPHOON_OPT_DESC | TYPHOON_OPT_TCP_SEG;
	tcpd->numDesc = 1;
	tcpd->mss_flags = cpu_to_le16(skb_tso_size(skb));
	tcpd->mss_flags |= TYPHOON_TSO_FIRST | TYPHOON_TSO_LAST;
	tcpd->respAddrLo = cpu_to_le32(tcpd_offset);
	tcpd->bytesTx = cpu_to_le32(skb->len);
	tcpd->status = 0;
}
764
/* Queue one skb for transmission on the Lo Tx ring.
 *
 * Ring layout per packet: one Tx "header" descriptor, an optional TSO
 * option descriptor, then one fragment descriptor per DMA mapping (the
 * linear head plus each page fragment). The skb pointer itself is
 * stashed in the header descriptor's tx_addr so typhoon_clean_tx() can
 * free it on completion.
 *
 * NOTE(review): no local locking here -- presumably relies on the core
 * serializing ->hard_start_xmit for this device; confirm against the
 * interrupt/clean path.
 */
static int
typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);
	struct transmit_ring *txRing;
	struct tx_desc *txd, *first_txd;
	dma_addr_t skb_dma;
	int numDesc;

	/* we have two rings to choose from, but we only use txLo for now
	 * If we start using the Hi ring as well, we'll need to update
	 * typhoon_stop_runtime(), typhoon_interrupt(), typhoon_num_free_tx(),
	 * and TXHI_ENTRIES to match, as well as update the TSO code below
	 * to get the right DMA address
	 */
	txRing = &tp->txLoRing;

	/* We need one descriptor for each fragment of the sk_buff, plus the
	 * one for the ->data area of it.
	 *
	 * The docs say a maximum of 16 fragment descriptors per TCP option
	 * descriptor, then make a new packet descriptor and option descriptor
	 * for the next 16 fragments. The engineers say just an option
	 * descriptor is needed. I've tested up to 26 fragments with a single
	 * packet descriptor/option descriptor combo, so I use that for now.
	 *
	 * If problems develop with TSO, check this first.
	 */
	numDesc = skb_shinfo(skb)->nr_frags + 1;
	if (skb_is_gso(skb))
		numDesc++;

	/* When checking for free space in the ring, we need to also
	 * account for the initial Tx descriptor, and we always must leave
	 * at least one descriptor unused in the ring so that it doesn't
	 * wrap and look empty.
	 *
	 * The only time we should loop here is when we hit the race
	 * between marking the queue awake and updating the cleared index.
	 * Just loop and it will appear. This comes from the acenic driver.
	 */
	while(unlikely(typhoon_num_free_tx(txRing) < (numDesc + 2)))
		smp_rmb();

	first_txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	first_txd->flags = TYPHOON_TX_DESC | TYPHOON_DESC_VALID;
	first_txd->numDesc = 0;
	first_txd->len = 0;
	/* Not a DMA address: the host-side skb pointer, recovered by
	 * typhoon_clean_tx() to free the skb after the NIC is done.
	 */
	first_txd->tx_addr = (u64)((unsigned long) skb);
	first_txd->processFlags = 0;

	if(skb->ip_summed == CHECKSUM_PARTIAL) {
		/* The 3XP will figure out if this is UDP/TCP */
		first_txd->processFlags |= TYPHOON_TX_PF_TCP_CHKSUM;
		first_txd->processFlags |= TYPHOON_TX_PF_UDP_CHKSUM;
		first_txd->processFlags |= TYPHOON_TX_PF_IP_CHKSUM;
	}

	if(vlan_tx_tag_present(skb)) {
		/* Have the firmware insert the VLAN tag on transmit. */
		first_txd->processFlags |=
		    TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY;
		first_txd->processFlags |=
		    cpu_to_le32(ntohs(vlan_tx_tag_get(skb)) <<
				TYPHOON_TX_PF_VLAN_TAG_SHIFT);
	}

	if (skb_is_gso(skb)) {
		first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT;
		first_txd->numDesc++;

		typhoon_tso_fill(skb, txRing, tp->txlo_dma_addr);
	}

	txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	/* No need to worry about padding packet -- the firmware pads
	 * it with zeros to ETH_ZLEN for us.
	 */
	if(skb_shinfo(skb)->nr_frags == 0) {
		/* Linear skb: a single fragment descriptor covers it. */
		skb_dma = pci_map_single(tp->tx_pdev, skb->data, skb->len,
				       PCI_DMA_TODEVICE);
		txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
		txd->len = cpu_to_le16(skb->len);
		txd->frag.addr = cpu_to_le32(skb_dma);
		txd->frag.addrHi = 0;
		first_txd->numDesc++;
	} else {
		int i, len;

		/* Map the linear head first, then each page fragment. */
		len = skb_headlen(skb);
		skb_dma = pci_map_single(tp->tx_pdev, skb->data, len,
					 PCI_DMA_TODEVICE);
		txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
		txd->len = cpu_to_le16(len);
		txd->frag.addr = cpu_to_le32(skb_dma);
		txd->frag.addrHi = 0;
		first_txd->numDesc++;

		for(i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			void *frag_addr;

			txd = (struct tx_desc *) (txRing->ringBase +
						txRing->lastWrite);
			typhoon_inc_tx_index(&txRing->lastWrite, 1);

			len = frag->size;
			frag_addr = (void *) page_address(frag->page) +
						frag->page_offset;
			skb_dma = pci_map_single(tp->tx_pdev, frag_addr, len,
					 PCI_DMA_TODEVICE);
			txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
			txd->len = cpu_to_le16(len);
			txd->frag.addr = cpu_to_le32(skb_dma);
			txd->frag.addrHi = 0;
			first_txd->numDesc++;
		}
	}

	/* Kick the 3XP
	 */
	wmb();
	iowrite32(txRing->lastWrite, tp->tx_ioaddr + txRing->writeRegister);

	dev->trans_start = jiffies;

	/* If we don't have room to put the worst case packet on the
	 * queue, then we must stop the queue. We need 2 extra
	 * descriptors -- one to prevent ring wrap, and one for the
	 * Tx header.
	 */
	numDesc = MAX_SKB_FRAGS + TSO_NUM_DESCRIPTORS + 1;

	if(typhoon_num_free_tx(txRing) < (numDesc + 2)) {
		netif_stop_queue(dev);

		/* A Tx complete IRQ could have gotten inbetween, making
		 * the ring free again. Only need to recheck here, since
		 * Tx is serialized.
		 */
		if(typhoon_num_free_tx(txRing) >= (numDesc + 2))
			netif_wake_queue(dev);
	}

	return 0;
}
914
915 static void
916 typhoon_set_rx_mode(struct net_device *dev)
917 {
918         struct typhoon *tp = netdev_priv(dev);
919         struct cmd_desc xp_cmd;
920         u32 mc_filter[2];
921         __le16 filter;
922
923         filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
924         if(dev->flags & IFF_PROMISC) {
925                 filter |= TYPHOON_RX_FILTER_PROMISCOUS;
926         } else if((dev->mc_count > multicast_filter_limit) ||
927                   (dev->flags & IFF_ALLMULTI)) {
928                 /* Too many to match, or accept all multicasts. */
929                 filter |= TYPHOON_RX_FILTER_ALL_MCAST;
930         } else if(dev->mc_count) {
931                 struct dev_mc_list *mclist;
932                 int i;
933
934                 memset(mc_filter, 0, sizeof(mc_filter));
935                 for(i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
936                     i++, mclist = mclist->next) {
937                         int bit = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
938                         mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
939                 }
940
941                 INIT_COMMAND_NO_RESPONSE(&xp_cmd,
942                                          TYPHOON_CMD_SET_MULTICAST_HASH);
943                 xp_cmd.parm1 = TYPHOON_MCAST_HASH_SET;
944                 xp_cmd.parm2 = cpu_to_le32(mc_filter[0]);
945                 xp_cmd.parm3 = cpu_to_le32(mc_filter[1]);
946                 typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
947
948                 filter |= TYPHOON_RX_FILTER_MCAST_HASH;
949         }
950
951         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
952         xp_cmd.parm1 = filter;
953         typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
954 }
955
956 static int
957 typhoon_do_get_stats(struct typhoon *tp)
958 {
959         struct net_device_stats *stats = &tp->stats;
960         struct net_device_stats *saved = &tp->stats_saved;
961         struct cmd_desc xp_cmd;
962         struct resp_desc xp_resp[7];
963         struct stats_resp *s = (struct stats_resp *) xp_resp;
964         int err;
965
966         INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_STATS);
967         err = typhoon_issue_command(tp, 1, &xp_cmd, 7, xp_resp);
968         if(err < 0)
969                 return err;
970
971         /* 3Com's Linux driver uses txMultipleCollisions as it's
972          * collisions value, but there is some other collision info as well...
973          *
974          * The extra status reported would be a good candidate for
975          * ethtool_ops->get_{strings,stats}()
976          */
977         stats->tx_packets = le32_to_cpu(s->txPackets);
978         stats->tx_bytes = le64_to_cpu(s->txBytes);
979         stats->tx_errors = le32_to_cpu(s->txCarrierLost);
980         stats->tx_carrier_errors = le32_to_cpu(s->txCarrierLost);
981         stats->collisions = le32_to_cpu(s->txMultipleCollisions);
982         stats->rx_packets = le32_to_cpu(s->rxPacketsGood);
983         stats->rx_bytes = le64_to_cpu(s->rxBytesGood);
984         stats->rx_fifo_errors = le32_to_cpu(s->rxFifoOverruns);
985         stats->rx_errors = le32_to_cpu(s->rxFifoOverruns) +
986                         le32_to_cpu(s->BadSSD) + le32_to_cpu(s->rxCrcErrors);
987         stats->rx_crc_errors = le32_to_cpu(s->rxCrcErrors);
988         stats->rx_length_errors = le32_to_cpu(s->rxOversized);
989         tp->speed = (s->linkStatus & TYPHOON_LINK_100MBPS) ?
990                         SPEED_100 : SPEED_10;
991         tp->duplex = (s->linkStatus & TYPHOON_LINK_FULL_DUPLEX) ?
992                         DUPLEX_FULL : DUPLEX_HALF;
993
994         /* add in the saved statistics
995          */
996         stats->tx_packets += saved->tx_packets;
997         stats->tx_bytes += saved->tx_bytes;
998         stats->tx_errors += saved->tx_errors;
999         stats->collisions += saved->collisions;
1000         stats->rx_packets += saved->rx_packets;
1001         stats->rx_bytes += saved->rx_bytes;
1002         stats->rx_fifo_errors += saved->rx_fifo_errors;
1003         stats->rx_errors += saved->rx_errors;
1004         stats->rx_crc_errors += saved->rx_crc_errors;
1005         stats->rx_length_errors += saved->rx_length_errors;
1006
1007         return 0;
1008 }
1009
1010 static struct net_device_stats *
1011 typhoon_get_stats(struct net_device *dev)
1012 {
1013         struct typhoon *tp = netdev_priv(dev);
1014         struct net_device_stats *stats = &tp->stats;
1015         struct net_device_stats *saved = &tp->stats_saved;
1016
1017         smp_rmb();
1018         if(tp->card_state == Sleeping)
1019                 return saved;
1020
1021         if(typhoon_do_get_stats(tp) < 0) {
1022                 printk(KERN_ERR "%s: error getting stats\n", dev->name);
1023                 return saved;
1024         }
1025
1026         return stats;
1027 }
1028
1029 static int
1030 typhoon_set_mac_address(struct net_device *dev, void *addr)
1031 {
1032         struct sockaddr *saddr = (struct sockaddr *) addr;
1033
1034         if(netif_running(dev))
1035                 return -EBUSY;
1036
1037         memcpy(dev->dev_addr, saddr->sa_data, dev->addr_len);
1038         return 0;
1039 }
1040
1041 static void
1042 typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1043 {
1044         struct typhoon *tp = netdev_priv(dev);
1045         struct pci_dev *pci_dev = tp->pdev;
1046         struct cmd_desc xp_cmd;
1047         struct resp_desc xp_resp[3];
1048
1049         smp_rmb();
1050         if(tp->card_state == Sleeping) {
1051                 strcpy(info->fw_version, "Sleep image");
1052         } else {
1053                 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
1054                 if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
1055                         strcpy(info->fw_version, "Unknown runtime");
1056                 } else {
1057                         u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
1058                         snprintf(info->fw_version, 32, "%02x.%03x.%03x",
1059                                  sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
1060                                  sleep_ver & 0xfff);
1061                 }
1062         }
1063
1064         strcpy(info->driver, DRV_MODULE_NAME);
1065         strcpy(info->version, DRV_MODULE_VERSION);
1066         strcpy(info->bus_info, pci_name(pci_dev));
1067 }
1068
/* ethtool ->get_settings hook.
 *
 * Advertised modes are derived from the software xcvr selection
 * (there is no visible MII/PHY register access here); current
 * speed/duplex come from tp->speed / tp->duplex, which are refreshed
 * by reading the hardware stats block below.
 */
static int
typhoon_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct typhoon *tp = netdev_priv(dev);

	cmd->supported = SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
				SUPPORTED_Autoneg;

	/* NOTE(review): no default case -- an unexpected xcvr_select
	 * leaves cmd->advertising untouched; presumably the ethtool
	 * core zeroed the struct, but confirm.
	 */
	switch (tp->xcvr_select) {
	case TYPHOON_XCVR_10HALF:
		cmd->advertising = ADVERTISED_10baseT_Half;
		break;
	case TYPHOON_XCVR_10FULL:
		cmd->advertising = ADVERTISED_10baseT_Full;
		break;
	case TYPHOON_XCVR_100HALF:
		cmd->advertising = ADVERTISED_100baseT_Half;
		break;
	case TYPHOON_XCVR_100FULL:
		cmd->advertising = ADVERTISED_100baseT_Full;
		break;
	case TYPHOON_XCVR_AUTONEG:
		cmd->advertising = ADVERTISED_10baseT_Half |
					    ADVERTISED_10baseT_Full |
					    ADVERTISED_100baseT_Half |
					    ADVERTISED_100baseT_Full |
					    ADVERTISED_Autoneg;
		break;
	}

	if(tp->capabilities & TYPHOON_FIBER) {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->advertising |= ADVERTISED_FIBRE;
		cmd->port = PORT_FIBRE;
	} else {
		cmd->supported |= SUPPORTED_10baseT_Half |
					SUPPORTED_10baseT_Full |
					SUPPORTED_TP;
		cmd->advertising |= ADVERTISED_TP;
		cmd->port = PORT_TP;
	}

	/* need to get stats to make these link speed/duplex valid */
	typhoon_do_get_stats(tp);
	cmd->speed = tp->speed;
	cmd->duplex = tp->duplex;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	if(tp->xcvr_select == TYPHOON_XCVR_AUTONEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;
	cmd->maxtxpkt = 1;
	cmd->maxrxpkt = 1;

	return 0;
}
1126
1127 static int
1128 typhoon_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1129 {
1130         struct typhoon *tp = netdev_priv(dev);
1131         struct cmd_desc xp_cmd;
1132         __le16 xcvr;
1133         int err;
1134
1135         err = -EINVAL;
1136         if(cmd->autoneg == AUTONEG_ENABLE) {
1137                 xcvr = TYPHOON_XCVR_AUTONEG;
1138         } else {
1139                 if(cmd->duplex == DUPLEX_HALF) {
1140                         if(cmd->speed == SPEED_10)
1141                                 xcvr = TYPHOON_XCVR_10HALF;
1142                         else if(cmd->speed == SPEED_100)
1143                                 xcvr = TYPHOON_XCVR_100HALF;
1144                         else
1145                                 goto out;
1146                 } else if(cmd->duplex == DUPLEX_FULL) {
1147                         if(cmd->speed == SPEED_10)
1148                                 xcvr = TYPHOON_XCVR_10FULL;
1149                         else if(cmd->speed == SPEED_100)
1150                                 xcvr = TYPHOON_XCVR_100FULL;
1151                         else
1152                                 goto out;
1153                 } else
1154                         goto out;
1155         }
1156
1157         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
1158         xp_cmd.parm1 = xcvr;
1159         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1160         if(err < 0)
1161                 goto out;
1162
1163         tp->xcvr_select = xcvr;
1164         if(cmd->autoneg == AUTONEG_ENABLE) {
1165                 tp->speed = 0xff;       /* invalid */
1166                 tp->duplex = 0xff;      /* invalid */
1167         } else {
1168                 tp->speed = cmd->speed;
1169                 tp->duplex = cmd->duplex;
1170         }
1171
1172 out:
1173         return err;
1174 }
1175
1176 static void
1177 typhoon_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1178 {
1179         struct typhoon *tp = netdev_priv(dev);
1180
1181         wol->supported = WAKE_PHY | WAKE_MAGIC;
1182         wol->wolopts = 0;
1183         if(tp->wol_events & TYPHOON_WAKE_LINK_EVENT)
1184                 wol->wolopts |= WAKE_PHY;
1185         if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
1186                 wol->wolopts |= WAKE_MAGIC;
1187         memset(&wol->sopass, 0, sizeof(wol->sopass));
1188 }
1189
1190 static int
1191 typhoon_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1192 {
1193         struct typhoon *tp = netdev_priv(dev);
1194
1195         if(wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
1196                 return -EINVAL;
1197
1198         tp->wol_events = 0;
1199         if(wol->wolopts & WAKE_PHY)
1200                 tp->wol_events |= TYPHOON_WAKE_LINK_EVENT;
1201         if(wol->wolopts & WAKE_MAGIC)
1202                 tp->wol_events |= TYPHOON_WAKE_MAGIC_PKT;
1203
1204         return 0;
1205 }
1206
/* ethtool ->get_rx_csum hook.
 */
static u32
typhoon_get_rx_csum(struct net_device *dev)
{
	/* For now, we don't allow turning off RX checksums.
	 * The hardware always computes them, so report "on".
	 */
	return 1;
}
1214
1215 static void
1216 typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
1217 {
1218         ering->rx_max_pending = RXENT_ENTRIES;
1219         ering->rx_mini_max_pending = 0;
1220         ering->rx_jumbo_max_pending = 0;
1221         ering->tx_max_pending = TXLO_ENTRIES - 1;
1222
1223         ering->rx_pending = RXENT_ENTRIES;
1224         ering->rx_mini_pending = 0;
1225         ering->rx_jumbo_pending = 0;
1226         ering->tx_pending = TXLO_ENTRIES - 1;
1227 }
1228
/* ethtool operations table; hooked up via SET_ETHTOOL_OPS at probe
 * time. Tx csum / SG / TSO use the generic ethtool_op_* toggles.
 */
static const struct ethtool_ops typhoon_ethtool_ops = {
	.get_settings		= typhoon_get_settings,
	.set_settings		= typhoon_set_settings,
	.get_drvinfo		= typhoon_get_drvinfo,
	.get_wol		= typhoon_get_wol,
	.set_wol		= typhoon_set_wol,
	.get_link		= ethtool_op_get_link,
	.get_rx_csum		= typhoon_get_rx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= ethtool_op_set_tso,
	.get_ringparam		= typhoon_get_ringparam,
};
1242
1243 static int
1244 typhoon_wait_interrupt(void __iomem *ioaddr)
1245 {
1246         int i, err = 0;
1247
1248         for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
1249                 if(ioread32(ioaddr + TYPHOON_REG_INTR_STATUS) &
1250                    TYPHOON_INTR_BOOTCMD)
1251                         goto out;
1252                 udelay(TYPHOON_UDELAY);
1253         }
1254
1255         err = -ETIMEDOUT;
1256
1257 out:
1258         iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
1259         return err;
1260 }
1261
/* Byte offset of member @x within the shared host/NIC memory block. */
#define shared_offset(x)	offsetof(struct typhoon_shared, x)

/* Populate the interface structure in shared memory with the bus
 * addresses and sizes of every ring, plus host-side bookkeeping in
 * @tp (ringBase pointers, doorbell registers, lock init).
 *
 * All rings live inside the single tp->shared DMA allocation, so each
 * bus address is tp->shared_dma plus the member's offset.
 */
static void
typhoon_init_interface(struct typhoon *tp)
{
	struct typhoon_interface *iface = &tp->shared->iface;
	dma_addr_t shared_dma;

	memset(tp->shared, 0, sizeof(struct typhoon_shared));

	/* The *Hi members of iface are all init'd to zero by the memset().
	 */
	shared_dma = tp->shared_dma + shared_offset(indexes);
	iface->ringIndex = cpu_to_le32(shared_dma);

	shared_dma = tp->shared_dma + shared_offset(txLo);
	iface->txLoAddr = cpu_to_le32(shared_dma);
	iface->txLoSize = cpu_to_le32(TXLO_ENTRIES * sizeof(struct tx_desc));

	shared_dma = tp->shared_dma + shared_offset(txHi);
	iface->txHiAddr = cpu_to_le32(shared_dma);
	iface->txHiSize = cpu_to_le32(TXHI_ENTRIES * sizeof(struct tx_desc));

	shared_dma = tp->shared_dma + shared_offset(rxBuff);
	iface->rxBuffAddr = cpu_to_le32(shared_dma);
	iface->rxBuffSize = cpu_to_le32(RXFREE_ENTRIES *
					sizeof(struct rx_free));

	shared_dma = tp->shared_dma + shared_offset(rxLo);
	iface->rxLoAddr = cpu_to_le32(shared_dma);
	iface->rxLoSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));

	shared_dma = tp->shared_dma + shared_offset(rxHi);
	iface->rxHiAddr = cpu_to_le32(shared_dma);
	iface->rxHiSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));

	shared_dma = tp->shared_dma + shared_offset(cmd);
	iface->cmdAddr = cpu_to_le32(shared_dma);
	iface->cmdSize = cpu_to_le32(COMMAND_RING_SIZE);

	shared_dma = tp->shared_dma + shared_offset(resp);
	iface->respAddr = cpu_to_le32(shared_dma);
	iface->respSize = cpu_to_le32(RESPONSE_RING_SIZE);

	shared_dma = tp->shared_dma + shared_offset(zeroWord);
	iface->zeroAddr = cpu_to_le32(shared_dma);

	/* host-side (virtual) views of the same rings */
	tp->indexes = &tp->shared->indexes;
	tp->txLoRing.ringBase = (u8 *) tp->shared->txLo;
	tp->txHiRing.ringBase = (u8 *) tp->shared->txHi;
	tp->rxLoRing.ringBase = (u8 *) tp->shared->rxLo;
	tp->rxHiRing.ringBase = (u8 *) tp->shared->rxHi;
	tp->rxBuffRing.ringBase = (u8 *) tp->shared->rxBuff;
	tp->cmdRing.ringBase = (u8 *) tp->shared->cmd;
	tp->respRing.ringBase = (u8 *) tp->shared->resp;

	tp->txLoRing.writeRegister = TYPHOON_REG_TX_LO_READY;
	tp->txHiRing.writeRegister = TYPHOON_REG_TX_HI_READY;

	tp->txlo_dma_addr = le32_to_cpu(iface->txLoAddr);
	tp->card_state = Sleeping;
	/* publish card_state before anyone reads it (paired smp_rmb()
	 * at the readers)
	 */
	smp_wmb();

	tp->offload = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM;
	tp->offload |= TYPHOON_OFFLOAD_UDP_CHKSUM | TSO_OFFLOAD_ON;

	spin_lock_init(&tp->command_lock);
	spin_lock_init(&tp->state_lock);
}
1331
1332 static void
1333 typhoon_init_rings(struct typhoon *tp)
1334 {
1335         memset(tp->indexes, 0, sizeof(struct typhoon_indexes));
1336
1337         tp->txLoRing.lastWrite = 0;
1338         tp->txHiRing.lastWrite = 0;
1339         tp->rxLoRing.lastWrite = 0;
1340         tp->rxHiRing.lastWrite = 0;
1341         tp->rxBuffRing.lastWrite = 0;
1342         tp->cmdRing.lastWrite = 0;
1343         tp->cmdRing.lastWrite = 0;
1344
1345         tp->txLoRing.lastRead = 0;
1346         tp->txHiRing.lastRead = 0;
1347 }
1348
1349 static const struct firmware *typhoon_fw;
1350
1351 static int
1352 typhoon_request_firmware(struct typhoon *tp)
1353 {
1354         int err;
1355
1356         if (typhoon_fw)
1357                 return 0;
1358
1359         err = request_firmware(&typhoon_fw, FIRMWARE_NAME, &tp->pdev->dev);
1360         if (err) {
1361                 printk(KERN_ERR "%s: Failed to load firmware \"%s\"\n",
1362                        tp->name, FIRMWARE_NAME);
1363                 return err;
1364         }
1365
1366         if (typhoon_fw->size < sizeof(struct typhoon_file_header) ||
1367             memcmp(typhoon_fw->data, "TYPHOON", 8)) {
1368                 printk(KERN_ERR "%s: Invalid firmware image\n",
1369                        tp->name);
1370                 release_firmware(typhoon_fw);
1371                 typhoon_fw = NULL;
1372                 return -EINVAL;
1373         }
1374
1375         return 0;
1376 }
1377
/* Download the runtime firmware image into the 3XP.
 *
 * Protocol (driven by boot-command interrupts): tell the card where
 * the image will load and give it the HMAC digest words, then feed it
 * one section at a time -- length, checksum, destination address, and
 * the section's bus address -- waiting for the card to signal it is
 * ready between sections. Boot-command interrupts are temporarily
 * enabled/unmasked around the transfer and restored on exit.
 *
 * The whole firmware blob is DMA-mapped once; per-section addresses
 * are offsets into that single mapping.
 *
 * Returns 0 on success, -ENOMEM if the image can't be mapped, or
 * -ETIMEDOUT if the card misses any handshake.
 */
static int
typhoon_download_firmware(struct typhoon *tp)
{
	void __iomem *ioaddr = tp->ioaddr;
	struct pci_dev *pdev = tp->pdev;
	const struct typhoon_file_header *fHdr;
	const struct typhoon_section_header *sHdr;
	const u8 *image_data;
	dma_addr_t image_dma;
	__sum16 csum;
	u32 irqEnabled;
	u32 irqMasked;
	u32 numSections;
	u32 section_len;
	u32 load_addr;
	u32 hmac;
	int i;
	int err;

	image_data = typhoon_fw->data;
	fHdr = (struct typhoon_file_header *) image_data;

	err = -ENOMEM;
	image_dma = pci_map_single(pdev, (u8 *) typhoon_fw->data,
				   typhoon_fw->size, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pdev, image_dma)) {
		printk(KERN_ERR "%s: no DMA mem for firmware\n", tp->name);
		goto err_out;
	}

	/* enable + unmask the boot-command interrupt for the handshake;
	 * the originals are restored at err_out_irq
	 */
	irqEnabled = ioread32(ioaddr + TYPHOON_REG_INTR_ENABLE);
	iowrite32(irqEnabled | TYPHOON_INTR_BOOTCMD,
	       ioaddr + TYPHOON_REG_INTR_ENABLE);
	irqMasked = ioread32(ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(irqMasked | TYPHOON_INTR_BOOTCMD,
	       ioaddr + TYPHOON_REG_INTR_MASK);

	err = -ETIMEDOUT;
	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		printk(KERN_ERR "%s: card ready timeout\n", tp->name);
		goto err_out_irq;
	}

	numSections = le32_to_cpu(fHdr->numSections);
	load_addr = le32_to_cpu(fHdr->startAddr);

	/* hand the card the load address and the five HMAC digest words,
	 * then kick off the runtime-image download
	 */
	iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(load_addr, ioaddr + TYPHOON_REG_DOWNLOAD_BOOT_ADDR);
	hmac = le32_to_cpu(fHdr->hmacDigest[0]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_0);
	hmac = le32_to_cpu(fHdr->hmacDigest[1]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_1);
	hmac = le32_to_cpu(fHdr->hmacDigest[2]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_2);
	hmac = le32_to_cpu(fHdr->hmacDigest[3]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_3);
	hmac = le32_to_cpu(fHdr->hmacDigest[4]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_4);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_RUNTIME_IMAGE, ioaddr + TYPHOON_REG_COMMAND);

	image_data += sizeof(struct typhoon_file_header);

	/* The ioread32() in typhoon_wait_interrupt() will force the
	 * last write to the command register to post, so
	 * we don't need a typhoon_post_pci_writes() after it.
	 */
	for(i = 0; i < numSections; i++) {
		sHdr = (struct typhoon_section_header *) image_data;
		image_data += sizeof(struct typhoon_section_header);
		load_addr = le32_to_cpu(sHdr->startAddr);
		section_len = le32_to_cpu(sHdr->len);

		if (typhoon_wait_interrupt(ioaddr) < 0 ||
		    ioread32(ioaddr + TYPHOON_REG_STATUS) !=
		    TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
			printk(KERN_ERR "%s: segment ready timeout\n",
			       tp->name);
			goto err_out_irq;
		}

		/* Do an pseudo IPv4 checksum on the data -- first
		 * need to convert each u16 to cpu order before
		 * summing. Fortunately, due to the properties of
		 * the checksum, we can do this once, at the end.
		 */
		csum = csum_fold(csum_partial(image_data, section_len, 0));

		iowrite32(section_len, ioaddr + TYPHOON_REG_BOOT_LENGTH);
		iowrite32(le16_to_cpu((__force __le16)csum),
			  ioaddr + TYPHOON_REG_BOOT_CHECKSUM);
		iowrite32(load_addr,
			  ioaddr + TYPHOON_REG_BOOT_DEST_ADDR);
		iowrite32(0, ioaddr + TYPHOON_REG_BOOT_DATA_HI);
		/* section's bus address = offset into the single mapping */
		iowrite32(image_dma + (image_data - typhoon_fw->data),
			  ioaddr + TYPHOON_REG_BOOT_DATA_LO);
		typhoon_post_pci_writes(ioaddr);
		iowrite32(TYPHOON_BOOTCMD_SEG_AVAILABLE,
			  ioaddr + TYPHOON_REG_COMMAND);

		image_data += section_len;
	}

	if(typhoon_wait_interrupt(ioaddr) < 0 ||
	   ioread32(ioaddr + TYPHOON_REG_STATUS) !=
	   TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
		printk(KERN_ERR "%s: final segment ready timeout\n", tp->name);
		goto err_out_irq;
	}

	iowrite32(TYPHOON_BOOTCMD_DNLD_COMPLETE, ioaddr + TYPHOON_REG_COMMAND);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
		printk(KERN_ERR "%s: boot ready timeout, status 0x%0x\n",
		       tp->name, ioread32(ioaddr + TYPHOON_REG_STATUS));
		goto err_out_irq;
	}

	err = 0;

err_out_irq:
	/* restore the caller's interrupt enable/mask state */
	iowrite32(irqMasked, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(irqEnabled, ioaddr + TYPHOON_REG_INTR_ENABLE);

	pci_unmap_single(pdev, image_dma,  typhoon_fw->size, PCI_DMA_TODEVICE);

err_out:
	return err;
}
1507
/* Boot the 3XP once an image (runtime or sleep) is in place.
 *
 * Waits for the card to reach @initial_status, hands it the bus
 * address of the shared boot record, waits for TYPHOON_STATUS_RUNNING,
 * then clears the doorbell registers and issues the final BOOT
 * command. Returns 0 on success, -ETIMEDOUT on any missed handshake.
 */
static int
typhoon_boot_3XP(struct typhoon *tp, u32 initial_status)
{
	void __iomem *ioaddr = tp->ioaddr;

	if(typhoon_wait_status(ioaddr, initial_status) < 0) {
		printk(KERN_ERR "%s: boot ready timeout\n", tp->name);
		goto out_timeout;
	}

	/* point the card at the boot record (low 32 bits only; the high
	 * half is written as zero)
	 */
	iowrite32(0, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_HI);
	iowrite32(tp->shared_dma, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_LO);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_REG_BOOT_RECORD,
				ioaddr + TYPHOON_REG_COMMAND);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_RUNNING) < 0) {
		printk(KERN_ERR "%s: boot finish timeout (status 0x%x)\n",
		       tp->name, ioread32(ioaddr + TYPHOON_REG_STATUS));
		goto out_timeout;
	}

	/* Clear the Transmit and Command ready registers
	 */
	iowrite32(0, ioaddr + TYPHOON_REG_TX_HI_READY);
	iowrite32(0, ioaddr + TYPHOON_REG_CMD_READY);
	iowrite32(0, ioaddr + TYPHOON_REG_TX_LO_READY);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_BOOT, ioaddr + TYPHOON_REG_COMMAND);

	return 0;

out_timeout:
	return -ETIMEDOUT;
}
1543
/* Reclaim completed transmit descriptors on @txRing.
 *
 * Walks the ring from txRing->lastRead up to the card-updated
 * completion offset *@index, freeing resources for each descriptor:
 * a TYPHOON_TX_DESC entry carries the skb pointer (stashed in
 * tx_addr by the transmit path — presumably fits because the card
 * never interprets it; TODO confirm against typhoon_start_tx()),
 * a TYPHOON_FRAG_DESC entry carries a DMA mapping to unmap.
 *
 * Returns the new lastRead offset; the caller is responsible for
 * storing it back into the ring (see typhoon_tx_complete()).
 * Runs in softirq/irq context (dev_kfree_skb_irq).
 */
static u32
typhoon_clean_tx(struct typhoon *tp, struct transmit_ring *txRing,
			volatile __le32 * index)
{
	u32 lastRead = txRing->lastRead;
	struct tx_desc *tx;
	dma_addr_t skb_dma;
	int dma_len;
	int type;

	while(lastRead != le32_to_cpu(*index)) {
		tx = (struct tx_desc *) (txRing->ringBase + lastRead);
		type = tx->flags & TYPHOON_TYPE_MASK;

		if(type == TYPHOON_TX_DESC) {
			/* This tx_desc describes a packet.
			 */
			unsigned long ptr = tx->tx_addr;
			struct sk_buff *skb = (struct sk_buff *) ptr;
			dev_kfree_skb_irq(skb);
		} else if(type == TYPHOON_FRAG_DESC) {
			/* This tx_desc describes a memory mapping. Free it.
			 */
			skb_dma = (dma_addr_t) le32_to_cpu(tx->frag.addr);
			dma_len = le16_to_cpu(tx->len);
			pci_unmap_single(tp->pdev, skb_dma, dma_len,
				       PCI_DMA_TODEVICE);
		}

		/* Mark the slot free for the transmit path. */
		tx->flags = 0;
		typhoon_inc_tx_index(&lastRead, 1);
	}

	return lastRead;
}
1579
/* Process transmit completions on @txRing and restart the queue if
 * enough descriptors have been freed.
 *
 * numDesc + 2 mirrors the worst-case descriptor count a single
 * transmit can consume (one per fragment plus overhead), so waking
 * the queue here guarantees the next xmit cannot run out of slots.
 * The smp_wmb() publishes the updated lastRead before other CPUs
 * (the xmit path) read it.
 */
static void
typhoon_tx_complete(struct typhoon *tp, struct transmit_ring *txRing,
			volatile __le32 * index)
{
	u32 lastRead;
	int numDesc = MAX_SKB_FRAGS + 1;

	/* This will need changing if we start to use the Hi Tx ring. */
	lastRead = typhoon_clean_tx(tp, txRing, index);
	if(netif_queue_stopped(tp->dev) && typhoon_num_free(txRing->lastWrite,
				lastRead, TXLO_ENTRIES) > (numDesc + 2))
		netif_wake_queue(tp->dev);

	txRing->lastRead = lastRead;
	smp_wmb();
}
1596
/* Return the already-mapped receive buffer at slot @idx to the RX
 * free ring so the card can reuse it.
 *
 * If the free ring is full we simply drop the skb rather than stall;
 * typhoon_fill_free_ring() will replenish the slot later.  The wmb()
 * orders the descriptor writes (virtAddr/physAddr) before the
 * rxBuffReady index update that tells the card about them.
 */
static void
typhoon_recycle_rx_skb(struct typhoon *tp, u32 idx)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
	struct basic_ring *ring = &tp->rxBuffRing;
	struct rx_free *r;

	if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
				le32_to_cpu(indexes->rxBuffCleared)) {
		/* no room in ring, just drop the skb
		 */
		dev_kfree_skb_any(rxb->skb);
		rxb->skb = NULL;
		return;
	}

	r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
	typhoon_inc_rxfree_index(&ring->lastWrite, 1);
	r->virtAddr = idx;
	r->physAddr = cpu_to_le32(rxb->dma_addr);

	/* Tell the card about it */
	wmb();
	indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
}
1623
/* Allocate a fresh receive skb for slot @idx, map it for DMA, and
 * post it on the RX free ring.
 *
 * Returns 0 on success, -ENOMEM if the free ring is full or the skb
 * allocation fails (in which case rxb->skb stays NULL and the slot
 * will be retried by typhoon_fill_free_ring()).  As with
 * typhoon_recycle_rx_skb(), the wmb() must precede the rxBuffReady
 * update that hands the buffer to the card.
 */
static int
typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
	struct basic_ring *ring = &tp->rxBuffRing;
	struct rx_free *r;
	struct sk_buff *skb;
	dma_addr_t dma_addr;

	rxb->skb = NULL;

	if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
				le32_to_cpu(indexes->rxBuffCleared))
		return -ENOMEM;

	skb = dev_alloc_skb(PKT_BUF_SZ);
	if(!skb)
		return -ENOMEM;

#if 0
	/* Please, 3com, fix the firmware to allow DMA to a unaligned
	 * address! Pretty please?
	 */
	skb_reserve(skb, 2);
#endif

	skb->dev = tp->dev;
	dma_addr = pci_map_single(tp->pdev, skb->data,
				  PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

	/* Since no card does 64 bit DAC, the high bits will never
	 * change from zero.
	 */
	r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
	typhoon_inc_rxfree_index(&ring->lastWrite, 1);
	r->virtAddr = idx;
	r->physAddr = cpu_to_le32(dma_addr);
	rxb->skb = skb;
	rxb->dma_addr = dma_addr;

	/* Tell the card about it */
	wmb();
	indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
	return 0;
}
1670
/* Receive packets from one RX descriptor ring.
 *
 * @rxRing:  the hi or lo receive ring
 * @ready:   card-written offset of the last ready descriptor
 * @cleared: host-written offset of the last processed descriptor
 * @budget:  NAPI budget — maximum packets to accept
 *
 * For each ready descriptor: drop errored frames (buffer recycled),
 * copy small frames (< rx_copybreak) into a fresh 2-byte-reserved skb
 * so the original mapped buffer can be recycled cheaply, or hand the
 * full-size skb up and allocate a replacement.  Checksum-offload bits
 * from the card set CHECKSUM_UNNECESSARY when both the IP and the
 * TCP-or-UDP checks passed.  VLAN-tagged frames go through the
 * hwaccel path under tp->state_lock (which guards tp->vlgrp).
 *
 * Returns the number of packets delivered; writes the final ring
 * offset back to *@cleared.
 */
static int
typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * ready,
	   volatile __le32 * cleared, int budget)
{
	struct rx_desc *rx;
	struct sk_buff *skb, *new_skb;
	struct rxbuff_ent *rxb;
	dma_addr_t dma_addr;
	u32 local_ready;
	u32 rxaddr;
	int pkt_len;
	u32 idx;
	__le32 csum_bits;
	int received;

	received = 0;
	local_ready = le32_to_cpu(*ready);
	rxaddr = le32_to_cpu(*cleared);
	while(rxaddr != local_ready && budget > 0) {
		rx = (struct rx_desc *) (rxRing->ringBase + rxaddr);
		idx = rx->addr;
		rxb = &tp->rxbuffers[idx];
		skb = rxb->skb;
		dma_addr = rxb->dma_addr;

		typhoon_inc_rx_index(&rxaddr, 1);

		if(rx->flags & TYPHOON_RX_ERROR) {
			typhoon_recycle_rx_skb(tp, idx);
			continue;
		}

		pkt_len = le16_to_cpu(rx->frameLen);

		if(pkt_len < rx_copybreak &&
		   (new_skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
			/* Copybreak: sync the buffer for CPU access, copy the
			 * frame out (with the 2-byte reserve the firmware
			 * cannot give us — see typhoon_alloc_rx_skb()), then
			 * hand the original buffer straight back to the card.
			 */
			skb_reserve(new_skb, 2);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
						    PKT_BUF_SZ,
						    PCI_DMA_FROMDEVICE);
			skb_copy_to_linear_data(new_skb, skb->data, pkt_len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr,
						       PKT_BUF_SZ,
						       PCI_DMA_FROMDEVICE);
			skb_put(new_skb, pkt_len);
			typhoon_recycle_rx_skb(tp, idx);
		} else {
			/* Pass the mapped skb up and refill the slot. */
			new_skb = skb;
			skb_put(new_skb, pkt_len);
			pci_unmap_single(tp->pdev, dma_addr, PKT_BUF_SZ,
				       PCI_DMA_FROMDEVICE);
			typhoon_alloc_rx_skb(tp, idx);
		}
		new_skb->protocol = eth_type_trans(new_skb, tp->dev);
		csum_bits = rx->rxStatus & (TYPHOON_RX_IP_CHK_GOOD |
			TYPHOON_RX_UDP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD);
		if(csum_bits ==
		   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD)
		   || csum_bits ==
		   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_UDP_CHK_GOOD)) {
			new_skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else
			new_skb->ip_summed = CHECKSUM_NONE;

		spin_lock(&tp->state_lock);
		if(tp->vlgrp != NULL && rx->rxStatus & TYPHOON_RX_VLAN)
			vlan_hwaccel_receive_skb(new_skb, tp->vlgrp,
						 ntohl(rx->vlanTag) & 0xffff);
		else
			netif_receive_skb(new_skb);
		spin_unlock(&tp->state_lock);

		received++;
		budget--;
	}
	*cleared = cpu_to_le32(rxaddr);

	return received;
}
1750
1751 static void
1752 typhoon_fill_free_ring(struct typhoon *tp)
1753 {
1754         u32 i;
1755
1756         for(i = 0; i < RXENT_ENTRIES; i++) {
1757                 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1758                 if(rxb->skb)
1759                         continue;
1760                 if(typhoon_alloc_rx_skb(tp, i) < 0)
1761                         break;
1762         }
1763 }
1764
/* NAPI poll handler.
 *
 * Drains, in order: pending command responses, TX completions on the
 * lo ring, then RX on the hi and lo rings (sharing @budget between
 * them), and refills the RX free ring if it ran empty.  When less
 * than the full budget was consumed, completes NAPI and unmasks the
 * chip interrupt (TYPHOON_INTR_NONE = nothing masked), flushing the
 * write so the unmask is posted before we return.
 *
 * Returns the number of RX packets processed.
 */
static int
typhoon_poll(struct napi_struct *napi, int budget)
{
	struct typhoon *tp = container_of(napi, struct typhoon, napi);
	struct typhoon_indexes *indexes = tp->indexes;
	int work_done;

	/* Order reads of the shared index page against the card's writes. */
	rmb();
	if(!tp->awaiting_resp && indexes->respReady != indexes->respCleared)
			typhoon_process_response(tp, 0, NULL);

	if(le32_to_cpu(indexes->txLoCleared) != tp->txLoRing.lastRead)
		typhoon_tx_complete(tp, &tp->txLoRing, &indexes->txLoCleared);

	work_done = 0;

	if(indexes->rxHiCleared != indexes->rxHiReady) {
		work_done += typhoon_rx(tp, &tp->rxHiRing, &indexes->rxHiReady,
					&indexes->rxHiCleared, budget);
	}

	if(indexes->rxLoCleared != indexes->rxLoReady) {
		work_done += typhoon_rx(tp, &tp->rxLoRing, &indexes->rxLoReady,
					&indexes->rxLoCleared, budget - work_done);
	}

	if(le32_to_cpu(indexes->rxBuffCleared) == tp->rxBuffRing.lastWrite) {
		/* rxBuff ring is empty, try to fill it. */
		typhoon_fill_free_ring(tp);
	}

	if (work_done < budget) {
		napi_complete(napi);
		iowrite32(TYPHOON_INTR_NONE,
				tp->ioaddr + TYPHOON_REG_INTR_MASK);
		typhoon_post_pci_writes(tp->ioaddr);
	}

	return work_done;
}
1805
/* Shared interrupt handler.
 *
 * Returns IRQ_NONE if the chip did not raise TYPHOON_INTR_HOST_INT
 * (the line is shared).  Otherwise acks the status bits, masks all
 * chip interrupts, and schedules NAPI; typhoon_poll() unmasks them
 * when it finishes.  A failed napi_schedule_prep() means poll is
 * already scheduled, which should not happen while interrupts are
 * masked — hence the error printk.
 */
static irqreturn_t
typhoon_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct typhoon *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->ioaddr;
	u32 intr_status;

	intr_status = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
	if(!(intr_status & TYPHOON_INTR_HOST_INT))
		return IRQ_NONE;

	/* Write-one-to-clear the pending status bits. */
	iowrite32(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS);

	if (napi_schedule_prep(&tp->napi)) {
		iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
		typhoon_post_pci_writes(ioaddr);
		__napi_schedule(&tp->napi);
	} else {
		printk(KERN_ERR "%s: Error, poll already scheduled\n",
		       dev->name);
	}
	return IRQ_HANDLED;
}
1830
1831 static void
1832 typhoon_free_rx_rings(struct typhoon *tp)
1833 {
1834         u32 i;
1835
1836         for(i = 0; i < RXENT_ENTRIES; i++) {
1837                 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1838                 if(rxb->skb) {
1839                         pci_unmap_single(tp->pdev, rxb->dma_addr, PKT_BUF_SZ,
1840                                        PCI_DMA_FROMDEVICE);
1841                         dev_kfree_skb(rxb->skb);
1842                         rxb->skb = NULL;
1843                 }
1844         }
1845 }
1846
/* Put the card to sleep and drop the PCI device into @state.
 *
 * @events: wake-event mask (little-endian) programmed into the card
 *          before it sleeps; 0 disables wakeups.
 *
 * Sequence: program wake events, issue GOTO_SLEEP, wait for the
 * SLEEPING status, then turn off carrier (we can't track link state
 * while asleep), arm PCI wake, and change power state.  Returns 0 on
 * success, a negative errno from the failing command, or -ETIMEDOUT
 * if the card never reports SLEEPING.
 */
static int
typhoon_sleep(struct typhoon *tp, pci_power_t state, __le16 events)
{
	struct pci_dev *pdev = tp->pdev;
	void __iomem *ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int err;

	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_ENABLE_WAKE_EVENTS);
	xp_cmd.parm1 = events;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0) {
		printk(KERN_ERR "%s: typhoon_sleep(): wake events cmd err %d\n",
				tp->name, err);
		return err;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_GOTO_SLEEP);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0) {
		printk(KERN_ERR "%s: typhoon_sleep(): sleep cmd err %d\n",
				tp->name, err);
		return err;
	}

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_SLEEPING) < 0)
		return -ETIMEDOUT;

	/* Since we cannot monitor the status of the link while sleeping,
	 * tell the world it went away.
	 */
	netif_carrier_off(tp->dev);

	pci_enable_wake(tp->pdev, state, 1);
	pci_disable_device(pdev);
	return pci_set_power_state(pdev, state);
}
1884
/* Wake the card from sleep: restore PCI power state and config space,
 * then issue the WAKEUP boot command.
 *
 * If the card does not come back to WAITING_FOR_HOST, or its sleep
 * image is one that requires a reset after wakeup (see the
 * TYPHOON_WAKEUP_NEEDS_RESET capability), fall back to a full reset
 * with @wait_type semantics.  Returns 0, or the typhoon_reset()
 * result on the fallback path.
 */
static int
typhoon_wakeup(struct typhoon *tp, int wait_type)
{
	struct pci_dev *pdev = tp->pdev;
	void __iomem *ioaddr = tp->ioaddr;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Post 2.x.x versions of the Sleep Image require a reset before
	 * we can download the Runtime Image. But let's not make users of
	 * the old firmware pay for the reset.
	 */
	iowrite32(TYPHOON_BOOTCMD_WAKEUP, ioaddr + TYPHOON_REG_COMMAND);
	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0 ||
			(tp->capabilities & TYPHOON_WAKEUP_NEEDS_RESET))
		return typhoon_reset(ioaddr, wait_type);

	return 0;
}
1905
/* Bring the card fully up: download and boot the runtime image, then
 * configure it with the required command sequence.
 *
 * The commands must be issued in this order (packet size, MAC, IRQ
 * coalescing, transceiver, VLAN ethertype, offloads, rx mode, TX
 * enable, RX enable) before interrupts are enabled.  On any failure
 * the card is reset and the rings reinitialized so a later retry
 * starts from a clean state.  Returns 0 or a negative errno.
 */
static int
typhoon_start_runtime(struct typhoon *tp)
{
	struct net_device *dev = tp->dev;
	void __iomem *ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int err;

	typhoon_init_rings(tp);
	typhoon_fill_free_ring(tp);

	err = typhoon_download_firmware(tp);
	if(err < 0) {
		printk("%s: cannot load runtime on 3XP\n", tp->name);
		goto error_out;
	}

	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
		printk("%s: cannot boot 3XP\n", tp->name);
		err = -EIO;
		goto error_out;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAX_PKT_SIZE);
	xp_cmd.parm1 = cpu_to_le16(PKT_BUF_SZ);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* The card wants the MAC address in host byte order split across
	 * two command parameters.
	 */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
	xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
	xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* Disable IRQ coalescing -- we can reenable it when 3Com gives
	 * us some more information on how to control it.
	 */
	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_IRQ_COALESCE_CTRL);
	xp_cmd.parm1 = 0;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
	xp_cmd.parm1 = tp->xcvr_select;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_VLAN_TYPE_WRITE);
	xp_cmd.parm1 = cpu_to_le16(ETH_P_8021Q);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* tp->offload is guarded by state_lock (shared with the VLAN
	 * register path), so hold it across the read and the command.
	 */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS);
	spin_lock_bh(&tp->state_lock);
	xp_cmd.parm2 = tp->offload;
	xp_cmd.parm3 = tp->offload;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	spin_unlock_bh(&tp->state_lock);
	if(err < 0)
		goto error_out;

	typhoon_set_rx_mode(dev);

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_ENABLE);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_ENABLE);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* Publish Running before enabling interrupts so other paths see
	 * a consistent card state.
	 */
	tp->card_state = Running;
	smp_wmb();

	iowrite32(TYPHOON_INTR_ENABLE_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);
	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_MASK);
	typhoon_post_pci_writes(ioaddr);

	return 0;

error_out:
	typhoon_reset(ioaddr, WaitNoSleep);
	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);
	return err;
}
1999
/* Stop the running card: disable RX, drain TX, save statistics,
 * halt the 3XP, and reset it with @wait_type semantics.
 *
 * Safe to call with !netif_running() — interrupts are disabled up
 * front precisely because we cannot schedule a poll in that case.
 * Timeouts on TX drain and HALT are logged but not fatal; the reset
 * afterwards recovers the hardware either way.  Returns 0, or
 * -ETIMEDOUT only if the final reset itself fails.
 */
static int
typhoon_stop_runtime(struct typhoon *tp, int wait_type)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct transmit_ring *txLo = &tp->txLoRing;
	void __iomem *ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int i;

	/* Disable interrupts early, since we can't schedule a poll
	 * when called with !netif_running(). This will be posted
	 * when we force the posting of the command.
	 */
	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_DISABLE);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	/* Wait 1/2 sec for any outstanding transmits to occur
	 * We'll cleanup after the reset if this times out.
	 */
	for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
		if(indexes->txLoCleared == cpu_to_le32(txLo->lastWrite))
			break;
		udelay(TYPHOON_UDELAY);
	}

	if(i == TYPHOON_WAIT_TIMEOUT)
		printk(KERN_ERR
		       "%s: halt timed out waiting for Tx to complete\n",
		       tp->name);

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_DISABLE);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	/* save the statistics so when we bring the interface up again,
	 * the values reported to userspace are correct.
	 */
	tp->card_state = Sleeping;
	smp_wmb();
	typhoon_do_get_stats(tp);
	memcpy(&tp->stats_saved, &tp->stats, sizeof(struct net_device_stats));

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_HALT);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_HALTED) < 0)
		printk(KERN_ERR "%s: timed out waiting for 3XP to halt\n",
		       tp->name);

	if(typhoon_reset(ioaddr, wait_type) < 0) {
		printk(KERN_ERR "%s: unable to reset 3XP\n", tp->name);
		return -ETIMEDOUT;
	}

	/* cleanup any outstanding Tx packets */
	if(indexes->txLoCleared != cpu_to_le32(txLo->lastWrite)) {
		indexes->txLoCleared = cpu_to_le32(txLo->lastWrite);
		typhoon_clean_tx(tp, &tp->txLoRing, &indexes->txLoCleared);
	}

	return 0;
}
2063
2064 static void
2065 typhoon_tx_timeout(struct net_device *dev)
2066 {
2067         struct typhoon *tp = netdev_priv(dev);
2068
2069         if(typhoon_reset(tp->ioaddr, WaitNoSleep) < 0) {
2070                 printk(KERN_WARNING "%s: could not reset in tx timeout\n",
2071                                         dev->name);
2072                 goto truely_dead;
2073         }
2074
2075         /* If we ever start using the Hi ring, it will need cleaning too */
2076         typhoon_clean_tx(tp, &tp->txLoRing, &tp->indexes->txLoCleared);
2077         typhoon_free_rx_rings(tp);
2078
2079         if(typhoon_start_runtime(tp) < 0) {
2080                 printk(KERN_ERR "%s: could not start runtime in tx timeout\n",
2081                                         dev->name);
2082                 goto truely_dead;
2083         }
2084
2085         netif_wake_queue(dev);
2086         return;
2087
2088 truely_dead:
2089         /* Reset the hardware, and turn off carrier to avoid more timeouts */
2090         typhoon_reset(tp->ioaddr, NoWait);
2091         netif_carrier_off(dev);
2092 }
2093
/* net_device open (ndo_open) handler.
 *
 * Loads the firmware image, wakes the card, grabs the (shared) IRQ,
 * enables NAPI, and starts the runtime.  The unwind path is careful
 * to put the card back into its sleep image rather than leaving it
 * half-awake: free_irq, reboot the sleep image, then sleep with no
 * wake events.  Returns 0 or a negative errno.
 */
static int
typhoon_open(struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);
	int err;

	err = typhoon_request_firmware(tp);
	if (err)
		goto out;

	err = typhoon_wakeup(tp, WaitSleep);
	if(err < 0) {
		printk(KERN_ERR "%s: unable to wakeup device\n", dev->name);
		goto out_sleep;
	}

	err = request_irq(dev->irq, &typhoon_interrupt, IRQF_SHARED,
				dev->name, dev);
	if(err < 0)
		goto out_sleep;

	napi_enable(&tp->napi);

	err = typhoon_start_runtime(tp);
	if(err < 0) {
		napi_disable(&tp->napi);
		goto out_irq;
	}

	netif_start_queue(dev);
	return 0;

out_irq:
	free_irq(dev->irq, dev);

out_sleep:
	/* Boot back into the sleep image before sleeping; if even that
	 * fails, hard-reset and give up.
	 */
	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		printk(KERN_ERR "%s: unable to reboot into sleep img\n",
				dev->name);
		typhoon_reset(tp->ioaddr, NoWait);
		goto out;
	}

	if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
		printk(KERN_ERR "%s: unable to go back to sleep\n", dev->name);

out:
	return err;
}
2143
/* net_device stop (ndo_stop) handler.
 *
 * Mirrors typhoon_open(): stop the queue and NAPI, stop the runtime,
 * release the IRQ (also guarantees no handler is still running on
 * another CPU), tear down the RX buffers, then boot the sleep image
 * and put the card into D3hot with wakeups disabled.  Failures are
 * logged but the close always "succeeds" (returns 0), matching the
 * ndo_stop contract.
 */
static int
typhoon_close(struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&tp->napi);

	if(typhoon_stop_runtime(tp, WaitSleep) < 0)
		printk(KERN_ERR "%s: unable to stop runtime\n", dev->name);

	/* Make sure there is no irq handler running on a different CPU. */
	free_irq(dev->irq, dev);

	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);

	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0)
		printk(KERN_ERR "%s: unable to boot sleep image\n", dev->name);

	if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
		printk(KERN_ERR "%s: unable to put card to sleep\n", dev->name);

	return 0;
}
2169
2170 #ifdef CONFIG_PM
/* PCI resume handler (CONFIG_PM).
 *
 * Nothing to do if the interface is down — typhoon_open() handles
 * wakeup on the next up.  Otherwise wake the card, restart the
 * runtime, and reattach the netdev.  On failure the card is left
 * reset and -EBUSY is returned.
 */
static int
typhoon_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = netdev_priv(dev);

	/* If we're down, resume when we are upped.
	 */
	if(!netif_running(dev))
		return 0;

	if(typhoon_wakeup(tp, WaitNoSleep) < 0) {
		printk(KERN_ERR "%s: critical: could not wake up in resume\n",
				dev->name);
		goto reset;
	}

	if(typhoon_start_runtime(tp) < 0) {
		printk(KERN_ERR "%s: critical: could not start runtime in "
				"resume\n", dev->name);
		goto reset;
	}

	netif_device_attach(dev);
	return 0;

reset:
	typhoon_reset(tp->ioaddr, NoWait);
	return -EBUSY;
}
2201
/* PCI suspend handler (CONFIG_PM).
 *
 * Refuses to suspend with WAKE_MAGIC while VLANs are registered
 * (per the check below the sleep image apparently cannot match
 * magic packets inside VLAN frames — NOTE(review): inferred from
 * the error message, confirm against firmware docs).  Otherwise:
 * detach the netdev, stop the runtime, reboot the sleep image,
 * reprogram the MAC address and a directed+broadcast RX filter so
 * wake packets are recognized, and sleep with tp->wol_events.
 * Any failure resumes the device and returns -EBUSY.
 */
static int
typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = netdev_priv(dev);
	struct cmd_desc xp_cmd;

	/* If we're down, we're already suspended.
	 */
	if(!netif_running(dev))
		return 0;

	spin_lock_bh(&tp->state_lock);
	if(tp->vlgrp && tp->wol_events & TYPHOON_WAKE_MAGIC_PKT) {
		spin_unlock_bh(&tp->state_lock);
		printk(KERN_ERR "%s: cannot do WAKE_MAGIC with VLANS\n",
				dev->name);
		return -EBUSY;
	}
	spin_unlock_bh(&tp->state_lock);

	netif_device_detach(dev);

	if(typhoon_stop_runtime(tp, WaitNoSleep) < 0) {
		printk(KERN_ERR "%s: unable to stop runtime\n", dev->name);
		goto need_resume;
	}

	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);

	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		printk(KERN_ERR "%s: unable to boot sleep image\n", dev->name);
		goto need_resume;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
	xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
	xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
	if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
		printk(KERN_ERR "%s: unable to set mac address in suspend\n",
				dev->name);
		goto need_resume;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
	xp_cmd.parm1 = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
	if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
		printk(KERN_ERR "%s: unable to set rx filter in suspend\n",
				dev->name);
		goto need_resume;
	}

	if(typhoon_sleep(tp, pci_choose_state(pdev, state), tp->wol_events) < 0) {
		printk(KERN_ERR "%s: unable to put card to sleep\n", dev->name);
		goto need_resume;
	}

	return 0;

need_resume:
	typhoon_resume(pdev);
	return -EBUSY;
}
2266 #endif
2267
/* Probe whether MMIO access to the card works.
 *
 * Maps BAR 1, verifies the card is in its power-on state, then tries
 * to observe a self-generated interrupt through the MMIO status
 * register.  Returns 1 if MMIO works, 0 to fall back to port IO.
 * All interrupt state touched here is restored (masked, acked,
 * disabled) before unmapping.
 */
static int __devinit
typhoon_test_mmio(struct pci_dev *pdev)
{
	void __iomem *ioaddr = pci_iomap(pdev, 1, 128);
	int mode = 0;
	u32 val;

	if(!ioaddr)
		goto out;

	if(ioread32(ioaddr + TYPHOON_REG_STATUS) !=
				TYPHOON_STATUS_WAITING_FOR_HOST)
		goto out_unmap;

	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);

	/* Ok, see if we can change our interrupt status register by
	 * sending ourselves an interrupt. If so, then MMIO works.
	 * The 50usec delay is arbitrary -- it could probably be smaller.
	 */
	val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
	if((val & TYPHOON_INTR_SELF) == 0) {
		iowrite32(1, ioaddr + TYPHOON_REG_SELF_INTERRUPT);
		/* Read back to flush the posted write before delaying. */
		ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
		udelay(50);
		val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
		if(val & TYPHOON_INTR_SELF)
			mode = 1;
	}

	/* Restore quiescent interrupt state: mask and ack everything,
	 * disable, and flush.
	 */
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);
	ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);

out_unmap:
	pci_iounmap(pdev, ioaddr);

out:
	if(!mode)
		printk(KERN_INFO PFX "falling back to port IO\n");
	return mode;
}
2313
/* net_device operations for the typhoon driver. */
static const struct net_device_ops typhoon_netdev_ops = {
	.ndo_open		= typhoon_open,
	.ndo_stop		= typhoon_close,
	.ndo_start_xmit		= typhoon_start_tx,
	.ndo_set_multicast_list	= typhoon_set_rx_mode,
	.ndo_tx_timeout		= typhoon_tx_timeout,
	.ndo_get_stats		= typhoon_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= typhoon_set_mac_address,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_vlan_rx_register	= typhoon_vlan_rx_register,
};
2326
2327 static int __devinit
2328 typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2329 {
2330         static int did_version = 0;
2331         struct net_device *dev;
2332         struct typhoon *tp;
2333         int card_id = (int) ent->driver_data;
2334         void __iomem *ioaddr;
2335         void *shared;
2336         dma_addr_t shared_dma;
2337         struct cmd_desc xp_cmd;
2338         struct resp_desc xp_resp[3];
2339         int err = 0;
2340
2341         if(!did_version++)
2342                 printk(KERN_INFO "%s", version);
2343
2344         dev = alloc_etherdev(sizeof(*tp));
2345         if(dev == NULL) {
2346                 printk(ERR_PFX "%s: unable to alloc new net device\n",
2347                        pci_name(pdev));
2348                 err = -ENOMEM;
2349                 goto error_out;
2350         }
2351         SET_NETDEV_DEV(dev, &pdev->dev);
2352
2353         err = pci_enable_device(pdev);
2354         if(err < 0) {
2355                 printk(ERR_PFX "%s: unable to enable device\n",
2356                        pci_name(pdev));
2357                 goto error_out_dev;
2358         }
2359
2360         err = pci_set_mwi(pdev);
2361         if(err < 0) {
2362                 printk(ERR_PFX "%s: unable to set MWI\n", pci_name(pdev));
2363                 goto error_out_disable;
2364         }
2365
2366         err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2367         if(err < 0) {
2368                 printk(ERR_PFX "%s: No usable DMA configuration\n",
2369                        pci_name(pdev));
2370                 goto error_out_mwi;
2371         }
2372
2373         /* sanity checks on IO and MMIO BARs
2374          */
2375         if(!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
2376                 printk(ERR_PFX
2377                        "%s: region #1 not a PCI IO resource, aborting\n",
2378                        pci_name(pdev));
2379                 err = -ENODEV;
2380                 goto error_out_mwi;
2381         }
2382         if(pci_resource_len(pdev, 0) < 128) {
2383                 printk(ERR_PFX "%s: Invalid PCI IO region size, aborting\n",
2384                        pci_name(pdev));
2385                 err = -ENODEV;
2386                 goto error_out_mwi;
2387         }
2388         if(!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
2389                 printk(ERR_PFX
2390                        "%s: region #1 not a PCI MMIO resource, aborting\n",
2391                        pci_name(pdev));
2392                 err = -ENODEV;
2393                 goto error_out_mwi;
2394         }
2395         if(pci_resource_len(pdev, 1) < 128) {
2396                 printk(ERR_PFX "%s: Invalid PCI MMIO region size, aborting\n",
2397                        pci_name(pdev));
2398                 err = -ENODEV;
2399                 goto error_out_mwi;
2400         }
2401
2402         err = pci_request_regions(pdev, "typhoon");
2403         if(err < 0) {
2404                 printk(ERR_PFX "%s: could not request regions\n",
2405                        pci_name(pdev));
2406                 goto error_out_mwi;
2407         }
2408
2409         /* map our registers
2410          */
2411         if(use_mmio != 0 && use_mmio != 1)
2412                 use_mmio = typhoon_test_mmio(pdev);
2413
2414         ioaddr = pci_iomap(pdev, use_mmio, 128);
2415         if (!ioaddr) {
2416                 printk(ERR_PFX "%s: cannot remap registers, aborting\n",
2417                        pci_name(pdev));
2418                 err = -EIO;
2419                 goto error_out_regions;
2420         }
2421
2422         /* allocate pci dma space for rx and tx descriptor rings
2423          */
2424         shared = pci_alloc_consistent(pdev, sizeof(struct typhoon_shared),
2425                                       &shared_dma);
2426         if(!shared) {
2427                 printk(ERR_PFX "%s: could not allocate DMA memory\n",
2428                        pci_name(pdev));
2429                 err = -ENOMEM;
2430                 goto error_out_remap;
2431         }
2432
2433         dev->irq = pdev->irq;
2434         tp = netdev_priv(dev);
2435         tp->shared = (struct typhoon_shared *) shared;
2436         tp->shared_dma = shared_dma;
2437         tp->pdev = pdev;
2438         tp->tx_pdev = pdev;
2439         tp->ioaddr = ioaddr;
2440         tp->tx_ioaddr = ioaddr;
2441         tp->dev = dev;
2442
2443         /* Init sequence:
2444          * 1) Reset the adapter to clear any bad juju
2445          * 2) Reload the sleep image
2446          * 3) Boot the sleep image
2447          * 4) Get the hardware address.
2448          * 5) Put the card to sleep.
2449          */
2450         if (typhoon_reset(ioaddr, WaitSleep) < 0) {
2451                 printk(ERR_PFX "%s: could not reset 3XP\n", pci_name(pdev));
2452                 err = -EIO;
2453                 goto error_out_dma;
2454         }
2455
2456         /* Now that we've reset the 3XP and are sure it's not going to
2457          * write all over memory, enable bus mastering, and save our
2458          * state for resuming after a suspend.
2459          */
2460         pci_set_master(pdev);
2461         pci_save_state(pdev);
2462
2463         /* dev->name is not valid until we register, but we need to
2464          * use some common routines to initialize the card. So that those
2465          * routines print the right name, we keep our oun pointer to the name
2466          */
2467         tp->name = pci_name(pdev);
2468
2469         typhoon_init_interface(tp);
2470         typhoon_init_rings(tp);
2471
2472         if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
2473                 printk(ERR_PFX "%s: cannot boot 3XP sleep image\n",
2474                        pci_name(pdev));
2475                 err = -EIO;
2476                 goto error_out_reset;
2477         }
2478
2479         INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS);
2480         if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) {
2481                 printk(ERR_PFX "%s: cannot read MAC address\n",
2482                        pci_name(pdev));
2483                 err = -EIO;
2484                 goto error_out_reset;
2485         }
2486
2487         *(__be16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
2488         *(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
2489
2490         if(!is_valid_ether_addr(dev->dev_addr)) {
2491                 printk(ERR_PFX "%s: Could not obtain valid ethernet address, "
2492                        "aborting\n", pci_name(pdev));
2493                 goto error_out_reset;
2494         }
2495
2496         /* Read the Sleep Image version last, so the response is valid
2497          * later when we print out the version reported.
2498          */
2499         INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
2500         if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
2501                 printk(ERR_PFX "%s: Could not get Sleep Image version\n",
2502                         pci_name(pdev));
2503                 goto error_out_reset;
2504         }
2505
2506         tp->capabilities = typhoon_card_info[card_id].capabilities;
2507         tp->xcvr_select = TYPHOON_XCVR_AUTONEG;
2508
2509         /* Typhoon 1.0 Sleep Images return one response descriptor to the
2510          * READ_VERSIONS command. Those versions are OK after waking up
2511          * from sleep without needing a reset. Typhoon 1.1+ Sleep Images
2512          * seem to need a little extra help to get started. Since we don't
2513          * know how to nudge it along, just kick it.
2514          */
2515         if(xp_resp[0].numDesc != 0)
2516                 tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET;
2517
2518         if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) {
2519                 printk(ERR_PFX "%s: cannot put adapter to sleep\n",
2520                        pci_name(pdev));
2521                 err = -EIO;
2522                 goto error_out_reset;
2523         }
2524
2525         /* The chip-specific entries in the device structure. */
2526         dev->netdev_ops         = &typhoon_netdev_ops;
2527         netif_napi_add(dev, &tp->napi, typhoon_poll, 16);
2528         dev->watchdog_timeo     = TX_TIMEOUT;
2529
2530         SET_ETHTOOL_OPS(dev, &typhoon_ethtool_ops);
2531
2532         /* We can handle scatter gather, up to 16 entries, and
2533          * we can do IP checksumming (only version 4, doh...)
2534          */
2535         dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
2536         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2537         dev->features |= NETIF_F_TSO;
2538
2539         if(register_netdev(dev) < 0)
2540                 goto error_out_reset;
2541
2542         /* fixup our local name */
2543         tp->name = dev->name;
2544
2545         pci_set_drvdata(pdev, dev);
2546
2547         printk(KERN_INFO "%s: %s at %s 0x%llx, %pM\n",
2548                dev->name, typhoon_card_info[card_id].name,
2549                use_mmio ? "MMIO" : "IO",
2550                (unsigned long long)pci_resource_start(pdev, use_mmio),
2551                dev->dev_addr);
2552
2553         /* xp_resp still contains the response to the READ_VERSIONS command.
2554          * For debugging, let the user know what version he has.
2555          */
2556         if(xp_resp[0].numDesc == 0) {
2557                 /* This is the Typhoon 1.0 type Sleep Image, last 16 bits
2558                  * of version is Month/Day of build.
2559                  */
2560                 u16 monthday = le32_to_cpu(xp_resp[0].parm2) & 0xffff;
2561                 printk(KERN_INFO "%s: Typhoon 1.0 Sleep Image built "
2562                         "%02u/%02u/2000\n", dev->name, monthday >> 8,
2563                         monthday & 0xff);
2564         } else if(xp_resp[0].numDesc == 2) {
2565                 /* This is the Typhoon 1.1+ type Sleep Image
2566                  */
2567                 u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
2568                 u8 *ver_string = (u8 *) &xp_resp[1];
2569                 ver_string[25] = 0;
2570                 printk(KERN_INFO "%s: Typhoon 1.1+ Sleep Image version "
2571                         "%02x.%03x.%03x %s\n", dev->name, sleep_ver >> 24,
2572                         (sleep_ver >> 12) & 0xfff, sleep_ver & 0xfff,
2573                         ver_string);
2574         } else {
2575                 printk(KERN_WARNING "%s: Unknown Sleep Image version "
2576                         "(%u:%04x)\n", dev->name, xp_resp[0].numDesc,
2577                         le32_to_cpu(xp_resp[0].parm2));
2578         }
2579
2580         return 0;
2581
2582 error_out_reset:
2583         typhoon_reset(ioaddr, NoWait);
2584
2585 error_out_dma:
2586         pci_free_consistent(pdev, sizeof(struct typhoon_shared),
2587                             shared, shared_dma);
2588 error_out_remap:
2589         pci_iounmap(pdev, ioaddr);
2590 error_out_regions:
2591         pci_release_regions(pdev);
2592 error_out_mwi:
2593         pci_clear_mwi(pdev);
2594 error_out_disable:
2595         pci_disable_device(pdev);
2596 error_out_dev:
2597         free_netdev(dev);
2598 error_out:
2599         return err;
2600 }
2601
/* Tear down one board, unwinding typhoon_init_one() in reverse.
 *
 * The order here matters: the device is unregistered first so no new
 * traffic can arrive, then woken to D0 and its config space restored so
 * the reset register write actually reaches the chip, and only after the
 * 3XP is reset (so it can no longer DMA into host memory) do we unmap
 * registers and free the shared descriptor area.
 */
static void __devexit
typhoon_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = netdev_priv(dev);

	unregister_netdev(dev);
	/* wake the chip and restore config space before poking registers */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	typhoon_reset(tp->ioaddr, NoWait);
	pci_iounmap(pdev, tp->ioaddr);
	pci_free_consistent(pdev, sizeof(struct typhoon_shared),
			    tp->shared, tp->shared_dma);
	pci_release_regions(pdev);
	pci_clear_mwi(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
}
2621
/* PCI glue: hooks probe/remove (and suspend/resume when the kernel is
 * built with CONFIG_PM) into the PCI core for the IDs in typhoon_pci_tbl.
 */
static struct pci_driver typhoon_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= typhoon_pci_tbl,
	.probe		= typhoon_init_one,
	.remove		= __devexit_p(typhoon_remove_one),
#ifdef CONFIG_PM
	.suspend	= typhoon_suspend,
	.resume		= typhoon_resume,
#endif
};
2632
2633 static int __init
2634 typhoon_init(void)
2635 {
2636         return pci_register_driver(&typhoon_driver);
2637 }
2638
2639 static void __exit
2640 typhoon_cleanup(void)
2641 {
2642         if (typhoon_fw)
2643                 release_firmware(typhoon_fw);
2644         pci_unregister_driver(&typhoon_driver);
2645 }
2646
/* Module entry and exit points. */
module_init(typhoon_init);
module_exit(typhoon_cleanup);