vmxnet3: make bit twiddle routines inline
[linux-2.6.git] / drivers / net / typhoon.c
1 /* typhoon.c: A Linux Ethernet device driver for 3Com 3CR990 family of NICs */
2 /*
3         Written 2002-2004 by David Dillow <dave@thedillows.org>
4         Based on code written 1998-2000 by Donald Becker <becker@scyld.com> and
5         Linux 2.2.x driver by David P. McLean <davidpmclean@yahoo.com>.
6
7         This software may be used and distributed according to the terms of
8         the GNU General Public License (GPL), incorporated herein by reference.
9         Drivers based on or derived from this code fall under the GPL and must
10         retain the authorship, copyright and license notice.  This file is not
11         a complete program and may only be used when the entire operating
12         system is licensed under the GPL.
13
14         This software is available on a public web site. It may enable
15         cryptographic capabilities of the 3Com hardware, and may be
16         exported from the United States under License Exception "TSU"
17         pursuant to 15 C.F.R. Section 740.13(e).
18
19         This work was funded by the National Library of Medicine under
20         the Department of Energy project number 0274DD06D1 and NLM project
21         number Y1-LM-2015-01.
22
23         This driver is designed for the 3Com 3CR990 Family of cards with the
24         3XP Processor. It has been tested on x86 and sparc64.
25
26         KNOWN ISSUES:
27         *) The current firmware always strips the VLAN tag off, even if
28                 we tell it not to. You should filter VLANs at the switch
29                 as a workaround (good practice in any event) until we can
30                 get this fixed.
31         *) Cannot DMA Rx packets to a 2 byte aligned address. Also firmware
32                 issue. Hopefully 3Com will fix it.
33         *) Waiting for a command response takes 8ms due to non-preemptable
34                 polling. Only significant for getting stats and creating
35                 SAs, but an ugly wart never the less.
36
37         TODO:
38         *) Doesn't do IPSEC offloading. Yet. Keep yer pants on, it's coming.
39         *) Add more support for ethtool (especially for NIC stats)
40         *) Allow disabling of RX checksum offloading
41         *) Fix MAC changing to work while the interface is up
42                 (Need to put commands on the TX ring, which changes
43                 the locking)
44         *) Add in FCS to {rx,tx}_bytes, since the hardware doesn't. See
45                 http://oss.sgi.com/cgi-bin/mesg.cgi?a=netdev&i=20031215152211.7003fe8e.rddunlap%40osdl.org
46 */
47
48 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
49  * Setting to > 1518 effectively disables this feature.
50  */
51 static int rx_copybreak = 200;
52
53 /* Should we use MMIO or Port IO?
54  * 0: Port IO
55  * 1: MMIO
56  * 2: Try MMIO, fallback to Port IO
57  */
58 static unsigned int use_mmio = 2;
59
60 /* end user-configurable values */
61
62 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
63  */
64 static const int multicast_filter_limit = 32;
65
66 /* Operational parameters that are set at compile time. */
67
68 /* Keep the ring sizes a power of two for compile efficiency.
69  * The compiler will convert <unsigned>'%'<2^N> into a bit mask.
70  * Making the Tx ring too large decreases the effectiveness of channel
71  * bonding and packet priority.
72  * There are no ill effects from too-large receive rings.
73  *
74  * We don't currently use the Hi Tx ring so, don't make it very big.
75  *
76  * Beware that if we start using the Hi Tx ring, we will need to change
77  * typhoon_num_free_tx() and typhoon_tx_complete() to account for that.
78  */
79 #define TXHI_ENTRIES            2
80 #define TXLO_ENTRIES            128
81 #define RX_ENTRIES              32
82 #define COMMAND_ENTRIES         16
83 #define RESPONSE_ENTRIES        32
84
85 #define COMMAND_RING_SIZE       (COMMAND_ENTRIES * sizeof(struct cmd_desc))
86 #define RESPONSE_RING_SIZE      (RESPONSE_ENTRIES * sizeof(struct resp_desc))
87
88 /* The 3XP will preload and remove 64 entries from the free buffer
89  * list, and we need one entry to keep the ring from wrapping, so
90  * to keep this a power of two, we use 128 entries.
91  */
92 #define RXFREE_ENTRIES          128
93 #define RXENT_ENTRIES           (RXFREE_ENTRIES - 1)
94
95 /* Operational parameters that usually are not changed. */
96
97 /* Time in jiffies before concluding the transmitter is hung. */
98 #define TX_TIMEOUT  (2*HZ)
99
100 #define PKT_BUF_SZ              1536
101 #define FIRMWARE_NAME           "3com/typhoon.bin"
102
103 #define pr_fmt(fmt)             KBUILD_MODNAME " " fmt
104
105 #include <linux/module.h>
106 #include <linux/kernel.h>
107 #include <linux/sched.h>
108 #include <linux/string.h>
109 #include <linux/timer.h>
110 #include <linux/errno.h>
111 #include <linux/ioport.h>
112 #include <linux/interrupt.h>
113 #include <linux/pci.h>
114 #include <linux/netdevice.h>
115 #include <linux/etherdevice.h>
116 #include <linux/skbuff.h>
117 #include <linux/mm.h>
118 #include <linux/init.h>
119 #include <linux/delay.h>
120 #include <linux/ethtool.h>
121 #include <linux/if_vlan.h>
122 #include <linux/crc32.h>
123 #include <linux/bitops.h>
124 #include <asm/processor.h>
125 #include <asm/io.h>
126 #include <asm/uaccess.h>
127 #include <linux/in6.h>
128 #include <linux/dma-mapping.h>
129 #include <linux/firmware.h>
130 #include <generated/utsrelease.h>
131
132 #include "typhoon.h"
133
134 MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
135 MODULE_VERSION(UTS_RELEASE);
136 MODULE_LICENSE("GPL");
137 MODULE_FIRMWARE(FIRMWARE_NAME);
138 MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
139 MODULE_PARM_DESC(rx_copybreak, "Packets smaller than this are copied and "
140                                "the buffer given back to the NIC. Default "
141                                "is 200.");
142 MODULE_PARM_DESC(use_mmio, "Use MMIO (1) or PIO(0) to access the NIC. "
143                            "Default is to try MMIO and fallback to PIO.");
144 module_param(rx_copybreak, int, 0);
145 module_param(use_mmio, int, 0);
146
147 #if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
148 #warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
149 #undef NETIF_F_TSO
150 #endif
151
152 #if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
153 #error TX ring too small!
154 #endif
155
156 struct typhoon_card_info {
157         const char *name;
158         const int capabilities;
159 };
160
161 #define TYPHOON_CRYPTO_NONE             0x00
162 #define TYPHOON_CRYPTO_DES              0x01
163 #define TYPHOON_CRYPTO_3DES             0x02
164 #define TYPHOON_CRYPTO_VARIABLE         0x04
165 #define TYPHOON_FIBER                   0x08
166 #define TYPHOON_WAKEUP_NEEDS_RESET      0x10
167
168 enum typhoon_cards {
169         TYPHOON_TX = 0, TYPHOON_TX95, TYPHOON_TX97, TYPHOON_SVR,
170         TYPHOON_SVR95, TYPHOON_SVR97, TYPHOON_TXM, TYPHOON_BSVR,
171         TYPHOON_FX95, TYPHOON_FX97, TYPHOON_FX95SVR, TYPHOON_FX97SVR,
172         TYPHOON_FXM,
173 };
174
175 /* directly indexed by enum typhoon_cards, above */
176 static struct typhoon_card_info typhoon_card_info[] __devinitdata = {
177         { "3Com Typhoon (3C990-TX)",
178                 TYPHOON_CRYPTO_NONE},
179         { "3Com Typhoon (3CR990-TX-95)",
180                 TYPHOON_CRYPTO_DES},
181         { "3Com Typhoon (3CR990-TX-97)",
182                 TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
183         { "3Com Typhoon (3C990SVR)",
184                 TYPHOON_CRYPTO_NONE},
185         { "3Com Typhoon (3CR990SVR95)",
186                 TYPHOON_CRYPTO_DES},
187         { "3Com Typhoon (3CR990SVR97)",
188                 TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
189         { "3Com Typhoon2 (3C990B-TX-M)",
190                 TYPHOON_CRYPTO_VARIABLE},
191         { "3Com Typhoon2 (3C990BSVR)",
192                 TYPHOON_CRYPTO_VARIABLE},
193         { "3Com Typhoon (3CR990-FX-95)",
194                 TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
195         { "3Com Typhoon (3CR990-FX-97)",
196                 TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
197         { "3Com Typhoon (3CR990-FX-95 Server)",
198                 TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
199         { "3Com Typhoon (3CR990-FX-97 Server)",
200                 TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
201         { "3Com Typhoon2 (3C990B-FX-97)",
202                 TYPHOON_CRYPTO_VARIABLE | TYPHOON_FIBER},
203 };
204
205 /* Notes on the new subsystem numbering scheme:
206  * bits 0-1 indicate crypto capabilities: (0) variable, (1) DES, or (2) 3DES
207  * bit 4 indicates if this card has secured firmware (we don't support it)
208  * bit 8 indicates if this is a (0) copper or (1) fiber card
209  * bits 12-16 indicate card type: (0) client and (1) server
210  */
211 static DEFINE_PCI_DEVICE_TABLE(typhoon_pci_tbl) = {
212         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990,
213           PCI_ANY_ID, PCI_ANY_ID, 0, 0,TYPHOON_TX },
214         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95,
215           PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX95 },
216         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_97,
217           PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX97 },
218         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
219           PCI_ANY_ID, 0x1000, 0, 0, TYPHOON_TXM },
220         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
221           PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FXM },
222         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
223           PCI_ANY_ID, 0x2000, 0, 0, TYPHOON_BSVR },
224         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
225           PCI_ANY_ID, 0x1101, 0, 0, TYPHOON_FX95 },
226         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
227           PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FX97 },
228         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
229           PCI_ANY_ID, 0x2101, 0, 0, TYPHOON_FX95SVR },
230         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
231           PCI_ANY_ID, 0x2102, 0, 0, TYPHOON_FX97SVR },
232         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR95,
233           PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR95 },
234         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR97,
235           PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR97 },
236         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR,
237           PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR },
238         { 0, }
239 };
240 MODULE_DEVICE_TABLE(pci, typhoon_pci_tbl);
241
242 /* Define the shared memory area
243  * Align everything the 3XP will normally be using.
244  * We'll need to move/align txHi if we start using that ring.
245  */
246 #define __3xp_aligned   ____cacheline_aligned
247 struct typhoon_shared {
248         struct typhoon_interface        iface;
249         struct typhoon_indexes          indexes                 __3xp_aligned;
250         struct tx_desc                  txLo[TXLO_ENTRIES]      __3xp_aligned;
251         struct rx_desc                  rxLo[RX_ENTRIES]        __3xp_aligned;
252         struct rx_desc                  rxHi[RX_ENTRIES]        __3xp_aligned;
253         struct cmd_desc                 cmd[COMMAND_ENTRIES]    __3xp_aligned;
254         struct resp_desc                resp[RESPONSE_ENTRIES]  __3xp_aligned;
255         struct rx_free                  rxBuff[RXFREE_ENTRIES]  __3xp_aligned;
256         u32                             zeroWord;
257         struct tx_desc                  txHi[TXHI_ENTRIES];
258 } __packed;
259
260 struct rxbuff_ent {
261         struct sk_buff *skb;
262         dma_addr_t      dma_addr;
263 };
264
265 struct typhoon {
266         /* Tx cache line section */
267         struct transmit_ring    txLoRing        ____cacheline_aligned;
268         struct pci_dev *        tx_pdev;
269         void __iomem            *tx_ioaddr;
270         u32                     txlo_dma_addr;
271
272         /* Irq/Rx cache line section */
273         void __iomem            *ioaddr         ____cacheline_aligned;
274         struct typhoon_indexes *indexes;
275         u8                      awaiting_resp;
276         u8                      duplex;
277         u8                      speed;
278         u8                      card_state;
279         struct basic_ring       rxLoRing;
280         struct pci_dev *        pdev;
281         struct net_device *     dev;
282         struct napi_struct      napi;
283         spinlock_t              state_lock;
284         struct vlan_group *     vlgrp;
285         struct basic_ring       rxHiRing;
286         struct basic_ring       rxBuffRing;
287         struct rxbuff_ent       rxbuffers[RXENT_ENTRIES];
288
289         /* general section */
290         spinlock_t              command_lock    ____cacheline_aligned;
291         struct basic_ring       cmdRing;
292         struct basic_ring       respRing;
293         struct net_device_stats stats;
294         struct net_device_stats stats_saved;
295         struct typhoon_shared * shared;
296         dma_addr_t              shared_dma;
297         __le16                  xcvr_select;
298         __le16                  wol_events;
299         __le32                  offload;
300
301         /* unused stuff (future use) */
302         int                     capabilities;
303         struct transmit_ring    txHiRing;
304 };
305
306 enum completion_wait_values {
307         NoWait = 0, WaitNoSleep, WaitSleep,
308 };
309
310 /* These are the values for the typhoon.card_state variable.
311  * These determine where the statistics will come from in get_stats().
312  * The sleep image does not support the statistics we need.
313  */
314 enum state_values {
315         Sleeping = 0, Running,
316 };
317
318 /* PCI writes are not guaranteed to be posted in order, but outstanding writes
319  * cannot pass a read, so this forces current writes to post.
320  */
321 #define typhoon_post_pci_writes(x) \
322         do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)
323
324 /* We'll wait up to six seconds for a reset, and half a second normally.
325  */
326 #define TYPHOON_UDELAY                  50
327 #define TYPHOON_RESET_TIMEOUT_SLEEP     (6 * HZ)
328 #define TYPHOON_RESET_TIMEOUT_NOSLEEP   ((6 * 1000000) / TYPHOON_UDELAY)
329 #define TYPHOON_WAIT_TIMEOUT            ((1000000 / 2) / TYPHOON_UDELAY)
330
331 #if defined(NETIF_F_TSO)
332 #define skb_tso_size(x)         (skb_shinfo(x)->gso_size)
333 #define TSO_NUM_DESCRIPTORS     2
334 #define TSO_OFFLOAD_ON          TYPHOON_OFFLOAD_TCP_SEGMENT
335 #else
336 #define NETIF_F_TSO             0
337 #define skb_tso_size(x)         0
338 #define TSO_NUM_DESCRIPTORS     0
339 #define TSO_OFFLOAD_ON          0
340 #endif
341
342 static inline void
343 typhoon_inc_index(u32 *index, const int count, const int num_entries)
344 {
345         /* Increment a ring index -- we can use this for all rings execept
346          * the Rx rings, as they use different size descriptors
347          * otherwise, everything is the same size as a cmd_desc
348          */
349         *index += count * sizeof(struct cmd_desc);
350         *index %= num_entries * sizeof(struct cmd_desc);
351 }
352
353 static inline void
354 typhoon_inc_cmd_index(u32 *index, const int count)
355 {
356         typhoon_inc_index(index, count, COMMAND_ENTRIES);
357 }
358
359 static inline void
360 typhoon_inc_resp_index(u32 *index, const int count)
361 {
362         typhoon_inc_index(index, count, RESPONSE_ENTRIES);
363 }
364
365 static inline void
366 typhoon_inc_rxfree_index(u32 *index, const int count)
367 {
368         typhoon_inc_index(index, count, RXFREE_ENTRIES);
369 }
370
371 static inline void
372 typhoon_inc_tx_index(u32 *index, const int count)
373 {
374         /* if we start using the Hi Tx ring, this needs updateing */
375         typhoon_inc_index(index, count, TXLO_ENTRIES);
376 }
377
378 static inline void
379 typhoon_inc_rx_index(u32 *index, const int count)
380 {
381         /* sizeof(struct rx_desc) != sizeof(struct cmd_desc) */
382         *index += count * sizeof(struct rx_desc);
383         *index %= RX_ENTRIES * sizeof(struct rx_desc);
384 }
385
386 static int
387 typhoon_reset(void __iomem *ioaddr, int wait_type)
388 {
389         int i, err = 0;
390         int timeout;
391
392         if(wait_type == WaitNoSleep)
393                 timeout = TYPHOON_RESET_TIMEOUT_NOSLEEP;
394         else
395                 timeout = TYPHOON_RESET_TIMEOUT_SLEEP;
396
397         iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
398         iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
399
400         iowrite32(TYPHOON_RESET_ALL, ioaddr + TYPHOON_REG_SOFT_RESET);
401         typhoon_post_pci_writes(ioaddr);
402         udelay(1);
403         iowrite32(TYPHOON_RESET_NONE, ioaddr + TYPHOON_REG_SOFT_RESET);
404
405         if(wait_type != NoWait) {
406                 for(i = 0; i < timeout; i++) {
407                         if(ioread32(ioaddr + TYPHOON_REG_STATUS) ==
408                            TYPHOON_STATUS_WAITING_FOR_HOST)
409                                 goto out;
410
411                         if(wait_type == WaitSleep)
412                                 schedule_timeout_uninterruptible(1);
413                         else
414                                 udelay(TYPHOON_UDELAY);
415                 }
416
417                 err = -ETIMEDOUT;
418         }
419
420 out:
421         iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
422         iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
423
424         /* The 3XP seems to need a little extra time to complete the load
425          * of the sleep image before we can reliably boot it. Failure to
426          * do this occasionally results in a hung adapter after boot in
427          * typhoon_init_one() while trying to read the MAC address or
428          * putting the card to sleep. 3Com's driver waits 5ms, but
429          * that seems to be overkill. However, if we can sleep, we might
430          * as well give it that much time. Otherwise, we'll give it 500us,
431          * which should be enough (I've see it work well at 100us, but still
432          * saw occasional problems.)
433          */
434         if(wait_type == WaitSleep)
435                 msleep(5);
436         else
437                 udelay(500);
438         return err;
439 }
440
441 static int
442 typhoon_wait_status(void __iomem *ioaddr, u32 wait_value)
443 {
444         int i, err = 0;
445
446         for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
447                 if(ioread32(ioaddr + TYPHOON_REG_STATUS) == wait_value)
448                         goto out;
449                 udelay(TYPHOON_UDELAY);
450         }
451
452         err = -ETIMEDOUT;
453
454 out:
455         return err;
456 }
457
458 static inline void
459 typhoon_media_status(struct net_device *dev, struct resp_desc *resp)
460 {
461         if(resp->parm1 & TYPHOON_MEDIA_STAT_NO_LINK)
462                 netif_carrier_off(dev);
463         else
464                 netif_carrier_on(dev);
465 }
466
467 static inline void
468 typhoon_hello(struct typhoon *tp)
469 {
470         struct basic_ring *ring = &tp->cmdRing;
471         struct cmd_desc *cmd;
472
473         /* We only get a hello request if we've not sent anything to the
474          * card in a long while. If the lock is held, then we're in the
475          * process of issuing a command, so we don't need to respond.
476          */
477         if(spin_trylock(&tp->command_lock)) {
478                 cmd = (struct cmd_desc *)(ring->ringBase + ring->lastWrite);
479                 typhoon_inc_cmd_index(&ring->lastWrite, 1);
480
481                 INIT_COMMAND_NO_RESPONSE(cmd, TYPHOON_CMD_HELLO_RESP);
482                 wmb();
483                 iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
484                 spin_unlock(&tp->command_lock);
485         }
486 }
487
488 static int
489 typhoon_process_response(struct typhoon *tp, int resp_size,
490                                 struct resp_desc *resp_save)
491 {
492         struct typhoon_indexes *indexes = tp->indexes;
493         struct resp_desc *resp;
494         u8 *base = tp->respRing.ringBase;
495         int count, len, wrap_len;
496         u32 cleared;
497         u32 ready;
498
499         cleared = le32_to_cpu(indexes->respCleared);
500         ready = le32_to_cpu(indexes->respReady);
501         while(cleared != ready) {
502                 resp = (struct resp_desc *)(base + cleared);
503                 count = resp->numDesc + 1;
504                 if(resp_save && resp->seqNo) {
505                         if(count > resp_size) {
506                                 resp_save->flags = TYPHOON_RESP_ERROR;
507                                 goto cleanup;
508                         }
509
510                         wrap_len = 0;
511                         len = count * sizeof(*resp);
512                         if(unlikely(cleared + len > RESPONSE_RING_SIZE)) {
513                                 wrap_len = cleared + len - RESPONSE_RING_SIZE;
514                                 len = RESPONSE_RING_SIZE - cleared;
515                         }
516
517                         memcpy(resp_save, resp, len);
518                         if(unlikely(wrap_len)) {
519                                 resp_save += len / sizeof(*resp);
520                                 memcpy(resp_save, base, wrap_len);
521                         }
522
523                         resp_save = NULL;
524                 } else if(resp->cmd == TYPHOON_CMD_READ_MEDIA_STATUS) {
525                         typhoon_media_status(tp->dev, resp);
526                 } else if(resp->cmd == TYPHOON_CMD_HELLO_RESP) {
527                         typhoon_hello(tp);
528                 } else {
529                         netdev_err(tp->dev,
530                                    "dumping unexpected response 0x%04x:%d:0x%02x:0x%04x:%08x:%08x\n",
531                                    le16_to_cpu(resp->cmd),
532                                    resp->numDesc, resp->flags,
533                                    le16_to_cpu(resp->parm1),
534                                    le32_to_cpu(resp->parm2),
535                                    le32_to_cpu(resp->parm3));
536                 }
537
538 cleanup:
539                 typhoon_inc_resp_index(&cleared, count);
540         }
541
542         indexes->respCleared = cpu_to_le32(cleared);
543         wmb();
544         return resp_save == NULL;
545 }
546
547 static inline int
548 typhoon_num_free(int lastWrite, int lastRead, int ringSize)
549 {
550         /* this works for all descriptors but rx_desc, as they are a
551          * different size than the cmd_desc -- everyone else is the same
552          */
553         lastWrite /= sizeof(struct cmd_desc);
554         lastRead /= sizeof(struct cmd_desc);
555         return (ringSize + lastRead - lastWrite - 1) % ringSize;
556 }
557
558 static inline int
559 typhoon_num_free_cmd(struct typhoon *tp)
560 {
561         int lastWrite = tp->cmdRing.lastWrite;
562         int cmdCleared = le32_to_cpu(tp->indexes->cmdCleared);
563
564         return typhoon_num_free(lastWrite, cmdCleared, COMMAND_ENTRIES);
565 }
566
567 static inline int
568 typhoon_num_free_resp(struct typhoon *tp)
569 {
570         int respReady = le32_to_cpu(tp->indexes->respReady);
571         int respCleared = le32_to_cpu(tp->indexes->respCleared);
572
573         return typhoon_num_free(respReady, respCleared, RESPONSE_ENTRIES);
574 }
575
576 static inline int
577 typhoon_num_free_tx(struct transmit_ring *ring)
578 {
579         /* if we start using the Hi Tx ring, this needs updating */
580         return typhoon_num_free(ring->lastWrite, ring->lastRead, TXLO_ENTRIES);
581 }
582
583 static int
584 typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
585                       int num_resp, struct resp_desc *resp)
586 {
587         struct typhoon_indexes *indexes = tp->indexes;
588         struct basic_ring *ring = &tp->cmdRing;
589         struct resp_desc local_resp;
590         int i, err = 0;
591         int got_resp;
592         int freeCmd, freeResp;
593         int len, wrap_len;
594
595         spin_lock(&tp->command_lock);
596
597         freeCmd = typhoon_num_free_cmd(tp);
598         freeResp = typhoon_num_free_resp(tp);
599
600         if(freeCmd < num_cmd || freeResp < num_resp) {
601                 netdev_err(tp->dev, "no descs for cmd, had (needed) %d (%d) cmd, %d (%d) resp\n",
602                            freeCmd, num_cmd, freeResp, num_resp);
603                 err = -ENOMEM;
604                 goto out;
605         }
606
607         if(cmd->flags & TYPHOON_CMD_RESPOND) {
608                 /* If we're expecting a response, but the caller hasn't given
609                  * us a place to put it, we'll provide one.
610                  */
611                 tp->awaiting_resp = 1;
612                 if(resp == NULL) {
613                         resp = &local_resp;
614                         num_resp = 1;
615                 }
616         }
617
618         wrap_len = 0;
619         len = num_cmd * sizeof(*cmd);
620         if(unlikely(ring->lastWrite + len > COMMAND_RING_SIZE)) {
621                 wrap_len = ring->lastWrite + len - COMMAND_RING_SIZE;
622                 len = COMMAND_RING_SIZE - ring->lastWrite;
623         }
624
625         memcpy(ring->ringBase + ring->lastWrite, cmd, len);
626         if(unlikely(wrap_len)) {
627                 struct cmd_desc *wrap_ptr = cmd;
628                 wrap_ptr += len / sizeof(*cmd);
629                 memcpy(ring->ringBase, wrap_ptr, wrap_len);
630         }
631
632         typhoon_inc_cmd_index(&ring->lastWrite, num_cmd);
633
634         /* "I feel a presence... another warrior is on the mesa."
635          */
636         wmb();
637         iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
638         typhoon_post_pci_writes(tp->ioaddr);
639
640         if((cmd->flags & TYPHOON_CMD_RESPOND) == 0)
641                 goto out;
642
643         /* Ugh. We'll be here about 8ms, spinning our thumbs, unable to
644          * preempt or do anything other than take interrupts. So, don't
645          * wait for a response unless you have to.
646          *
647          * I've thought about trying to sleep here, but we're called
648          * from many contexts that don't allow that. Also, given the way
649          * 3Com has implemented irq coalescing, we would likely timeout --
650          * this has been observed in real life!
651          *
652          * The big killer is we have to wait to get stats from the card,
653          * though we could go to a periodic refresh of those if we don't
654          * mind them getting somewhat stale. The rest of the waiting
655          * commands occur during open/close/suspend/resume, so they aren't
656          * time critical. Creating SAs in the future will also have to
657          * wait here.
658          */
659         got_resp = 0;
660         for(i = 0; i < TYPHOON_WAIT_TIMEOUT && !got_resp; i++) {
661                 if(indexes->respCleared != indexes->respReady)
662                         got_resp = typhoon_process_response(tp, num_resp,
663                                                                 resp);
664                 udelay(TYPHOON_UDELAY);
665         }
666
667         if(!got_resp) {
668                 err = -ETIMEDOUT;
669                 goto out;
670         }
671
672         /* Collect the error response even if we don't care about the
673          * rest of the response
674          */
675         if(resp->flags & TYPHOON_RESP_ERROR)
676                 err = -EIO;
677
678 out:
679         if(tp->awaiting_resp) {
680                 tp->awaiting_resp = 0;
681                 smp_wmb();
682
683                 /* Ugh. If a response was added to the ring between
684                  * the call to typhoon_process_response() and the clearing
685                  * of tp->awaiting_resp, we could have missed the interrupt
686                  * and it could hang in the ring an indeterminate amount of
687                  * time. So, check for it, and interrupt ourselves if this
688                  * is the case.
689                  */
690                 if(indexes->respCleared != indexes->respReady)
691                         iowrite32(1, tp->ioaddr + TYPHOON_REG_SELF_INTERRUPT);
692         }
693
694         spin_unlock(&tp->command_lock);
695         return err;
696 }
697
698 static void
699 typhoon_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
700 {
701         struct typhoon *tp = netdev_priv(dev);
702         struct cmd_desc xp_cmd;
703         int err;
704
705         spin_lock_bh(&tp->state_lock);
706         if(!tp->vlgrp != !grp) {
707                 /* We've either been turned on for the first time, or we've
708                  * been turned off. Update the 3XP.
709                  */
710                 if(grp)
711                         tp->offload |= TYPHOON_OFFLOAD_VLAN;
712                 else
713                         tp->offload &= ~TYPHOON_OFFLOAD_VLAN;
714
715                 /* If the interface is up, the runtime is running -- and we
716                  * must be up for the vlan core to call us.
717                  *
718                  * Do the command outside of the spin lock, as it is slow.
719                  */
720                 INIT_COMMAND_WITH_RESPONSE(&xp_cmd,
721                                         TYPHOON_CMD_SET_OFFLOAD_TASKS);
722                 xp_cmd.parm2 = tp->offload;
723                 xp_cmd.parm3 = tp->offload;
724                 spin_unlock_bh(&tp->state_lock);
725                 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
726                 if(err < 0)
727                         netdev_err(tp->dev, "vlan offload error %d\n", -err);
728                 spin_lock_bh(&tp->state_lock);
729         }
730
731         /* now make the change visible */
732         tp->vlgrp = grp;
733         spin_unlock_bh(&tp->state_lock);
734 }
735
736 static inline void
737 typhoon_tso_fill(struct sk_buff *skb, struct transmit_ring *txRing,
738                         u32 ring_dma)
739 {
740         struct tcpopt_desc *tcpd;
741         u32 tcpd_offset = ring_dma;
742
743         tcpd = (struct tcpopt_desc *) (txRing->ringBase + txRing->lastWrite);
744         tcpd_offset += txRing->lastWrite;
745         tcpd_offset += offsetof(struct tcpopt_desc, bytesTx);
746         typhoon_inc_tx_index(&txRing->lastWrite, 1);
747
748         tcpd->flags = TYPHOON_OPT_DESC | TYPHOON_OPT_TCP_SEG;
749         tcpd->numDesc = 1;
750         tcpd->mss_flags = cpu_to_le16(skb_tso_size(skb));
751         tcpd->mss_flags |= TYPHOON_TSO_FIRST | TYPHOON_TSO_LAST;
752         tcpd->respAddrLo = cpu_to_le32(tcpd_offset);
753         tcpd->bytesTx = cpu_to_le32(skb->len);
754         tcpd->status = 0;
755 }
756
/* Hard-start transmit: build the descriptor chain for @skb on the low
 * priority Tx ring and kick the 3XP.  The chain is one packet (header)
 * descriptor, an optional TSO option descriptor, then one fragment
 * descriptor per DMA segment.  Always returns NETDEV_TX_OK; if the ring
 * might not hold another worst-case packet the queue is stopped instead
 * of the packet being dropped.
 */
static netdev_tx_t
typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);
	struct transmit_ring *txRing;
	struct tx_desc *txd, *first_txd;
	dma_addr_t skb_dma;
	int numDesc;

	/* we have two rings to choose from, but we only use txLo for now
	 * If we start using the Hi ring as well, we'll need to update
	 * typhoon_stop_runtime(), typhoon_interrupt(), typhoon_num_free_tx(),
	 * and TXHI_ENTRIES to match, as well as update the TSO code below
	 * to get the right DMA address
	 */
	txRing = &tp->txLoRing;

	/* We need one descriptor for each fragment of the sk_buff, plus the
	 * one for the ->data area of it.
	 *
	 * The docs say a maximum of 16 fragment descriptors per TCP option
	 * descriptor, then make a new packet descriptor and option descriptor
	 * for the next 16 fragments. The engineers say just an option
	 * descriptor is needed. I've tested up to 26 fragments with a single
	 * packet descriptor/option descriptor combo, so I use that for now.
	 *
	 * If problems develop with TSO, check this first.
	 */
	numDesc = skb_shinfo(skb)->nr_frags + 1;
	if (skb_is_gso(skb))
		numDesc++;

	/* When checking for free space in the ring, we need to also
	 * account for the initial Tx descriptor, and we always must leave
	 * at least one descriptor unused in the ring so that it doesn't
	 * wrap and look empty.
	 *
	 * The only time we should loop here is when we hit the race
	 * between marking the queue awake and updating the cleared index.
	 * Just loop and it will appear. This comes from the acenic driver.
	 */
	while(unlikely(typhoon_num_free_tx(txRing) < (numDesc + 2)))
		smp_rmb();

	first_txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	first_txd->flags = TYPHOON_TX_DESC | TYPHOON_DESC_VALID;
	first_txd->numDesc = 0;
	first_txd->len = 0;
	/* stash the skb pointer in the (otherwise unused) address field of
	 * the header descriptor so Tx completion can find it to free it;
	 * the NIC does not interpret this field for a packet descriptor */
	first_txd->tx_addr = (u64)((unsigned long) skb);
	first_txd->processFlags = 0;

	if(skb->ip_summed == CHECKSUM_PARTIAL) {
		/* The 3XP will figure out if this is UDP/TCP */
		first_txd->processFlags |= TYPHOON_TX_PF_TCP_CHKSUM;
		first_txd->processFlags |= TYPHOON_TX_PF_UDP_CHKSUM;
		first_txd->processFlags |= TYPHOON_TX_PF_IP_CHKSUM;
	}

	if(vlan_tx_tag_present(skb)) {
		/* ask the firmware to insert the VLAN tag; the tag goes on
		 * the wire big-endian, hence the ntohs() */
		first_txd->processFlags |=
		    TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY;
		first_txd->processFlags |=
		    cpu_to_le32(ntohs(vlan_tx_tag_get(skb)) <<
				TYPHOON_TX_PF_VLAN_TAG_SHIFT);
	}

	if (skb_is_gso(skb)) {
		first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT;
		first_txd->numDesc++;

		typhoon_tso_fill(skb, txRing, tp->txlo_dma_addr);
	}

	txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	/* No need to worry about padding packet -- the firmware pads
	 * it with zeros to ETH_ZLEN for us.
	 */
	if(skb_shinfo(skb)->nr_frags == 0) {
		/* linear skb: a single fragment descriptor covers it all */
		skb_dma = pci_map_single(tp->tx_pdev, skb->data, skb->len,
				       PCI_DMA_TODEVICE);
		txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
		txd->len = cpu_to_le16(skb->len);
		txd->frag.addr = cpu_to_le32(skb_dma);
		txd->frag.addrHi = 0;
		first_txd->numDesc++;
	} else {
		int i, len;

		/* head area first, then one descriptor per page fragment */
		len = skb_headlen(skb);
		skb_dma = pci_map_single(tp->tx_pdev, skb->data, len,
				         PCI_DMA_TODEVICE);
		txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
		txd->len = cpu_to_le16(len);
		txd->frag.addr = cpu_to_le32(skb_dma);
		txd->frag.addrHi = 0;
		first_txd->numDesc++;

		for(i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			void *frag_addr;

			txd = (struct tx_desc *) (txRing->ringBase +
						txRing->lastWrite);
			typhoon_inc_tx_index(&txRing->lastWrite, 1);

			len = frag->size;
			frag_addr = (void *) page_address(frag->page) +
						frag->page_offset;
			skb_dma = pci_map_single(tp->tx_pdev, frag_addr, len,
					 PCI_DMA_TODEVICE);
			txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
			txd->len = cpu_to_le16(len);
			txd->frag.addr = cpu_to_le32(skb_dma);
			txd->frag.addrHi = 0;
			first_txd->numDesc++;
		}
	}

	/* Kick the 3XP
	 */
	wmb();	/* descriptors must be visible before the doorbell write */
	iowrite32(txRing->lastWrite, tp->tx_ioaddr + txRing->writeRegister);

	/* If we don't have room to put the worst case packet on the
	 * queue, then we must stop the queue. We need 2 extra
	 * descriptors -- one to prevent ring wrap, and one for the
	 * Tx header.
	 */
	numDesc = MAX_SKB_FRAGS + TSO_NUM_DESCRIPTORS + 1;

	if(typhoon_num_free_tx(txRing) < (numDesc + 2)) {
		netif_stop_queue(dev);

		/* A Tx complete IRQ could have gotten inbetween, making
		 * the ring free again. Only need to recheck here, since
		 * Tx is serialized.
		 */
		if(typhoon_num_free_tx(txRing) >= (numDesc + 2))
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
904
/* Program the 3XP's receive filter from the device flags and multicast
 * list: promiscuous, all-multicast, or a 64-bit CRC hash of the
 * multicast addresses.  Issues SET_MULTICAST_HASH (when hashing) and
 * SET_RX_FILTER mailbox commands; their return values are ignored, as
 * there is no useful recovery here.
 */
static void
typhoon_set_rx_mode(struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);
	struct cmd_desc xp_cmd;
	u32 mc_filter[2];	/* 64-bit multicast hash table */
	__le16 filter;

	filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
	if(dev->flags & IFF_PROMISC) {
		filter |= TYPHOON_RX_FILTER_PROMISCOUS;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		  (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		filter |= TYPHOON_RX_FILTER_ALL_MCAST;
	} else if (!netdev_mc_empty(dev)) {
		struct netdev_hw_addr *ha;

		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			/* hash on the low 6 bits of the address CRC */
			int bit = ether_crc(ETH_ALEN, ha->addr) & 0x3f;
			mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
		}

		INIT_COMMAND_NO_RESPONSE(&xp_cmd,
					 TYPHOON_CMD_SET_MULTICAST_HASH);
		xp_cmd.parm1 = TYPHOON_MCAST_HASH_SET;
		xp_cmd.parm2 = cpu_to_le32(mc_filter[0]);
		xp_cmd.parm3 = cpu_to_le32(mc_filter[1]);
		typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

		filter |= TYPHOON_RX_FILTER_MCAST_HASH;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
	xp_cmd.parm1 = filter;
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
}
943
/* Query the running firmware for its statistics block and fold the
 * result into tp->stats, adding the values saved from before the last
 * sleep (the card's counters reset across sleep).  Also refreshes
 * tp->speed / tp->duplex from the reported link status.  Returns 0 on
 * success or the negative error from typhoon_issue_command().
 */
static int
typhoon_do_get_stats(struct typhoon *tp)
{
	struct net_device_stats *stats = &tp->stats;
	struct net_device_stats *saved = &tp->stats_saved;
	struct cmd_desc xp_cmd;
	struct resp_desc xp_resp[7];
	struct stats_resp *s = (struct stats_resp *) xp_resp;
	int err;

	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_STATS);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 7, xp_resp);
	if(err < 0)
		return err;

	/* 3Com's Linux driver uses txMultipleCollisions as it's
	 * collisions value, but there is some other collision info as well...
	 *
	 * The extra status reported would be a good candidate for
	 * ethtool_ops->get_{strings,stats}()
	 */
	stats->tx_packets = le32_to_cpu(s->txPackets) +
			saved->tx_packets;
	stats->tx_bytes = le64_to_cpu(s->txBytes) +
			saved->tx_bytes;
	stats->tx_errors = le32_to_cpu(s->txCarrierLost) +
			saved->tx_errors;
	stats->tx_carrier_errors = le32_to_cpu(s->txCarrierLost) +
			saved->tx_carrier_errors;
	stats->collisions = le32_to_cpu(s->txMultipleCollisions) +
			saved->collisions;
	stats->rx_packets = le32_to_cpu(s->rxPacketsGood) +
			saved->rx_packets;
	stats->rx_bytes = le64_to_cpu(s->rxBytesGood) +
			saved->rx_bytes;
	stats->rx_fifo_errors = le32_to_cpu(s->rxFifoOverruns) +
			saved->rx_fifo_errors;
	stats->rx_errors = le32_to_cpu(s->rxFifoOverruns) +
			le32_to_cpu(s->BadSSD) + le32_to_cpu(s->rxCrcErrors) +
			saved->rx_errors;
	stats->rx_crc_errors = le32_to_cpu(s->rxCrcErrors) +
			saved->rx_crc_errors;
	stats->rx_length_errors = le32_to_cpu(s->rxOversized) +
			saved->rx_length_errors;
	/* cache the link state for ethtool's get_settings */
	tp->speed = (s->linkStatus & TYPHOON_LINK_100MBPS) ?
			SPEED_100 : SPEED_10;
	tp->duplex = (s->linkStatus & TYPHOON_LINK_FULL_DUPLEX) ?
			DUPLEX_FULL : DUPLEX_HALF;

	return 0;
}
995
996 static struct net_device_stats *
997 typhoon_get_stats(struct net_device *dev)
998 {
999         struct typhoon *tp = netdev_priv(dev);
1000         struct net_device_stats *stats = &tp->stats;
1001         struct net_device_stats *saved = &tp->stats_saved;
1002
1003         smp_rmb();
1004         if(tp->card_state == Sleeping)
1005                 return saved;
1006
1007         if(typhoon_do_get_stats(tp) < 0) {
1008                 netdev_err(dev, "error getting stats\n");
1009                 return saved;
1010         }
1011
1012         return stats;
1013 }
1014
1015 static int
1016 typhoon_set_mac_address(struct net_device *dev, void *addr)
1017 {
1018         struct sockaddr *saddr = (struct sockaddr *) addr;
1019
1020         if(netif_running(dev))
1021                 return -EBUSY;
1022
1023         memcpy(dev->dev_addr, saddr->sa_data, dev->addr_len);
1024         return 0;
1025 }
1026
/* ethtool get_drvinfo: report driver name, version, bus info, and the
 * firmware version.  The running firmware is queried for its version;
 * if the card is asleep (or the query fails) a descriptive placeholder
 * string is reported instead.
 */
static void
typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct typhoon *tp = netdev_priv(dev);
	struct pci_dev *pci_dev = tp->pdev;
	struct cmd_desc xp_cmd;
	struct resp_desc xp_resp[3];

	/* pair with the write barrier done when card_state changes */
	smp_rmb();
	if(tp->card_state == Sleeping) {
		strcpy(info->fw_version, "Sleep image");
	} else {
		INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
		if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
			strcpy(info->fw_version, "Unknown runtime");
		} else {
			/* version is packed as 8.12.12 bits: xx.xxx.xxx */
			u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
			snprintf(info->fw_version, 32, "%02x.%03x.%03x",
				 sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
				 sleep_ver & 0xfff);
		}
	}

	strcpy(info->driver, KBUILD_MODNAME);
	strcpy(info->version, UTS_RELEASE);
	strcpy(info->bus_info, pci_name(pci_dev));
}
1054
/* ethtool get_settings: report supported/advertised modes from the
 * configured transceiver selection, port type from the FIBER
 * capability bit, and current speed/duplex (refreshed via a firmware
 * stats query, which is where the link state lives).  Always returns 0.
 */
static int
typhoon_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct typhoon *tp = netdev_priv(dev);

	cmd->supported = SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
				SUPPORTED_Autoneg;

	switch (tp->xcvr_select) {
	case TYPHOON_XCVR_10HALF:
		cmd->advertising = ADVERTISED_10baseT_Half;
		break;
	case TYPHOON_XCVR_10FULL:
		cmd->advertising = ADVERTISED_10baseT_Full;
		break;
	case TYPHOON_XCVR_100HALF:
		cmd->advertising = ADVERTISED_100baseT_Half;
		break;
	case TYPHOON_XCVR_100FULL:
		cmd->advertising = ADVERTISED_100baseT_Full;
		break;
	case TYPHOON_XCVR_AUTONEG:
		cmd->advertising = ADVERTISED_10baseT_Half |
					    ADVERTISED_10baseT_Full |
					    ADVERTISED_100baseT_Half |
					    ADVERTISED_100baseT_Full |
					    ADVERTISED_Autoneg;
		break;
	}

	if(tp->capabilities & TYPHOON_FIBER) {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->advertising |= ADVERTISED_FIBRE;
		cmd->port = PORT_FIBRE;
	} else {
		cmd->supported |= SUPPORTED_10baseT_Half |
					SUPPORTED_10baseT_Full |
					SUPPORTED_TP;
		cmd->advertising |= ADVERTISED_TP;
		cmd->port = PORT_TP;
	}

	/* need to get stats to make these link speed/duplex valid */
	typhoon_do_get_stats(tp);
	cmd->speed = tp->speed;
	cmd->duplex = tp->duplex;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	if(tp->xcvr_select == TYPHOON_XCVR_AUTONEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;
	cmd->maxtxpkt = 1;
	cmd->maxrxpkt = 1;

	return 0;
}
1112
/* ethtool set_settings: map the requested autoneg/speed/duplex to a
 * transceiver selection, send it to the firmware via XCVR_SELECT, and
 * cache the new selection.  Returns -EINVAL for unsupported
 * speed/duplex combinations, or the command error.
 */
static int
typhoon_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct typhoon *tp = netdev_priv(dev);
	struct cmd_desc xp_cmd;
	__le16 xcvr;
	int err;

	err = -EINVAL;
	if(cmd->autoneg == AUTONEG_ENABLE) {
		xcvr = TYPHOON_XCVR_AUTONEG;
	} else {
		if(cmd->duplex == DUPLEX_HALF) {
			if(cmd->speed == SPEED_10)
				xcvr = TYPHOON_XCVR_10HALF;
			else if(cmd->speed == SPEED_100)
				xcvr = TYPHOON_XCVR_100HALF;
			else
				goto out;
		} else if(cmd->duplex == DUPLEX_FULL) {
			if(cmd->speed == SPEED_10)
				xcvr = TYPHOON_XCVR_10FULL;
			else if(cmd->speed == SPEED_100)
				xcvr = TYPHOON_XCVR_100FULL;
			else
				goto out;
		} else
			goto out;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
	xp_cmd.parm1 = xcvr;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto out;

	tp->xcvr_select = xcvr;
	if(cmd->autoneg == AUTONEG_ENABLE) {
		/* actual speed/duplex unknown until the next stats query */
		tp->speed = 0xff;	/* invalid */
		tp->duplex = 0xff;	/* invalid */
	} else {
		tp->speed = cmd->speed;
		tp->duplex = cmd->duplex;
	}

out:
	return err;
}
1161
1162 static void
1163 typhoon_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1164 {
1165         struct typhoon *tp = netdev_priv(dev);
1166
1167         wol->supported = WAKE_PHY | WAKE_MAGIC;
1168         wol->wolopts = 0;
1169         if(tp->wol_events & TYPHOON_WAKE_LINK_EVENT)
1170                 wol->wolopts |= WAKE_PHY;
1171         if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
1172                 wol->wolopts |= WAKE_MAGIC;
1173         memset(&wol->sopass, 0, sizeof(wol->sopass));
1174 }
1175
1176 static int
1177 typhoon_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1178 {
1179         struct typhoon *tp = netdev_priv(dev);
1180
1181         if(wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
1182                 return -EINVAL;
1183
1184         tp->wol_events = 0;
1185         if(wol->wolopts & WAKE_PHY)
1186                 tp->wol_events |= TYPHOON_WAKE_LINK_EVENT;
1187         if(wol->wolopts & WAKE_MAGIC)
1188                 tp->wol_events |= TYPHOON_WAKE_MAGIC_PKT;
1189
1190         return 0;
1191 }
1192
/* ethtool get_rx_csum: Rx checksum offload cannot be disabled on this
 * hardware, so it is always reported as enabled.
 */
static u32
typhoon_get_rx_csum(struct net_device *dev)
{
	/* For now, we don't allow turning off RX checksums.
	 */
	return 1;
}
1200
1201 static void
1202 typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
1203 {
1204         ering->rx_max_pending = RXENT_ENTRIES;
1205         ering->rx_mini_max_pending = 0;
1206         ering->rx_jumbo_max_pending = 0;
1207         ering->tx_max_pending = TXLO_ENTRIES - 1;
1208
1209         ering->rx_pending = RXENT_ENTRIES;
1210         ering->rx_mini_pending = 0;
1211         ering->rx_jumbo_pending = 0;
1212         ering->tx_pending = TXLO_ENTRIES - 1;
1213 }
1214
/* ethtool operations table.  Tx checksum, scatter-gather, and TSO use
 * the generic toggles; Rx checksum is report-only (see
 * typhoon_get_rx_csum).
 */
static const struct ethtool_ops typhoon_ethtool_ops = {
	.get_settings		= typhoon_get_settings,
	.set_settings		= typhoon_set_settings,
	.get_drvinfo		= typhoon_get_drvinfo,
	.get_wol		= typhoon_get_wol,
	.set_wol		= typhoon_set_wol,
	.get_link		= ethtool_op_get_link,
	.get_rx_csum		= typhoon_get_rx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= ethtool_op_set_tso,
	.get_ringparam		= typhoon_get_ringparam,
};
1228
1229 static int
1230 typhoon_wait_interrupt(void __iomem *ioaddr)
1231 {
1232         int i, err = 0;
1233
1234         for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
1235                 if(ioread32(ioaddr + TYPHOON_REG_INTR_STATUS) &
1236                    TYPHOON_INTR_BOOTCMD)
1237                         goto out;
1238                 udelay(TYPHOON_UDELAY);
1239         }
1240
1241         err = -ETIMEDOUT;
1242
1243 out:
1244         iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
1245         return err;
1246 }
1247
1248 #define shared_offset(x)        offsetof(struct typhoon_shared, x)
1249
/* Initialize the host/NIC shared memory area: fill in the boot-record
 * interface structure with the bus addresses and sizes of every ring,
 * wire up the host-side ring bookkeeping, and set the default offload
 * and card state.  Must be called before handing the boot record to
 * the 3XP.
 */
static void
typhoon_init_interface(struct typhoon *tp)
{
	struct typhoon_interface *iface = &tp->shared->iface;
	dma_addr_t shared_dma;

	memset(tp->shared, 0, sizeof(struct typhoon_shared));

	/* The *Hi members of iface are all init'd to zero by the memset().
	 */
	shared_dma = tp->shared_dma + shared_offset(indexes);
	iface->ringIndex = cpu_to_le32(shared_dma);

	shared_dma = tp->shared_dma + shared_offset(txLo);
	iface->txLoAddr = cpu_to_le32(shared_dma);
	iface->txLoSize = cpu_to_le32(TXLO_ENTRIES * sizeof(struct tx_desc));

	shared_dma = tp->shared_dma + shared_offset(txHi);
	iface->txHiAddr = cpu_to_le32(shared_dma);
	iface->txHiSize = cpu_to_le32(TXHI_ENTRIES * sizeof(struct tx_desc));

	shared_dma = tp->shared_dma + shared_offset(rxBuff);
	iface->rxBuffAddr = cpu_to_le32(shared_dma);
	iface->rxBuffSize = cpu_to_le32(RXFREE_ENTRIES *
					sizeof(struct rx_free));

	shared_dma = tp->shared_dma + shared_offset(rxLo);
	iface->rxLoAddr = cpu_to_le32(shared_dma);
	iface->rxLoSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));

	shared_dma = tp->shared_dma + shared_offset(rxHi);
	iface->rxHiAddr = cpu_to_le32(shared_dma);
	iface->rxHiSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));

	shared_dma = tp->shared_dma + shared_offset(cmd);
	iface->cmdAddr = cpu_to_le32(shared_dma);
	iface->cmdSize = cpu_to_le32(COMMAND_RING_SIZE);

	shared_dma = tp->shared_dma + shared_offset(resp);
	iface->respAddr = cpu_to_le32(shared_dma);
	iface->respSize = cpu_to_le32(RESPONSE_RING_SIZE);

	shared_dma = tp->shared_dma + shared_offset(zeroWord);
	iface->zeroAddr = cpu_to_le32(shared_dma);

	/* host-side (virtual address) views of the same rings */
	tp->indexes = &tp->shared->indexes;
	tp->txLoRing.ringBase = (u8 *) tp->shared->txLo;
	tp->txHiRing.ringBase = (u8 *) tp->shared->txHi;
	tp->rxLoRing.ringBase = (u8 *) tp->shared->rxLo;
	tp->rxHiRing.ringBase = (u8 *) tp->shared->rxHi;
	tp->rxBuffRing.ringBase = (u8 *) tp->shared->rxBuff;
	tp->cmdRing.ringBase = (u8 *) tp->shared->cmd;
	tp->respRing.ringBase = (u8 *) tp->shared->resp;

	tp->txLoRing.writeRegister = TYPHOON_REG_TX_LO_READY;
	tp->txHiRing.writeRegister = TYPHOON_REG_TX_HI_READY;

	/* cached for typhoon_tso_fill()'s response-address calculation */
	tp->txlo_dma_addr = le32_to_cpu(iface->txLoAddr);
	tp->card_state = Sleeping;

	tp->offload = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM;
	tp->offload |= TYPHOON_OFFLOAD_UDP_CHKSUM | TSO_OFFLOAD_ON;

	spin_lock_init(&tp->command_lock);
	spin_lock_init(&tp->state_lock);

	/* Force the writes to the shared memory area out before continuing. */
	wmb();
}
1319
1320 static void
1321 typhoon_init_rings(struct typhoon *tp)
1322 {
1323         memset(tp->indexes, 0, sizeof(struct typhoon_indexes));
1324
1325         tp->txLoRing.lastWrite = 0;
1326         tp->txHiRing.lastWrite = 0;
1327         tp->rxLoRing.lastWrite = 0;
1328         tp->rxHiRing.lastWrite = 0;
1329         tp->rxBuffRing.lastWrite = 0;
1330         tp->cmdRing.lastWrite = 0;
1331         tp->cmdRing.lastWrite = 0;
1332
1333         tp->txLoRing.lastRead = 0;
1334         tp->txHiRing.lastRead = 0;
1335 }
1336
1337 static const struct firmware *typhoon_fw;
1338
1339 static int
1340 typhoon_request_firmware(struct typhoon *tp)
1341 {
1342         const struct typhoon_file_header *fHdr;
1343         const struct typhoon_section_header *sHdr;
1344         const u8 *image_data;
1345         u32 numSections;
1346         u32 section_len;
1347         u32 remaining;
1348         int err;
1349
1350         if (typhoon_fw)
1351                 return 0;
1352
1353         err = request_firmware(&typhoon_fw, FIRMWARE_NAME, &tp->pdev->dev);
1354         if (err) {
1355                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
1356                            FIRMWARE_NAME);
1357                 return err;
1358         }
1359
1360         image_data = (u8 *) typhoon_fw->data;
1361         remaining = typhoon_fw->size;
1362         if (remaining < sizeof(struct typhoon_file_header))
1363                 goto invalid_fw;
1364
1365         fHdr = (struct typhoon_file_header *) image_data;
1366         if (memcmp(fHdr->tag, "TYPHOON", 8))
1367                 goto invalid_fw;
1368
1369         numSections = le32_to_cpu(fHdr->numSections);
1370         image_data += sizeof(struct typhoon_file_header);
1371         remaining -= sizeof(struct typhoon_file_header);
1372
1373         while (numSections--) {
1374                 if (remaining < sizeof(struct typhoon_section_header))
1375                         goto invalid_fw;
1376
1377                 sHdr = (struct typhoon_section_header *) image_data;
1378                 image_data += sizeof(struct typhoon_section_header);
1379                 section_len = le32_to_cpu(sHdr->len);
1380
1381                 if (remaining < section_len)
1382                         goto invalid_fw;
1383
1384                 image_data += section_len;
1385                 remaining -= section_len;
1386         }
1387
1388         return 0;
1389
1390 invalid_fw:
1391         netdev_err(tp->dev, "Invalid firmware image\n");
1392         release_firmware(typhoon_fw);
1393         typhoon_fw = NULL;
1394         return -EINVAL;
1395 }
1396
/* Download the (already validated) firmware image to the 3XP.  The
 * image is streamed section by section through a single page of
 * consistent DMA memory, since the firmware blob is vmalloc()'d and
 * may not be physically contiguous.  Each page is handed to the card
 * with its length, IPv4-style checksum, and destination address, then
 * the card is polled for segment-ready before the next chunk.
 * Returns 0 on success or a negative errno; the interrupt enable/mask
 * registers are restored on all paths.
 */
static int
typhoon_download_firmware(struct typhoon *tp)
{
	void __iomem *ioaddr = tp->ioaddr;
	struct pci_dev *pdev = tp->pdev;
	const struct typhoon_file_header *fHdr;
	const struct typhoon_section_header *sHdr;
	const u8 *image_data;
	void *dpage;
	dma_addr_t dpage_dma;
	__sum16 csum;
	u32 irqEnabled;
	u32 irqMasked;
	u32 numSections;
	u32 section_len;
	u32 len;
	u32 load_addr;
	u32 hmac;
	int i;
	int err;

	image_data = (u8 *) typhoon_fw->data;
	fHdr = (struct typhoon_file_header *) image_data;

	/* Cannot just map the firmware image using pci_map_single() as
	 * the firmware is vmalloc()'d and may not be physically contiguous,
	 * so we allocate some consistent memory to copy the sections into.
	 */
	err = -ENOMEM;
	dpage = pci_alloc_consistent(pdev, PAGE_SIZE, &dpage_dma);
	if(!dpage) {
		netdev_err(tp->dev, "no DMA mem for firmware\n");
		goto err_out;
	}

	/* enable (but leave masked) the boot-command interrupt so we can
	 * poll its status bit; originals are restored at err_out_irq */
	irqEnabled = ioread32(ioaddr + TYPHOON_REG_INTR_ENABLE);
	iowrite32(irqEnabled | TYPHOON_INTR_BOOTCMD,
	       ioaddr + TYPHOON_REG_INTR_ENABLE);
	irqMasked = ioread32(ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(irqMasked | TYPHOON_INTR_BOOTCMD,
	       ioaddr + TYPHOON_REG_INTR_MASK);

	err = -ETIMEDOUT;
	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		netdev_err(tp->dev, "card ready timeout\n");
		goto err_out_irq;
	}

	numSections = le32_to_cpu(fHdr->numSections);
	load_addr = le32_to_cpu(fHdr->startAddr);

	/* hand the card the runtime image's entry point and HMAC digest */
	iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(load_addr, ioaddr + TYPHOON_REG_DOWNLOAD_BOOT_ADDR);
	hmac = le32_to_cpu(fHdr->hmacDigest[0]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_0);
	hmac = le32_to_cpu(fHdr->hmacDigest[1]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_1);
	hmac = le32_to_cpu(fHdr->hmacDigest[2]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_2);
	hmac = le32_to_cpu(fHdr->hmacDigest[3]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_3);
	hmac = le32_to_cpu(fHdr->hmacDigest[4]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_4);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_RUNTIME_IMAGE, ioaddr + TYPHOON_REG_COMMAND);

	image_data += sizeof(struct typhoon_file_header);

	/* The ioread32() in typhoon_wait_interrupt() will force the
	 * last write to the command register to post, so
	 * we don't need a typhoon_post_pci_writes() after it.
	 */
	for(i = 0; i < numSections; i++) {
		sHdr = (struct typhoon_section_header *) image_data;
		image_data += sizeof(struct typhoon_section_header);
		load_addr = le32_to_cpu(sHdr->startAddr);
		section_len = le32_to_cpu(sHdr->len);

		while(section_len) {
			/* stream at most one DMA page per iteration */
			len = min_t(u32, section_len, PAGE_SIZE);

			if(typhoon_wait_interrupt(ioaddr) < 0 ||
			   ioread32(ioaddr + TYPHOON_REG_STATUS) !=
			   TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
				netdev_err(tp->dev, "segment ready timeout\n");
				goto err_out_irq;
			}

			/* Do an pseudo IPv4 checksum on the data -- first
			 * need to convert each u16 to cpu order before
			 * summing. Fortunately, due to the properties of
			 * the checksum, we can do this once, at the end.
			 */
			csum = csum_fold(csum_partial_copy_nocheck(image_data,
								   dpage, len,
								   0));

			iowrite32(len, ioaddr + TYPHOON_REG_BOOT_LENGTH);
			iowrite32(le16_to_cpu((__force __le16)csum),
					ioaddr + TYPHOON_REG_BOOT_CHECKSUM);
			iowrite32(load_addr,
					ioaddr + TYPHOON_REG_BOOT_DEST_ADDR);
			iowrite32(0, ioaddr + TYPHOON_REG_BOOT_DATA_HI);
			iowrite32(dpage_dma, ioaddr + TYPHOON_REG_BOOT_DATA_LO);
			typhoon_post_pci_writes(ioaddr);
			iowrite32(TYPHOON_BOOTCMD_SEG_AVAILABLE,
					ioaddr + TYPHOON_REG_COMMAND);

			image_data += len;
			load_addr += len;
			section_len -= len;
		}
	}

	/* wait for the card to consume the final segment */
	if(typhoon_wait_interrupt(ioaddr) < 0 ||
	   ioread32(ioaddr + TYPHOON_REG_STATUS) !=
	   TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
		netdev_err(tp->dev, "final segment ready timeout\n");
		goto err_out_irq;
	}

	iowrite32(TYPHOON_BOOTCMD_DNLD_COMPLETE, ioaddr + TYPHOON_REG_COMMAND);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
		netdev_err(tp->dev, "boot ready timeout, status 0x%0x\n",
			   ioread32(ioaddr + TYPHOON_REG_STATUS));
		goto err_out_irq;
	}

	err = 0;

err_out_irq:
	/* restore the original interrupt enable/mask state */
	iowrite32(irqMasked, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(irqEnabled, ioaddr + TYPHOON_REG_INTR_ENABLE);

	pci_free_consistent(pdev, PAGE_SIZE, dpage, dpage_dma);

err_out:
	return err;
}
1537
/* Boot the 3XP processor from the shared boot record.
 *
 * Waits for the card to reach @initial_status, points it at the shared
 * area (tp->shared_dma), issues the boot command, and waits for the
 * runtime image to report TYPHOON_STATUS_RUNNING.
 *
 * Returns 0 on success or -ETIMEDOUT if a status wait expires.
 */
static int
typhoon_boot_3XP(struct typhoon *tp, u32 initial_status)
{
	void __iomem *ioaddr = tp->ioaddr;

	if(typhoon_wait_status(ioaddr, initial_status) < 0) {
		netdev_err(tp->dev, "boot ready timeout\n");
		goto out_timeout;
	}

	/* No card does 64 bit DAC, so the high dword of the boot record
	 * address is always zero. Post the address writes before the
	 * command so the card sees them in order.
	 */
	iowrite32(0, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_HI);
	iowrite32(tp->shared_dma, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_LO);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_REG_BOOT_RECORD,
				ioaddr + TYPHOON_REG_COMMAND);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_RUNNING) < 0) {
		netdev_err(tp->dev, "boot finish timeout (status 0x%x)\n",
			   ioread32(ioaddr + TYPHOON_REG_STATUS));
		goto out_timeout;
	}

	/* Clear the Transmit and Command ready registers
	 */
	iowrite32(0, ioaddr + TYPHOON_REG_TX_HI_READY);
	iowrite32(0, ioaddr + TYPHOON_REG_CMD_READY);
	iowrite32(0, ioaddr + TYPHOON_REG_TX_LO_READY);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_BOOT, ioaddr + TYPHOON_REG_COMMAND);

	return 0;

out_timeout:
	return -ETIMEDOUT;
}
1573
/* Reclaim completed transmit descriptors from @txRing.
 *
 * Walks the ring from txRing->lastRead up to the card's cleared offset
 * (*index), freeing the skb stashed in each TYPHOON_TX_DESC entry and
 * unmapping the DMA region of each TYPHOON_FRAG_DESC entry.
 *
 * Returns the new lastRead offset; the caller is responsible for
 * writing it back to the ring.
 */
static u32
typhoon_clean_tx(struct typhoon *tp, struct transmit_ring *txRing,
			volatile __le32 * index)
{
	u32 lastRead = txRing->lastRead;
	struct tx_desc *tx;
	dma_addr_t skb_dma;
	int dma_len;
	int type;

	while(lastRead != le32_to_cpu(*index)) {
		tx = (struct tx_desc *) (txRing->ringBase + lastRead);
		type = tx->flags & TYPHOON_TYPE_MASK;

		if(type == TYPHOON_TX_DESC) {
			/* This tx_desc describes a packet. tx_addr holds
			 * the host-side skb pointer stashed at transmit
			 * time, not a DMA address.
			 */
			unsigned long ptr = tx->tx_addr;
			struct sk_buff *skb = (struct sk_buff *) ptr;
			dev_kfree_skb_irq(skb);
		} else if(type == TYPHOON_FRAG_DESC) {
			/* This tx_desc describes a memory mapping. Free it.
			 */
			skb_dma = (dma_addr_t) le32_to_cpu(tx->frag.addr);
			dma_len = le16_to_cpu(tx->len);
			pci_unmap_single(tp->pdev, skb_dma, dma_len,
				       PCI_DMA_TODEVICE);
		}

		tx->flags = 0;
		typhoon_inc_tx_index(&lastRead, 1);
	}

	return lastRead;
}
1609
1610 static void
1611 typhoon_tx_complete(struct typhoon *tp, struct transmit_ring *txRing,
1612                         volatile __le32 * index)
1613 {
1614         u32 lastRead;
1615         int numDesc = MAX_SKB_FRAGS + 1;
1616
1617         /* This will need changing if we start to use the Hi Tx ring. */
1618         lastRead = typhoon_clean_tx(tp, txRing, index);
1619         if(netif_queue_stopped(tp->dev) && typhoon_num_free(txRing->lastWrite,
1620                                 lastRead, TXLO_ENTRIES) > (numDesc + 2))
1621                 netif_wake_queue(tp->dev);
1622
1623         txRing->lastRead = lastRead;
1624         smp_wmb();
1625 }
1626
/* Return the already-mapped receive buffer at @idx to the rx free ring
 * without reallocating it. If the free ring is full, the skb is dropped
 * instead.
 */
static void
typhoon_recycle_rx_skb(struct typhoon *tp, u32 idx)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
	struct basic_ring *ring = &tp->rxBuffRing;
	struct rx_free *r;

	/* Ring is full when advancing lastWrite would land on the card's
	 * cleared offset.
	 */
	if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
				le32_to_cpu(indexes->rxBuffCleared)) {
		/* no room in ring, just drop the skb
		 */
		dev_kfree_skb_any(rxb->skb);
		rxb->skb = NULL;
		return;
	}

	r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
	typhoon_inc_rxfree_index(&ring->lastWrite, 1);
	r->virtAddr = idx;
	r->physAddr = cpu_to_le32(rxb->dma_addr);

	/* Tell the card about it -- the descriptor contents must be
	 * visible in memory before the ready index is updated.
	 */
	wmb();
	indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
}
1653
/* Allocate and DMA-map a fresh receive buffer for slot @idx, and post
 * it on the rx free ring.
 *
 * Returns 0 on success, -ENOMEM if the free ring is full or the skb
 * allocation fails (rxb->skb is left NULL in that case).
 */
static int
typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
	struct basic_ring *ring = &tp->rxBuffRing;
	struct rx_free *r;
	struct sk_buff *skb;
	dma_addr_t dma_addr;

	rxb->skb = NULL;

	/* Ring is full when advancing lastWrite would land on the card's
	 * cleared offset.
	 */
	if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
				le32_to_cpu(indexes->rxBuffCleared))
		return -ENOMEM;

	skb = dev_alloc_skb(PKT_BUF_SZ);
	if(!skb)
		return -ENOMEM;

#if 0
	/* Please, 3com, fix the firmware to allow DMA to a unaligned
	 * address! Pretty please?
	 */
	skb_reserve(skb, 2);
#endif

	skb->dev = tp->dev;
	dma_addr = pci_map_single(tp->pdev, skb->data,
				  PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

	/* Since no card does 64 bit DAC, the high bits will never
	 * change from zero.
	 */
	r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
	typhoon_inc_rxfree_index(&ring->lastWrite, 1);
	r->virtAddr = idx;
	r->physAddr = cpu_to_le32(dma_addr);
	rxb->skb = skb;
	rxb->dma_addr = dma_addr;

	/* Tell the card about it -- the descriptor contents must be
	 * visible in memory before the ready index is updated.
	 */
	wmb();
	indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
	return 0;
}
1700
/* Process up to @budget received packets from @rxRing.
 *
 * Walks the ring between *cleared and *ready. Small packets (below
 * rx_copybreak) are copied into a fresh skb and the original buffer is
 * recycled; larger ones are handed up directly and a replacement buffer
 * is allocated. Hardware checksum results and VLAN tags are honored.
 *
 * Updates *cleared to the new ring offset and returns the number of
 * packets delivered to the stack.
 */
static int
typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * ready,
	   volatile __le32 * cleared, int budget)
{
	struct rx_desc *rx;
	struct sk_buff *skb, *new_skb;
	struct rxbuff_ent *rxb;
	dma_addr_t dma_addr;
	u32 local_ready;
	u32 rxaddr;
	int pkt_len;
	u32 idx;
	__le32 csum_bits;
	int received;

	received = 0;
	local_ready = le32_to_cpu(*ready);
	rxaddr = le32_to_cpu(*cleared);
	while(rxaddr != local_ready && budget > 0) {
		rx = (struct rx_desc *) (rxRing->ringBase + rxaddr);
		/* rx->addr carries back the buffer slot index we posted
		 * as virtAddr on the free ring.
		 */
		idx = rx->addr;
		rxb = &tp->rxbuffers[idx];
		skb = rxb->skb;
		dma_addr = rxb->dma_addr;

		typhoon_inc_rx_index(&rxaddr, 1);

		if(rx->flags & TYPHOON_RX_ERROR) {
			/* Bad frame: put the buffer straight back on the
			 * free ring and move on.
			 */
			typhoon_recycle_rx_skb(tp, idx);
			continue;
		}

		pkt_len = le16_to_cpu(rx->frameLen);

		if(pkt_len < rx_copybreak &&
		   (new_skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
			/* Copybreak path: copy into a small, IP-aligned
			 * skb and keep the original buffer mapped.
			 */
			skb_reserve(new_skb, 2);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
						    PKT_BUF_SZ,
						    PCI_DMA_FROMDEVICE);
			skb_copy_to_linear_data(new_skb, skb->data, pkt_len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr,
						       PKT_BUF_SZ,
						       PCI_DMA_FROMDEVICE);
			skb_put(new_skb, pkt_len);
			typhoon_recycle_rx_skb(tp, idx);
		} else {
			/* Hand the mapped buffer up and try to refill
			 * the slot with a fresh one.
			 */
			new_skb = skb;
			skb_put(new_skb, pkt_len);
			pci_unmap_single(tp->pdev, dma_addr, PKT_BUF_SZ,
				       PCI_DMA_FROMDEVICE);
			typhoon_alloc_rx_skb(tp, idx);
		}
		new_skb->protocol = eth_type_trans(new_skb, tp->dev);
		/* Only trust the hardware checksum when the IP check and
		 * exactly one of the TCP/UDP checks passed.
		 */
		csum_bits = rx->rxStatus & (TYPHOON_RX_IP_CHK_GOOD |
			TYPHOON_RX_UDP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD);
		if(csum_bits ==
		   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD) ||
		   csum_bits ==
		   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_UDP_CHK_GOOD)) {
			new_skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else
			skb_checksum_none_assert(new_skb);

		/* state_lock protects tp->vlgrp against concurrent
		 * updates while we deliver the frame.
		 */
		spin_lock(&tp->state_lock);
		if(tp->vlgrp != NULL && rx->rxStatus & TYPHOON_RX_VLAN)
			vlan_hwaccel_receive_skb(new_skb, tp->vlgrp,
						 ntohl(rx->vlanTag) & 0xffff);
		else
			netif_receive_skb(new_skb);
		spin_unlock(&tp->state_lock);

		received++;
		budget--;
	}
	*cleared = cpu_to_le32(rxaddr);

	return received;
}
1780
1781 static void
1782 typhoon_fill_free_ring(struct typhoon *tp)
1783 {
1784         u32 i;
1785
1786         for(i = 0; i < RXENT_ENTRIES; i++) {
1787                 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1788                 if(rxb->skb)
1789                         continue;
1790                 if(typhoon_alloc_rx_skb(tp, i) < 0)
1791                         break;
1792         }
1793 }
1794
/* NAPI poll handler: drain command responses, reap tx completions,
 * process both rx rings within @budget, refill the rx free ring if it
 * drained, and re-enable interrupts once the work is done.
 *
 * Returns the number of rx packets processed (<= budget).
 */
static int
typhoon_poll(struct napi_struct *napi, int budget)
{
	struct typhoon *tp = container_of(napi, struct typhoon, napi);
	struct typhoon_indexes *indexes = tp->indexes;
	int work_done;

	/* Make sure we see the index values the card wrote before the
	 * interrupt that scheduled us.
	 */
	rmb();
	/* Skip response processing while typhoon_issue_command() is
	 * waiting for one itself (awaiting_resp).
	 */
	if(!tp->awaiting_resp && indexes->respReady != indexes->respCleared)
			typhoon_process_response(tp, 0, NULL);

	if(le32_to_cpu(indexes->txLoCleared) != tp->txLoRing.lastRead)
		typhoon_tx_complete(tp, &tp->txLoRing, &indexes->txLoCleared);

	work_done = 0;

	/* The Hi ring is served first; the Lo ring gets whatever budget
	 * remains.
	 */
	if(indexes->rxHiCleared != indexes->rxHiReady) {
		work_done += typhoon_rx(tp, &tp->rxHiRing, &indexes->rxHiReady,
					&indexes->rxHiCleared, budget);
	}

	if(indexes->rxLoCleared != indexes->rxLoReady) {
		work_done += typhoon_rx(tp, &tp->rxLoRing, &indexes->rxLoReady,
					&indexes->rxLoCleared, budget - work_done);
	}

	if(le32_to_cpu(indexes->rxBuffCleared) == tp->rxBuffRing.lastWrite) {
		/* rxBuff ring is empty, try to fill it. */
		typhoon_fill_free_ring(tp);
	}

	if (work_done < budget) {
		/* All caught up: leave polled mode and unmask the
		 * card's interrupts.
		 */
		napi_complete(napi);
		iowrite32(TYPHOON_INTR_NONE,
				tp->ioaddr + TYPHOON_REG_INTR_MASK);
		typhoon_post_pci_writes(tp->ioaddr);
	}

	return work_done;
}
1835
/* Interrupt handler: acknowledge the card's interrupt, mask further
 * interrupts, and hand the real work off to NAPI polling.
 */
static irqreturn_t
typhoon_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct typhoon *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->ioaddr;
	u32 intr_status;

	/* Shared IRQ line: bail out if this interrupt isn't ours. */
	intr_status = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
	if(!(intr_status & TYPHOON_INTR_HOST_INT))
		return IRQ_NONE;

	/* Ack exactly the bits we saw. */
	iowrite32(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS);

	if (napi_schedule_prep(&tp->napi)) {
		/* Mask all interrupts until the poll routine finishes
		 * and unmasks them again.
		 */
		iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
		typhoon_post_pci_writes(ioaddr);
		__napi_schedule(&tp->napi);
	} else {
		netdev_err(dev, "Error, poll already scheduled\n");
	}
	return IRQ_HANDLED;
}
1859
1860 static void
1861 typhoon_free_rx_rings(struct typhoon *tp)
1862 {
1863         u32 i;
1864
1865         for(i = 0; i < RXENT_ENTRIES; i++) {
1866                 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1867                 if(rxb->skb) {
1868                         pci_unmap_single(tp->pdev, rxb->dma_addr, PKT_BUF_SZ,
1869                                        PCI_DMA_FROMDEVICE);
1870                         dev_kfree_skb(rxb->skb);
1871                         rxb->skb = NULL;
1872                 }
1873         }
1874 }
1875
/* Put the 3XP and the PCI device to sleep.
 *
 * Arms the wake events in @events, issues the GOTO_SLEEP command, waits
 * for the card to report TYPHOON_STATUS_SLEEPING, then enables PCI wake
 * and drops the device into power state @state.
 *
 * Returns 0 on success or a negative errno.
 */
static int
typhoon_sleep(struct typhoon *tp, pci_power_t state, __le16 events)
{
	struct pci_dev *pdev = tp->pdev;
	void __iomem *ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int err;

	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_ENABLE_WAKE_EVENTS);
	xp_cmd.parm1 = events;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0) {
		netdev_err(tp->dev, "typhoon_sleep(): wake events cmd err %d\n",
			   err);
		return err;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_GOTO_SLEEP);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0) {
		netdev_err(tp->dev, "typhoon_sleep(): sleep cmd err %d\n", err);
		return err;
	}

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_SLEEPING) < 0)
		return -ETIMEDOUT;

	/* Since we cannot monitor the status of the link while sleeping,
	 * tell the world it went away.
	 */
	netif_carrier_off(tp->dev);

	pci_enable_wake(tp->pdev, state, 1);
	pci_disable_device(pdev);
	return pci_set_power_state(pdev, state);
}
1912
/* Wake the card from sleep: restore PCI power/config state and kick the
 * sleep image with the WAKEUP command. Falls back to a full reset
 * (typhoon_reset with @wait_type) when the wakeup does not complete or
 * when the firmware requires a reset after waking.
 *
 * Returns 0 on success or the result of typhoon_reset().
 */
static int
typhoon_wakeup(struct typhoon *tp, int wait_type)
{
	struct pci_dev *pdev = tp->pdev;
	void __iomem *ioaddr = tp->ioaddr;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Post 2.x.x versions of the Sleep Image require a reset before
	 * we can download the Runtime Image. But let's not make users of
	 * the old firmware pay for the reset.
	 */
	iowrite32(TYPHOON_BOOTCMD_WAKEUP, ioaddr + TYPHOON_REG_COMMAND);
	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0 ||
			(tp->capabilities & TYPHOON_WAKEUP_NEEDS_RESET))
		return typhoon_reset(ioaddr, wait_type);

	return 0;
}
1933
/* Bring the card up into the Running state.
 *
 * Initializes and fills the rings, downloads and boots the runtime
 * firmware, then issues the configuration command sequence (packet
 * size, MAC address, IRQ coalescing, transceiver, VLAN type, offloads,
 * rx filter) and finally enables tx, rx and interrupts.
 *
 * On any failure the card is reset and the rings reinitialized before
 * the error is returned.
 */
static int
typhoon_start_runtime(struct typhoon *tp)
{
	struct net_device *dev = tp->dev;
	void __iomem *ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int err;

	typhoon_init_rings(tp);
	typhoon_fill_free_ring(tp);

	err = typhoon_download_firmware(tp);
	if(err < 0) {
		netdev_err(tp->dev, "cannot load runtime on 3XP\n");
		goto error_out;
	}

	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
		netdev_err(tp->dev, "cannot boot 3XP\n");
		err = -EIO;
		goto error_out;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAX_PKT_SIZE);
	xp_cmd.parm1 = cpu_to_le16(PKT_BUF_SZ);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* The station address is handed to the card as two chunks:
	 * the first 2 bytes in parm1, the last 4 in parm2.
	 */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
	xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
	xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* Disable IRQ coalescing -- we can reenable it when 3Com gives
	 * us some more information on how to control it.
	 */
	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_IRQ_COALESCE_CTRL);
	xp_cmd.parm1 = 0;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
	xp_cmd.parm1 = tp->xcvr_select;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_VLAN_TYPE_WRITE);
	xp_cmd.parm1 = cpu_to_le16(ETH_P_8021Q);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* tp->offload is protected by state_lock; hold it across the
	 * read and the command issue.
	 */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS);
	spin_lock_bh(&tp->state_lock);
	xp_cmd.parm2 = tp->offload;
	xp_cmd.parm3 = tp->offload;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	spin_unlock_bh(&tp->state_lock);
	if(err < 0)
		goto error_out;

	typhoon_set_rx_mode(dev);

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_ENABLE);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_ENABLE);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* Publish the Running state before interrupts can fire. */
	tp->card_state = Running;
	smp_wmb();

	iowrite32(TYPHOON_INTR_ENABLE_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);
	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_MASK);
	typhoon_post_pci_writes(ioaddr);

	return 0;

error_out:
	typhoon_reset(ioaddr, WaitNoSleep);
	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);
	return err;
}
2027
/* Take the card out of the Running state.
 *
 * Masks interrupts, disables rx, waits up to ~1/2 second for pending
 * transmits, disables tx, snapshots the statistics, halts the 3XP and
 * resets it. Any transmits still outstanding after the reset are
 * cleaned up by hand.
 *
 * Returns 0 on success or -ETIMEDOUT if the reset fails.
 */
static int
typhoon_stop_runtime(struct typhoon *tp, int wait_type)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct transmit_ring *txLo = &tp->txLoRing;
	void __iomem *ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int i;

	/* Disable interrupts early, since we can't schedule a poll
	 * when called with !netif_running(). This will be posted
	 * when we force the posting of the command.
	 */
	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_DISABLE);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	/* Wait 1/2 sec for any outstanding transmits to occur
	 * We'll cleanup after the reset if this times out.
	 */
	for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
		if(indexes->txLoCleared == cpu_to_le32(txLo->lastWrite))
			break;
		udelay(TYPHOON_UDELAY);
	}

	if(i == TYPHOON_WAIT_TIMEOUT)
		netdev_err(tp->dev, "halt timed out waiting for Tx to complete\n");

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_DISABLE);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	/* save the statistics so when we bring the interface up again,
	 * the values reported to userspace are correct.
	 */
	tp->card_state = Sleeping;
	smp_wmb();
	typhoon_do_get_stats(tp);
	memcpy(&tp->stats_saved, &tp->stats, sizeof(struct net_device_stats));

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_HALT);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_HALTED) < 0)
		netdev_err(tp->dev, "timed out waiting for 3XP to halt\n");

	if(typhoon_reset(ioaddr, wait_type) < 0) {
		netdev_err(tp->dev, "unable to reset 3XP\n");
		return -ETIMEDOUT;
	}

	/* cleanup any outstanding Tx packets */
	if(indexes->txLoCleared != cpu_to_le32(txLo->lastWrite)) {
		indexes->txLoCleared = cpu_to_le32(txLo->lastWrite);
		typhoon_clean_tx(tp, &tp->txLoRing, &indexes->txLoCleared);
	}

	return 0;
}
2088
/* Transmit watchdog handler: reset the card, clean the rings, and try
 * to restart the runtime. If either step fails, leave the hardware
 * reset and the carrier off so the watchdog stops firing.
 */
static void
typhoon_tx_timeout(struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);

	if(typhoon_reset(tp->ioaddr, WaitNoSleep) < 0) {
		netdev_warn(dev, "could not reset in tx timeout\n");
		goto truly_dead;
	}

	/* If we ever start using the Hi ring, it will need cleaning too */
	typhoon_clean_tx(tp, &tp->txLoRing, &tp->indexes->txLoCleared);
	typhoon_free_rx_rings(tp);

	if(typhoon_start_runtime(tp) < 0) {
		netdev_err(dev, "could not start runtime in tx timeout\n");
		goto truly_dead;
	}

	netif_wake_queue(dev);
	return;

truly_dead:
	/* Reset the hardware, and turn off carrier to avoid more timeouts */
	typhoon_reset(tp->ioaddr, NoWait);
	netif_carrier_off(dev);
}
2116
/* ndo_open handler: load the firmware, wake the card, grab the IRQ,
 * enable NAPI and start the runtime.
 *
 * On failure the card is rebooted into the sleep image and put back to
 * sleep so it is left in the same state as before the open attempt.
 */
static int
typhoon_open(struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);
	int err;

	err = typhoon_request_firmware(tp);
	if (err)
		goto out;

	err = typhoon_wakeup(tp, WaitSleep);
	if(err < 0) {
		netdev_err(dev, "unable to wakeup device\n");
		goto out_sleep;
	}

	err = request_irq(dev->irq, typhoon_interrupt, IRQF_SHARED,
				dev->name, dev);
	if(err < 0)
		goto out_sleep;

	napi_enable(&tp->napi);

	err = typhoon_start_runtime(tp);
	if(err < 0) {
		napi_disable(&tp->napi);
		goto out_irq;
	}

	netif_start_queue(dev);
	return 0;

out_irq:
	free_irq(dev->irq, dev);

out_sleep:
	/* Try to leave the card asleep, as it was before we started. */
	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		netdev_err(dev, "unable to reboot into sleep img\n");
		typhoon_reset(tp->ioaddr, NoWait);
		goto out;
	}

	if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
		netdev_err(dev, "unable to go back to sleep\n");

out:
	return err;
}
2165
/* ndo_stop handler: quiesce the queue and NAPI, stop the runtime, free
 * the IRQ and rings, then reboot the card into the sleep image and put
 * it to sleep. Always returns 0; failures are only logged.
 */
static int
typhoon_close(struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&tp->napi);

	if(typhoon_stop_runtime(tp, WaitSleep) < 0)
		netdev_err(dev, "unable to stop runtime\n");

	/* Make sure there is no irq handler running on a different CPU. */
	free_irq(dev->irq, dev);

	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);

	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0)
		netdev_err(dev, "unable to boot sleep image\n");

	if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
		netdev_err(dev, "unable to put card to sleep\n");

	return 0;
}
2191
2192 #ifdef CONFIG_PM
/* PM resume handler: wake the card and restart the runtime if the
 * interface was up. On failure the card is reset and -EBUSY returned.
 */
static int
typhoon_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = netdev_priv(dev);

	/* If we're down, resume when we are upped.
	 */
	if(!netif_running(dev))
		return 0;

	if(typhoon_wakeup(tp, WaitNoSleep) < 0) {
		netdev_err(dev, "critical: could not wake up in resume\n");
		goto reset;
	}

	if(typhoon_start_runtime(tp) < 0) {
		netdev_err(dev, "critical: could not start runtime in resume\n");
		goto reset;
	}

	netif_device_attach(dev);
	return 0;

reset:
	typhoon_reset(tp->ioaddr, NoWait);
	return -EBUSY;
}
2221
/* PM suspend handler: stop the runtime, boot the sleep image, program
 * the MAC address and a minimal rx filter for wake-on-LAN, then put the
 * card to sleep with the configured wake events.
 *
 * Refuses (-EBUSY) when magic-packet wake is requested while VLANs are
 * registered. On any later failure the device is resumed and -EBUSY
 * returned.
 */
static int
typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = netdev_priv(dev);
	struct cmd_desc xp_cmd;

	/* If we're down, we're already suspended.
	 */
	if(!netif_running(dev))
		return 0;

	/* tp->vlgrp is protected by state_lock. */
	spin_lock_bh(&tp->state_lock);
	if(tp->vlgrp && tp->wol_events & TYPHOON_WAKE_MAGIC_PKT) {
		spin_unlock_bh(&tp->state_lock);
		netdev_err(dev, "cannot do WAKE_MAGIC with VLANS\n");
		return -EBUSY;
	}
	spin_unlock_bh(&tp->state_lock);

	netif_device_detach(dev);

	if(typhoon_stop_runtime(tp, WaitNoSleep) < 0) {
		netdev_err(dev, "unable to stop runtime\n");
		goto need_resume;
	}

	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);

	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		netdev_err(dev, "unable to boot sleep image\n");
		goto need_resume;
	}

	/* The sleep image needs the station address to match wake
	 * packets: first 2 bytes in parm1, last 4 in parm2.
	 */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
	xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
	xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
	if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
		netdev_err(dev, "unable to set mac address in suspend\n");
		goto need_resume;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
	xp_cmd.parm1 = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
	if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
		netdev_err(dev, "unable to set rx filter in suspend\n");
		goto need_resume;
	}

	if(typhoon_sleep(tp, pci_choose_state(pdev, state), tp->wol_events) < 0) {
		netdev_err(dev, "unable to put card to sleep\n");
		goto need_resume;
	}

	return 0;

need_resume:
	typhoon_resume(pdev);
	return -EBUSY;
}
2283 #endif
2284
/* Probe whether memory-mapped I/O works on this card by sending
 * ourselves an interrupt through the MMIO BAR and checking whether it
 * shows up in the interrupt status register.
 *
 * Returns 1 when MMIO works, 0 to fall back to port I/O.
 */
static int __devinit
typhoon_test_mmio(struct pci_dev *pdev)
{
	void __iomem *ioaddr = pci_iomap(pdev, 1, 128);
	int mode = 0;
	u32 val;

	if(!ioaddr)
		goto out;

	/* Only safe to poke the card while it is idle, waiting for the
	 * host.
	 */
	if(ioread32(ioaddr + TYPHOON_REG_STATUS) !=
				TYPHOON_STATUS_WAITING_FOR_HOST)
		goto out_unmap;

	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);

	/* Ok, see if we can change our interrupt status register by
	 * sending ourselves an interrupt. If so, then MMIO works.
	 * The 50usec delay is arbitrary -- it could probably be smaller.
	 */
	val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
	if((val & TYPHOON_INTR_SELF) == 0) {
		iowrite32(1, ioaddr + TYPHOON_REG_SELF_INTERRUPT);
		ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
		udelay(50);
		val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
		if(val & TYPHOON_INTR_SELF)
			mode = 1;
	}

	/* Restore the quiescent interrupt state we found the card in. */
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);
	ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);

out_unmap:
	pci_iounmap(pdev, ioaddr);

out:
	if(!mode)
		pr_info("%s: falling back to port IO\n", pci_name(pdev));
	return mode;
}
2330
/* net_device operations implemented by this driver. */
static const struct net_device_ops typhoon_netdev_ops = {
	.ndo_open		= typhoon_open,
	.ndo_stop		= typhoon_close,
	.ndo_start_xmit		= typhoon_start_tx,
	.ndo_set_multicast_list	= typhoon_set_rx_mode,
	.ndo_tx_timeout		= typhoon_tx_timeout,
	.ndo_get_stats		= typhoon_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= typhoon_set_mac_address,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_vlan_rx_register	= typhoon_vlan_rx_register,
};
2343
2344 static int __devinit
2345 typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2346 {
2347         struct net_device *dev;
2348         struct typhoon *tp;
2349         int card_id = (int) ent->driver_data;
2350         void __iomem *ioaddr;
2351         void *shared;
2352         dma_addr_t shared_dma;
2353         struct cmd_desc xp_cmd;
2354         struct resp_desc xp_resp[3];
2355         int err = 0;
2356         const char *err_msg;
2357
2358         dev = alloc_etherdev(sizeof(*tp));
2359         if(dev == NULL) {
2360                 err_msg = "unable to alloc new net device";
2361                 err = -ENOMEM;
2362                 goto error_out;
2363         }
2364         SET_NETDEV_DEV(dev, &pdev->dev);
2365
2366         err = pci_enable_device(pdev);
2367         if(err < 0) {
2368                 err_msg = "unable to enable device";
2369                 goto error_out_dev;
2370         }
2371
2372         err = pci_set_mwi(pdev);
2373         if(err < 0) {
2374                 err_msg = "unable to set MWI";
2375                 goto error_out_disable;
2376         }
2377
2378         err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2379         if(err < 0) {
2380                 err_msg = "No usable DMA configuration";
2381                 goto error_out_mwi;
2382         }
2383
2384         /* sanity checks on IO and MMIO BARs
2385          */
2386         if(!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
2387                 err_msg = "region #1 not a PCI IO resource, aborting";
2388                 err = -ENODEV;
2389                 goto error_out_mwi;
2390         }
2391         if(pci_resource_len(pdev, 0) < 128) {
2392                 err_msg = "Invalid PCI IO region size, aborting";
2393                 err = -ENODEV;
2394                 goto error_out_mwi;
2395         }
2396         if(!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
2397                 err_msg = "region #1 not a PCI MMIO resource, aborting";
2398                 err = -ENODEV;
2399                 goto error_out_mwi;
2400         }
2401         if(pci_resource_len(pdev, 1) < 128) {
2402                 err_msg = "Invalid PCI MMIO region size, aborting";
2403                 err = -ENODEV;
2404                 goto error_out_mwi;
2405         }
2406
2407         err = pci_request_regions(pdev, KBUILD_MODNAME);
2408         if(err < 0) {
2409                 err_msg = "could not request regions";
2410                 goto error_out_mwi;
2411         }
2412
2413         /* map our registers
2414          */
2415         if(use_mmio != 0 && use_mmio != 1)
2416                 use_mmio = typhoon_test_mmio(pdev);
2417
2418         ioaddr = pci_iomap(pdev, use_mmio, 128);
2419         if (!ioaddr) {
2420                 err_msg = "cannot remap registers, aborting";
2421                 err = -EIO;
2422                 goto error_out_regions;
2423         }
2424
2425         /* allocate pci dma space for rx and tx descriptor rings
2426          */
2427         shared = pci_alloc_consistent(pdev, sizeof(struct typhoon_shared),
2428                                       &shared_dma);
2429         if(!shared) {
2430                 err_msg = "could not allocate DMA memory";
2431                 err = -ENOMEM;
2432                 goto error_out_remap;
2433         }
2434
2435         dev->irq = pdev->irq;
2436         tp = netdev_priv(dev);
2437         tp->shared = (struct typhoon_shared *) shared;
2438         tp->shared_dma = shared_dma;
2439         tp->pdev = pdev;
2440         tp->tx_pdev = pdev;
2441         tp->ioaddr = ioaddr;
2442         tp->tx_ioaddr = ioaddr;
2443         tp->dev = dev;
2444
2445         /* Init sequence:
2446          * 1) Reset the adapter to clear any bad juju
2447          * 2) Reload the sleep image
2448          * 3) Boot the sleep image
2449          * 4) Get the hardware address.
2450          * 5) Put the card to sleep.
2451          */
2452         if (typhoon_reset(ioaddr, WaitSleep) < 0) {
2453                 err_msg = "could not reset 3XP";
2454                 err = -EIO;
2455                 goto error_out_dma;
2456         }
2457
2458         /* Now that we've reset the 3XP and are sure it's not going to
2459          * write all over memory, enable bus mastering, and save our
2460          * state for resuming after a suspend.
2461          */
2462         pci_set_master(pdev);
2463         pci_save_state(pdev);
2464
2465         typhoon_init_interface(tp);
2466         typhoon_init_rings(tp);
2467
2468         if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
2469                 err_msg = "cannot boot 3XP sleep image";
2470                 err = -EIO;
2471                 goto error_out_reset;
2472         }
2473
2474         INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS);
2475         if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) {
2476                 err_msg = "cannot read MAC address";
2477                 err = -EIO;
2478                 goto error_out_reset;
2479         }
2480
2481         *(__be16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
2482         *(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
2483
2484         if(!is_valid_ether_addr(dev->dev_addr)) {
2485                 err_msg = "Could not obtain valid ethernet address, aborting";
2486                 goto error_out_reset;
2487         }
2488
2489         /* Read the Sleep Image version last, so the response is valid
2490          * later when we print out the version reported.
2491          */
2492         INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
2493         if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
2494                 err_msg = "Could not get Sleep Image version";
2495                 goto error_out_reset;
2496         }
2497
2498         tp->capabilities = typhoon_card_info[card_id].capabilities;
2499         tp->xcvr_select = TYPHOON_XCVR_AUTONEG;
2500
2501         /* Typhoon 1.0 Sleep Images return one response descriptor to the
2502          * READ_VERSIONS command. Those versions are OK after waking up
2503          * from sleep without needing a reset. Typhoon 1.1+ Sleep Images
2504          * seem to need a little extra help to get started. Since we don't
2505          * know how to nudge it along, just kick it.
2506          */
2507         if(xp_resp[0].numDesc != 0)
2508                 tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET;
2509
2510         if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) {
2511                 err_msg = "cannot put adapter to sleep";
2512                 err = -EIO;
2513                 goto error_out_reset;
2514         }
2515
2516         /* The chip-specific entries in the device structure. */
2517         dev->netdev_ops         = &typhoon_netdev_ops;
2518         netif_napi_add(dev, &tp->napi, typhoon_poll, 16);
2519         dev->watchdog_timeo     = TX_TIMEOUT;
2520
2521         SET_ETHTOOL_OPS(dev, &typhoon_ethtool_ops);
2522
2523         /* We can handle scatter gather, up to 16 entries, and
2524          * we can do IP checksumming (only version 4, doh...)
2525          */
2526         dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
2527         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2528         dev->features |= NETIF_F_TSO;
2529
2530         if(register_netdev(dev) < 0) {
2531                 err_msg = "unable to register netdev";
2532                 goto error_out_reset;
2533         }
2534
2535         pci_set_drvdata(pdev, dev);
2536
2537         netdev_info(dev, "%s at %s 0x%llx, %pM\n",
2538                     typhoon_card_info[card_id].name,
2539                     use_mmio ? "MMIO" : "IO",
2540                     (unsigned long long)pci_resource_start(pdev, use_mmio),
2541                     dev->dev_addr);
2542
2543         /* xp_resp still contains the response to the READ_VERSIONS command.
2544          * For debugging, let the user know what version he has.
2545          */
2546         if(xp_resp[0].numDesc == 0) {
2547                 /* This is the Typhoon 1.0 type Sleep Image, last 16 bits
2548                  * of version is Month/Day of build.
2549                  */
2550                 u16 monthday = le32_to_cpu(xp_resp[0].parm2) & 0xffff;
2551                 netdev_info(dev, "Typhoon 1.0 Sleep Image built %02u/%02u/2000\n",
2552                             monthday >> 8, monthday & 0xff);
2553         } else if(xp_resp[0].numDesc == 2) {
2554                 /* This is the Typhoon 1.1+ type Sleep Image
2555                  */
2556                 u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
2557                 u8 *ver_string = (u8 *) &xp_resp[1];
2558                 ver_string[25] = 0;
2559                 netdev_info(dev, "Typhoon 1.1+ Sleep Image version %02x.%03x.%03x %s\n",
2560                             sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
2561                             sleep_ver & 0xfff, ver_string);
2562         } else {
2563                 netdev_warn(dev, "Unknown Sleep Image version (%u:%04x)\n",
2564                             xp_resp[0].numDesc, le32_to_cpu(xp_resp[0].parm2));
2565         }
2566
2567         return 0;
2568
2569 error_out_reset:
2570         typhoon_reset(ioaddr, NoWait);
2571
2572 error_out_dma:
2573         pci_free_consistent(pdev, sizeof(struct typhoon_shared),
2574                             shared, shared_dma);
2575 error_out_remap:
2576         pci_iounmap(pdev, ioaddr);
2577 error_out_regions:
2578         pci_release_regions(pdev);
2579 error_out_mwi:
2580         pci_clear_mwi(pdev);
2581 error_out_disable:
2582         pci_disable_device(pdev);
2583 error_out_dev:
2584         free_netdev(dev);
2585 error_out:
2586         pr_err("%s: %s\n", pci_name(pdev), err_msg);
2587         return err;
2588 }
2589
/* Undo typhoon_init_one(): detach from the network stack, wake the
 * card, quiesce it, then release resources in reverse order of
 * acquisition. Statement order matters throughout.
 */
static void __devexit
typhoon_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = netdev_priv(dev);

	unregister_netdev(dev);
	/* the card may be sleeping in D3hot -- wake it before touching it */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* reset the 3XP so it cannot DMA into memory we are about to free */
	typhoon_reset(tp->ioaddr, NoWait);
	pci_iounmap(pdev, tp->ioaddr);
	pci_free_consistent(pdev, sizeof(struct typhoon_shared),
			    tp->shared, tp->shared_dma);
	pci_release_regions(pdev);
	pci_clear_mwi(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
}
2609
/* PCI core glue. suspend/resume are only wired up when power
 * management support is configured in.
 */
static struct pci_driver typhoon_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= typhoon_pci_tbl,
	.probe		= typhoon_init_one,
	.remove		= __devexit_p(typhoon_remove_one),
#ifdef CONFIG_PM
	.suspend	= typhoon_suspend,
	.resume		= typhoon_resume,
#endif
};
2620
2621 static int __init
2622 typhoon_init(void)
2623 {
2624         return pci_register_driver(&typhoon_driver);
2625 }
2626
2627 static void __exit
2628 typhoon_cleanup(void)
2629 {
2630         if (typhoon_fw)
2631                 release_firmware(typhoon_fw);
2632         pci_unregister_driver(&typhoon_driver);
2633 }
2634
/* module entry and exit hooks */
module_init(typhoon_init);
module_exit(typhoon_cleanup);