/* cassini.c: Sun Microsystems Cassini(+) ethernet driver.
 *
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2003 Adrian Sun (asun@darksunrising.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
 * 02111-1307, USA.
 *
 * This driver uses the sungem driver (c) David Miller
 * (davem@redhat.com) as its basis.
 *
 * The cassini chip has a number of features that distinguish it from
 * the GEM chip:
 *  4 transmit descriptor rings that are used for either QoS (VLAN) or
 *      load balancing (non-VLAN mode)
 *  batching of multiple packets
 *  multiple CPU dispatching
 *  page-based RX descriptor engine with separate completion rings
 *  Gigabit support (GMII and PCS interface)
 *  MIF link up/down detection works
 *
 * RX is handled by page sized buffers that are attached as fragments to
 * the skb. here's what's done:
 *  -- driver allocates pages at a time and keeps reference counts
 *     on them.
 *  -- the upper protocol layers assume that the header is in the skb
 *     itself. as a result, cassini will copy a small amount (64 bytes)
 *     to make them happy.
 *  -- driver appends the rest of the data pages as frags to skbuffs
 *     and increments the reference count
 *  -- on page reclamation, the driver swaps the page with a spare page.
 *     if that page is still in use, it frees its reference to that page,
 *     and allocates a new page for use. otherwise, it just recycles the
 *     page.
 *
 * NOTE: cassini can parse the header. however, it's not worth it
 *       as long as the network stack requires a header copy.
 *
 * TX has 4 queues. currently these queues are used in a round-robin
 * fashion for load balancing. They can also be used for QoS. for that
 * to work, however, QoS information needs to be exposed down to the driver
 * level so that subqueues get targeted to particular transmit rings.
 * alternatively, the queues can be configured via use of the all-purpose
 * ioctl.
 *
 * RX DATA: the rx completion ring has all the info, but the rx desc
 * ring has all of the data. RX can conceivably come in under multiple
 * interrupts, but the INT# assignment needs to be set up properly by
 * the BIOS and conveyed to the driver. PCI BIOSes don't know how to do
 * that. also, the two descriptor rings are designed to distinguish between
 * encrypted and non-encrypted packets, but we use them for buffering
 * instead.
 *
 * by default, the selective clear mask is set up to process rx packets.
 */
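
/* Illustrative sketch (added commentary, not original): the RX scheme
 * described above reduces to roughly the following shape, using
 * hypothetical local names:
 *
 *	memcpy(skb_put(skb, RX_COPY_MIN), page_addr + off, RX_COPY_MIN);
 *	get_page(page->buffer);
 *	frag->page        = page->buffer;
 *	frag->page_offset = off + RX_COPY_MIN;
 *	frag->size        = dlen - RX_COPY_MIN;
 *
 * i.e. a small copy to give the stack its header, with the bulk of the
 * data attached as page fragments whose lifetime is tracked by the page
 * reference count. See cas_rx_process_pkt() below for the real code.
 */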
#include <linux/config.h>
#include <linux/version.h>

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>

#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/random.h>
#include <linux/mii.h>
#include <linux/tcp.h>

#include <net/checksum.h>

#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>
#define cas_page_map(x)      kmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
#define cas_page_unmap(x)    kunmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
#define CAS_NCPUS            num_online_cpus()

#if defined(CONFIG_CASSINI_NAPI) && defined(HAVE_NETDEV_POLL)
#define cas_skb_release(x)  netif_receive_skb(x)
#else
#define cas_skb_release(x)  netif_rx(x)
#endif
/* select which firmware to use */
#define USE_HP_WORKAROUND
#define HP_WORKAROUND_DEFAULT	/* select which firmware to use as default */
#define CAS_HP_ALT_FIRMWARE	cas_prog_null /* alternate firmware */

#define USE_TX_COMPWB		/* use completion writeback registers */
#define USE_CSMA_CD_PROTO	/* standard CSMA/CD */
#define USE_RX_BLANK		/* hw interrupt mitigation */
#undef USE_ENTROPY_DEV		/* don't test for entropy device */
/* NOTE: these aren't usable unless PCI interrupts can be assigned.
 * also, we need to make cp->lock finer-grained.
 */

#undef USE_VPD_DEBUG		/* debug vpd information if defined */
/* rx processing options */
#define USE_PAGE_ORDER		/* specify to allocate large rx pages */
#define RX_DONT_BATCH		0	/* if 1, don't batch flows */
#define RX_COPY_ALWAYS		0	/* if 0, use frags */
#define RX_COPY_MIN		64	/* copy a little to make upper layers happy */
#undef RX_COUNT_BUFFERS		/* define to calculate RX buffer stats */
#define DRV_MODULE_NAME		"cassini"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.4"
#define DRV_MODULE_RELDATE	"1 July 2004"

#define CAS_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define CAS_TX_TIMEOUT			(HZ)
#define CAS_LINK_TIMEOUT		(22*HZ/10)
#define CAS_LINK_FAST_TIMEOUT		(1)
/* timeout values for state changing. these specify the number
 * of 10us delays to be used before giving up.
 */
#define STOP_TRIES_PHY	1000
#define STOP_TRIES	5000
/* specify a minimum frame size to deal with some fifo issues
 * max mtu == 2 * page size - ethernet header - 64 - swivel =
 *            2 * page_size - 0x50
 */
#define CAS_MIN_FRAME			97
#define CAS_1000MB_MIN_FRAME		255
#define CAS_MIN_MTU			60
#define CAS_MAX_MTU			min(((cp->page_size << 1) - 0x50), 9000)
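
/* Worked example of the arithmetic above (added commentary): with the
 * default 8K rx pages (cp->page_size == 0x2000), the max mtu is
 * 2 * 0x2000 - 0x50 = 0x3fb0 (16304) bytes, which min() then caps at
 * the 9000-byte jumbo value. With 2K pages it is 0xfb0 (4016) bytes.
 */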
/*
 * Eliminate these and use separate atomic counters for each, to
 * avoid a race condition.
 */
#define CAS_RESET_MTU			1
#define CAS_RESET_ALL			2
#define CAS_RESET_SPARE			3
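
/* Sketch of how these codes are consumed (both variants appear later
 * in this file): the simple variant encodes the pending reset type
 * directly,
 *
 *	atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
 *	schedule_work(&cp->reset_task);
 *
 * while the race-free variant suggested above keeps one atomic counter
 * per reset type (reset_task_pending_all, reset_task_pending_spare,
 * ...) and increments the relevant ones before scheduling the task.
 */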
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Adrian Sun (asun@darksunrising.com)");
MODULE_DESCRIPTION("Sun Cassini(+) ethernet driver");
MODULE_LICENSE("GPL");
MODULE_PARM(cassini_debug, "i");
MODULE_PARM_DESC(cassini_debug, "Cassini bitmapped debugging message enable value");
MODULE_PARM(link_mode, "i");
MODULE_PARM_DESC(link_mode, "default link mode");
/*
 * Work around for a PCS bug in which the link goes down due to the chip
 * being confused and never showing a link status of "up."
 */
#define DEFAULT_LINKDOWN_TIMEOUT 5
/*
 * Value in seconds, for user input.
 */
static int linkdown_timeout = DEFAULT_LINKDOWN_TIMEOUT;
MODULE_PARM(linkdown_timeout, "i");
MODULE_PARM_DESC(linkdown_timeout,
"min reset interval in sec. for PCS linkdown issue; disabled if not positive");
/*
 * value in 'ticks' (units used by jiffies). Set when we init the
 * module because 'HZ' is actually a function call on some flavors of
 * Linux.  This will default to DEFAULT_LINKDOWN_TIMEOUT * HZ.
 */
static int link_transition_timeout;

static int cassini_debug = -1;	/* -1 == use CAS_DEF_MSG_ENABLE as value */
static int link_mode;
static u16 link_modes[] __devinitdata = {
	BMCR_ANENABLE,			 /* 0 : autoneg */
	0,				 /* 1 : 10bt half duplex */
	BMCR_SPEED100,			 /* 2 : 100bt half duplex */
	BMCR_FULLDPLX,			 /* 3 : 10bt full duplex */
	BMCR_SPEED100|BMCR_FULLDPLX,	 /* 4 : 100bt full duplex */
	CAS_BMCR_SPEED1000|BMCR_FULLDPLX /* 5 : 1000bt full duplex */
};
static struct pci_device_id cas_pci_tbl[] __devinitdata = {
	{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, cas_pci_tbl);
static void cas_set_link_modes(struct cas *cp);

static inline void cas_lock_tx(struct cas *cp)
{
	int i;

	for (i = 0; i < N_TX_RINGS; i++)
		spin_lock(&cp->tx_lock[i]);
}

static inline void cas_lock_all(struct cas *cp)
{
	spin_lock_irq(&cp->lock);
	cas_lock_tx(cp);
}
/* WTZ: QA was finding deadlock problems with the previous
 * versions after long test runs with multiple cards per machine.
 * See if replacing cas_lock_all with safer versions helps. The
 * symptoms QA is reporting match those we'd expect if interrupts
 * aren't being properly restored, and we fixed a previous deadlock
 * with similar symptoms by using save/restore versions in other
 * places.
 */
#define cas_lock_all_save(cp, flags) \
do { \
	struct cas *xxxcp = (cp); \
	spin_lock_irqsave(&xxxcp->lock, flags); \
	cas_lock_tx(xxxcp); \
} while (0)
static inline void cas_unlock_tx(struct cas *cp)
{
	int i;

	for (i = N_TX_RINGS; i > 0; i--)
		spin_unlock(&cp->tx_lock[i - 1]);
}

static inline void cas_unlock_all(struct cas *cp)
{
	cas_unlock_tx(cp);
	spin_unlock_irq(&cp->lock);
}

#define cas_unlock_all_restore(cp, flags) \
do { \
	struct cas *xxxcp = (cp); \
	cas_unlock_tx(xxxcp); \
	spin_unlock_irqrestore(&xxxcp->lock, flags); \
} while (0)
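
/* Usage sketch for the pair above (hypothetical caller, added for
 * illustration):
 *
 *	unsigned long flags;
 *
 *	cas_lock_all_save(cp, flags);
 *	... touch state guarded by cp->lock and the tx locks ...
 *	cas_unlock_all_restore(cp, flags);
 *
 * unlike cas_lock_all()/cas_unlock_all(), this restores the caller's
 * interrupt state rather than unconditionally re-enabling interrupts.
 */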
static void cas_disable_irq(struct cas *cp, const int ring)
{
	/* Make sure we won't get any more interrupts */
	if (ring == 0) {
		writel(0xFFFFFFFF, cp->regs + REG_INTR_MASK);
		return;
	}

	/* disable completion interrupts and selectively mask */
	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		switch (ring) {
#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
#ifdef USE_PCI_INTB
		case 1:
#endif
#ifdef USE_PCI_INTC
		case 2:
#endif
#ifdef USE_PCI_INTD
		case 3:
#endif
			writel(INTRN_MASK_CLEAR_ALL | INTRN_MASK_RX_EN,
			       cp->regs + REG_PLUS_INTRN_MASK(ring));
			break;
#endif
		default:
			writel(INTRN_MASK_CLEAR_ALL, cp->regs +
			       REG_PLUS_INTRN_MASK(ring));
			break;
		}
	}
}
static inline void cas_mask_intr(struct cas *cp)
{
	int i;

	for (i = 0; i < N_RX_COMP_RINGS; i++)
		cas_disable_irq(cp, i);
}
static void cas_enable_irq(struct cas *cp, const int ring)
{
	if (ring == 0) { /* all but TX_DONE */
		writel(INTR_TX_DONE, cp->regs + REG_INTR_MASK);
		return;
	}

	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		switch (ring) {
#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
#ifdef USE_PCI_INTB
		case 1:
#endif
#ifdef USE_PCI_INTC
		case 2:
#endif
#ifdef USE_PCI_INTD
		case 3:
#endif
			writel(INTRN_MASK_RX_EN, cp->regs +
			       REG_PLUS_INTRN_MASK(ring));
			break;
#endif
		default:
			break;
		}
	}
}
static inline void cas_unmask_intr(struct cas *cp)
{
	int i;

	for (i = 0; i < N_RX_COMP_RINGS; i++)
		cas_enable_irq(cp, i);
}
static inline void cas_entropy_gather(struct cas *cp)
{
#ifdef USE_ENTROPY_DEV
	if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
		return;

	batch_entropy_store(readl(cp->regs + REG_ENTROPY_IV),
			    readl(cp->regs + REG_ENTROPY_IV),
			    sizeof(uint64_t)*8);
#endif
}
static inline void cas_entropy_reset(struct cas *cp)
{
#ifdef USE_ENTROPY_DEV
	if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
		return;

	writel(BIM_LOCAL_DEV_PAD | BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_EXT,
	       cp->regs + REG_BIM_LOCAL_DEV_EN);
	writeb(ENTROPY_RESET_STC_MODE, cp->regs + REG_ENTROPY_RESET);
	writeb(0x55, cp->regs + REG_ENTROPY_RAND_REG);

	/* if we read back 0x0, we don't have an entropy device */
	if (readb(cp->regs + REG_ENTROPY_RAND_REG) == 0)
		cp->cas_flags &= ~CAS_FLAG_ENTROPY_DEV;
#endif
}
/* access to the phy. the following assumes that we've initialized the MIF to
 * be in frame rather than bit-bang mode
 */
static u16 cas_phy_read(struct cas *cp, int reg)
{
	u32 cmd;
	int limit = STOP_TRIES_PHY;

	cmd = MIF_FRAME_ST | MIF_FRAME_OP_READ;
	cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
	cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
	cmd |= MIF_FRAME_TURN_AROUND_MSB;
	writel(cmd, cp->regs + REG_MIF_FRAME);

	/* poll for completion */
	while (limit-- > 0) {
		udelay(10);
		cmd = readl(cp->regs + REG_MIF_FRAME);
		if (cmd & MIF_FRAME_TURN_AROUND_LSB)
			return (cmd & MIF_FRAME_DATA_MASK);
	}
	return 0xFFFF; /* -1 */
}
static int cas_phy_write(struct cas *cp, int reg, u16 val)
{
	int limit = STOP_TRIES_PHY;
	u32 cmd;

	cmd = MIF_FRAME_ST | MIF_FRAME_OP_WRITE;
	cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
	cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
	cmd |= MIF_FRAME_TURN_AROUND_MSB;
	cmd |= val & MIF_FRAME_DATA_MASK;
	writel(cmd, cp->regs + REG_MIF_FRAME);

	/* poll for completion */
	while (limit-- > 0) {
		udelay(10);
		cmd = readl(cp->regs + REG_MIF_FRAME);
		if (cmd & MIF_FRAME_TURN_AROUND_LSB)
			return 0;
	}
	return -1;
}
static void cas_phy_powerup(struct cas *cp)
{
	u16 ctl = cas_phy_read(cp, MII_BMCR);

	if ((ctl & BMCR_PDOWN) == 0)
		return;
	ctl &= ~BMCR_PDOWN;
	cas_phy_write(cp, MII_BMCR, ctl);
}

static void cas_phy_powerdown(struct cas *cp)
{
	u16 ctl = cas_phy_read(cp, MII_BMCR);

	if (ctl & BMCR_PDOWN)
		return;
	ctl |= BMCR_PDOWN;
	cas_phy_write(cp, MII_BMCR, ctl);
}
/* cp->lock held. note: the last put_page will free the buffer */
static int cas_page_free(struct cas *cp, cas_page_t *page)
{
	pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size,
		       PCI_DMA_FROMDEVICE);
	__free_pages(page->buffer, cp->page_order);
	kfree(page);
	return 0;
}
#ifdef RX_COUNT_BUFFERS
#define RX_USED_ADD(x, y)	((x)->used += (y))
#define RX_USED_SET(x, y)	((x)->used  = (y))
#else
#define RX_USED_ADD(x, y)
#define RX_USED_SET(x, y)
#endif
/* local page allocation routines for the receive buffers. jumbo pages
 * require at least 8K contiguous and 8K aligned buffers.
 */
static cas_page_t *cas_page_alloc(struct cas *cp, const int flags)
{
	cas_page_t *page;

	page = kmalloc(sizeof(cas_page_t), flags);
	if (!page)
		return NULL;

	INIT_LIST_HEAD(&page->list);
	RX_USED_SET(page, 0);
	page->buffer = alloc_pages(flags, cp->page_order);
	if (!page->buffer)
		goto page_err;
	page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0,
				      cp->page_size, PCI_DMA_FROMDEVICE);
	return page;

page_err:
	kfree(page);
	return NULL;
}
/* initialize spare pool of rx buffers, but allocate during the open */
static void cas_spare_init(struct cas *cp)
{
	spin_lock(&cp->rx_inuse_lock);
	INIT_LIST_HEAD(&cp->rx_inuse_list);
	spin_unlock(&cp->rx_inuse_lock);

	spin_lock(&cp->rx_spare_lock);
	INIT_LIST_HEAD(&cp->rx_spare_list);
	cp->rx_spares_needed = RX_SPARE_COUNT;
	spin_unlock(&cp->rx_spare_lock);
}
/* used on close. free all the spare buffers. */
static void cas_spare_free(struct cas *cp)
{
	struct list_head list, *elem, *tmp;

	/* free spare buffers */
	INIT_LIST_HEAD(&list);
	spin_lock(&cp->rx_spare_lock);
	list_splice(&cp->rx_spare_list, &list);
	INIT_LIST_HEAD(&cp->rx_spare_list);
	spin_unlock(&cp->rx_spare_lock);
	list_for_each_safe(elem, tmp, &list) {
		cas_page_free(cp, list_entry(elem, cas_page_t, list));
	}

	INIT_LIST_HEAD(&list);
#if 1
	/*
	 * Looks like Adrian had protected this with a different
	 * lock than used everywhere else to manipulate this list.
	 */
	spin_lock(&cp->rx_inuse_lock);
	list_splice(&cp->rx_inuse_list, &list);
	INIT_LIST_HEAD(&cp->rx_inuse_list);
	spin_unlock(&cp->rx_inuse_lock);
#else
	spin_lock(&cp->rx_spare_lock);
	list_splice(&cp->rx_inuse_list, &list);
	INIT_LIST_HEAD(&cp->rx_inuse_list);
	spin_unlock(&cp->rx_spare_lock);
#endif

	list_for_each_safe(elem, tmp, &list) {
		cas_page_free(cp, list_entry(elem, cas_page_t, list));
	}
}
/* replenish spares if needed */
static void cas_spare_recover(struct cas *cp, const int flags)
{
	struct list_head list, *elem, *tmp;
	int needed, i;

	/* check inuse list. if we don't need any more free buffers,
	 * just free it
	 */

	/* make a local copy of the list */
	INIT_LIST_HEAD(&list);
	spin_lock(&cp->rx_inuse_lock);
	list_splice(&cp->rx_inuse_list, &list);
	INIT_LIST_HEAD(&cp->rx_inuse_list);
	spin_unlock(&cp->rx_inuse_lock);

	list_for_each_safe(elem, tmp, &list) {
		cas_page_t *page = list_entry(elem, cas_page_t, list);

		if (page_count(page->buffer) > 1)
			continue;

		list_del(elem);
		spin_lock(&cp->rx_spare_lock);
		if (cp->rx_spares_needed > 0) {
			list_add(elem, &cp->rx_spare_list);
			cp->rx_spares_needed--;
			spin_unlock(&cp->rx_spare_lock);
		} else {
			spin_unlock(&cp->rx_spare_lock);
			cas_page_free(cp, page);
		}
	}

	/* put any inuse buffers back on the list */
	if (!list_empty(&list)) {
		spin_lock(&cp->rx_inuse_lock);
		list_splice(&list, &cp->rx_inuse_list);
		spin_unlock(&cp->rx_inuse_lock);
	}

	spin_lock(&cp->rx_spare_lock);
	needed = cp->rx_spares_needed;
	spin_unlock(&cp->rx_spare_lock);
	if (!needed)
		return;

	/* we still need spares, so try to allocate some */
	INIT_LIST_HEAD(&list);
	i = 0;
	while (i < needed) {
		cas_page_t *spare = cas_page_alloc(cp, flags);
		if (!spare)
			break;
		list_add(&spare->list, &list);
		i++;
	}

	spin_lock(&cp->rx_spare_lock);
	list_splice(&list, &cp->rx_spare_list);
	cp->rx_spares_needed -= i;
	spin_unlock(&cp->rx_spare_lock);
}
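
/* Summary of the spare-pool flow above (added commentary): pages the
 * stack still references sit on rx_inuse_list; cas_spare_recover()
 * moves any whose page count has dropped back to 1 onto rx_spare_list
 * (up to rx_spares_needed) or frees them, and then allocates fresh
 * pages if the spare pool is still short.
 */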
/* pull a page from the list. */
static cas_page_t *cas_page_dequeue(struct cas *cp)
{
	struct list_head *entry;
	int recover;

	spin_lock(&cp->rx_spare_lock);
	if (list_empty(&cp->rx_spare_list)) {
		/* try to do a quick recovery */
		spin_unlock(&cp->rx_spare_lock);
		cas_spare_recover(cp, GFP_ATOMIC);
		spin_lock(&cp->rx_spare_lock);
		if (list_empty(&cp->rx_spare_list)) {
			if (netif_msg_rx_err(cp))
				printk(KERN_ERR "%s: no spare buffers "
				       "available.\n", cp->dev->name);
			spin_unlock(&cp->rx_spare_lock);
			return NULL;
		}
	}

	entry = cp->rx_spare_list.next;
	list_del(entry);
	recover = ++cp->rx_spares_needed;
	spin_unlock(&cp->rx_spare_lock);

	/* trigger the timer to do the recovery */
	if ((recover & (RX_SPARE_RECOVER_VAL - 1)) == 0) {
#if 1
		atomic_inc(&cp->reset_task_pending);
		atomic_inc(&cp->reset_task_pending_spare);
		schedule_work(&cp->reset_task);
#else
		atomic_set(&cp->reset_task_pending, CAS_RESET_SPARE);
		schedule_work(&cp->reset_task);
#endif
	}
	return list_entry(entry, cas_page_t, list);
}
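
/* Note on the trigger above (added commentary): rx_spares_needed counts
 * dequeued pages, so the reset task is only scheduled on every
 * RX_SPARE_RECOVER_VAL-th depletion (the & (RX_SPARE_RECOVER_VAL - 1)
 * test assumes a power-of-2 value) instead of on every dequeue.
 */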
static void cas_mif_poll(struct cas *cp, const int enable)
{
	u32 cfg;

	cfg  = readl(cp->regs + REG_MIF_CFG);
	cfg &= (MIF_CFG_MDIO_0 | MIF_CFG_MDIO_1);

	if (cp->phy_type & CAS_PHY_MII_MDIO1)
		cfg |= MIF_CFG_PHY_SELECT;

	/* poll and interrupt on link status change. */
	if (enable) {
		cfg |= MIF_CFG_POLL_EN;
		cfg |= CAS_BASE(MIF_CFG_POLL_REG, MII_BMSR);
		cfg |= CAS_BASE(MIF_CFG_POLL_PHY, cp->phy_addr);
	}
	writel((enable) ? ~(BMSR_LSTATUS | BMSR_ANEGCOMPLETE) : 0xFFFF,
	       cp->regs + REG_MIF_MASK);
	writel(cfg, cp->regs + REG_MIF_CFG);
}
/* Must be invoked under cp->lock */
static void cas_begin_auto_negotiation(struct cas *cp, struct ethtool_cmd *ep)
{
	u16 ctl;
	int lcntl;
	int changed = 0;
	int oldstate = cp->lstate;
	int link_was_not_down = !(oldstate == link_down);

	/* Setup link parameters */
	if (!ep)
		goto start_aneg;
	lcntl = cp->link_cntl;
	if (ep->autoneg == AUTONEG_ENABLE)
		cp->link_cntl = BMCR_ANENABLE;
	else {
		cp->link_cntl = 0;
		if (ep->speed == SPEED_100)
			cp->link_cntl |= BMCR_SPEED100;
		else if (ep->speed == SPEED_1000)
			cp->link_cntl |= CAS_BMCR_SPEED1000;
		if (ep->duplex == DUPLEX_FULL)
			cp->link_cntl |= BMCR_FULLDPLX;
	}
	changed = (lcntl != cp->link_cntl);

start_aneg:
	if (changed) {
		if (cp->lstate == link_up) {
			printk(KERN_INFO "%s: PCS link down.\n",
			       cp->dev->name);
		} else {
			printk(KERN_INFO "%s: link configuration changed\n",
			       cp->dev->name);
		}
	}
	cp->lstate = link_down;
	cp->link_transition = LINK_TRANSITION_LINK_DOWN;
	if (!cp->opened)
		return;

	/*
	 * WTZ: If the old state was link_up, we turn off the carrier
	 * to replicate everything we do elsewhere on a link-down
	 * event when we were already in a link-up state..
	 */
	if (oldstate == link_up)
		netif_carrier_off(cp->dev);
	if (changed && link_was_not_down) {
		/*
		 * WTZ: This branch will simply schedule a full reset after
		 * we explicitly changed link modes in an ioctl. See if this
		 * fixes the link-problems we were having for forced mode.
		 */
		atomic_inc(&cp->reset_task_pending);
		atomic_inc(&cp->reset_task_pending_all);
		schedule_work(&cp->reset_task);
		cp->timer_ticks = 0;
		mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
		return;
	}

	if (cp->phy_type & CAS_PHY_SERDES) {
		u32 val = readl(cp->regs + REG_PCS_MII_CTRL);

		if (cp->link_cntl & BMCR_ANENABLE) {
			val |= (PCS_MII_RESTART_AUTONEG | PCS_MII_AUTONEG_EN);
			cp->lstate = link_aneg;
		} else {
			if (cp->link_cntl & BMCR_FULLDPLX)
				val |= PCS_MII_CTRL_DUPLEX;
			val &= ~PCS_MII_AUTONEG_EN;
			cp->lstate = link_force_ok;
		}
		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
		writel(val, cp->regs + REG_PCS_MII_CTRL);
	} else {
		cas_mif_poll(cp, 0);
		ctl = cas_phy_read(cp, MII_BMCR);
		ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 |
			 CAS_BMCR_SPEED1000 | BMCR_ANENABLE);
		ctl |= cp->link_cntl;
		if (ctl & BMCR_ANENABLE) {
			ctl |= BMCR_ANRESTART;
			cp->lstate = link_aneg;
		} else {
			cp->lstate = link_force_ok;
		}
		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
		cas_phy_write(cp, MII_BMCR, ctl);
		cas_mif_poll(cp, 1);
	}

	cp->timer_ticks = 0;
	mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
}
/* Must be invoked under cp->lock. */
static int cas_reset_mii_phy(struct cas *cp)
{
	int limit = STOP_TRIES_PHY;
	u16 val;

	cas_phy_write(cp, MII_BMCR, BMCR_RESET);

	while (limit--) {
		val = cas_phy_read(cp, MII_BMCR);
		if ((val & BMCR_RESET) == 0)
			break;
		udelay(10);
	}
	return (limit <= 0);
}
static void cas_saturn_firmware_load(struct cas *cp)
{
	cas_saturn_patch_t *patch = cas_saturn_patch;

	cas_phy_powerdown(cp);

	/* expanded memory access mode */
	cas_phy_write(cp, DP83065_MII_MEM, 0x0);

	/* pointer configuration for new firmware */
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ff9);
	cas_phy_write(cp, DP83065_MII_REGD, 0xbd);
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffa);
	cas_phy_write(cp, DP83065_MII_REGD, 0x82);
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffb);
	cas_phy_write(cp, DP83065_MII_REGD, 0x0);
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ffc);
	cas_phy_write(cp, DP83065_MII_REGD, 0x39);

	/* download new firmware */
	cas_phy_write(cp, DP83065_MII_MEM, 0x1);
	cas_phy_write(cp, DP83065_MII_REGE, patch->addr);
	while (patch->addr) {
		cas_phy_write(cp, DP83065_MII_REGD, patch->val);
		patch++;
	}

	/* enable firmware */
	cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8);
	cas_phy_write(cp, DP83065_MII_REGD, 0x1);
}
/* phy initialization */
static void cas_phy_init(struct cas *cp)
{
	u16 val;

	/* if we're in MII/GMII mode, set up phy */
	if (CAS_PHY_MII(cp->phy_type)) {
		writel(PCS_DATAPATH_MODE_MII,
		       cp->regs + REG_PCS_DATAPATH_MODE);

		cas_mif_poll(cp, 0);
		cas_reset_mii_phy(cp); /* take out of isolate mode */

		if (PHY_LUCENT_B0 == cp->phy_id) {
			/* workaround link up/down issue with lucent */
			cas_phy_write(cp, LUCENT_MII_REG, 0x8000);
			cas_phy_write(cp, MII_BMCR, 0x00f1);
			cas_phy_write(cp, LUCENT_MII_REG, 0x0);

		} else if (PHY_BROADCOM_B0 == (cp->phy_id & 0xFFFFFFFC)) {
			/* workarounds for broadcom phy */
			cas_phy_write(cp, BROADCOM_MII_REG8, 0x0C20);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x0012);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x1804);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x0013);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x1204);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0132);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0232);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x201F);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0A20);

		} else if (PHY_BROADCOM_5411 == cp->phy_id) {
			val = cas_phy_read(cp, BROADCOM_MII_REG4);
			val = cas_phy_read(cp, BROADCOM_MII_REG4);
			if (val & 0x0080) {
				/* link workaround */
				cas_phy_write(cp, BROADCOM_MII_REG4,
					      val & ~0x0080);
			}

		} else if (cp->cas_flags & CAS_FLAG_SATURN) {
			writel((cp->phy_type & CAS_PHY_MII_MDIO0) ?
			       SATURN_PCFG_FSI : 0x0,
			       cp->regs + REG_SATURN_PCFG);

			/* load firmware to address 10Mbps auto-negotiation
			 * issue. NOTE: this will need to be changed if the
			 * default firmware gets fixed.
			 */
			if (PHY_NS_DP83065 == cp->phy_id) {
				cas_saturn_firmware_load(cp);
			}
		}
		/* advertise capabilities */
		val = cas_phy_read(cp, MII_BMCR);
		val &= ~BMCR_ANENABLE;
		cas_phy_write(cp, MII_BMCR, val);

		cas_phy_write(cp, MII_ADVERTISE,
			      cas_phy_read(cp, MII_ADVERTISE) |
			      (ADVERTISE_10HALF | ADVERTISE_10FULL |
			       ADVERTISE_100HALF | ADVERTISE_100FULL |
			       CAS_ADVERTISE_PAUSE |
			       CAS_ADVERTISE_ASYM_PAUSE));

		if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
			/* make sure that we don't advertise half
			 * duplex to avoid a chip issue
			 */
			val  = cas_phy_read(cp, CAS_MII_1000_CTRL);
			val &= ~CAS_ADVERTISE_1000HALF;
			val |= CAS_ADVERTISE_1000FULL;
			cas_phy_write(cp, CAS_MII_1000_CTRL, val);
		}
	} else {
		/* reset pcs for serdes */
		u32 val;
		int limit;

		writel(PCS_DATAPATH_MODE_SERDES,
		       cp->regs + REG_PCS_DATAPATH_MODE);

		/* enable serdes pins on saturn */
		if (cp->cas_flags & CAS_FLAG_SATURN)
			writel(0, cp->regs + REG_SATURN_PCFG);

		/* Reset PCS unit. */
		val = readl(cp->regs + REG_PCS_MII_CTRL);
		val |= PCS_MII_RESET;
		writel(val, cp->regs + REG_PCS_MII_CTRL);

		limit = STOP_TRIES;
		while (limit-- > 0) {
			udelay(10);
			if ((readl(cp->regs + REG_PCS_MII_CTRL) &
			     PCS_MII_RESET) == 0)
				break;
		}
		if (limit <= 0)
			printk(KERN_WARNING "%s: PCS reset bit would not "
			       "clear [%08x].\n", cp->dev->name,
			       readl(cp->regs + REG_PCS_STATE_MACHINE));

		/* Make sure PCS is disabled while changing advertisement
		 * configuration.
		 */
		writel(0x0, cp->regs + REG_PCS_CFG);

		/* Advertise all capabilities except half-duplex. */
		val  = readl(cp->regs + REG_PCS_MII_ADVERT);
		val &= ~PCS_MII_ADVERT_HD;
		val |= (PCS_MII_ADVERT_FD | PCS_MII_ADVERT_SYM_PAUSE |
			PCS_MII_ADVERT_ASYM_PAUSE);
		writel(val, cp->regs + REG_PCS_MII_ADVERT);

		/* enable PCS */
		writel(PCS_CFG_EN, cp->regs + REG_PCS_CFG);

		/* pcs workaround: enable sync detect */
		writel(PCS_SERDES_CTRL_SYNCD_EN,
		       cp->regs + REG_PCS_SERDES_CTRL);
	}
}
static int cas_pcs_link_check(struct cas *cp)
{
	u32 stat, state_machine;
	int retval = 0;

	/* The link status bit latches on zero, so you must
	 * read it twice in such a case to see a transition
	 * to the link being up.
	 */
	stat = readl(cp->regs + REG_PCS_MII_STATUS);
	if ((stat & PCS_MII_STATUS_LINK_STATUS) == 0)
		stat = readl(cp->regs + REG_PCS_MII_STATUS);

	/* The remote-fault indication is only valid
	 * when autoneg has completed.
	 */
	if ((stat & (PCS_MII_STATUS_AUTONEG_COMP |
		     PCS_MII_STATUS_REMOTE_FAULT)) ==
	    (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT)) {
		if (netif_msg_link(cp))
			printk(KERN_INFO "%s: PCS RemoteFault\n",
			       cp->dev->name);
	}

	/* work around link detection issue by querying the PCS state
	 * machine directly.
	 */
	state_machine = readl(cp->regs + REG_PCS_STATE_MACHINE);
	if ((state_machine & PCS_SM_LINK_STATE_MASK) != SM_LINK_STATE_UP) {
		stat &= ~PCS_MII_STATUS_LINK_STATUS;
	} else if (state_machine & PCS_SM_WORD_SYNC_STATE_MASK) {
		stat |= PCS_MII_STATUS_LINK_STATUS;
	}

	if (stat & PCS_MII_STATUS_LINK_STATUS) {
		if (cp->lstate != link_up) {
			if (cp->opened) {
				cp->lstate = link_up;
				cp->link_transition = LINK_TRANSITION_LINK_UP;

				cas_set_link_modes(cp);
				netif_carrier_on(cp->dev);
			}
		}
	} else if (cp->lstate == link_up) {
		cp->lstate = link_down;
		if (link_transition_timeout != 0 &&
		    cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
		    !cp->link_transition_jiffies_valid) {
			/*
			 * force a reset, as a workaround for the
			 * link-failure problem. May want to move this to a
			 * point a bit earlier in the sequence. If we had
			 * generated a reset a short time ago, we'll wait for
			 * the link timer to check the status until a
			 * timer expires (link_transition_jiffies_valid is
			 * true when the timer is running.) Instead of using
			 * a system timer, we just do a check whenever the
			 * link timer is running - this clears the flag after
			 * an appropriate delay.
			 */
			retval = 1;
			cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
			cp->link_transition_jiffies = jiffies;
			cp->link_transition_jiffies_valid = 1;
		} else {
			cp->link_transition = LINK_TRANSITION_ON_FAILURE;
		}
		netif_carrier_off(cp->dev);
		if (cp->opened && netif_msg_link(cp)) {
			printk(KERN_INFO "%s: PCS link down.\n",
			       cp->dev->name);
		}

		/* Cassini only: if you force a mode, there can be
		 * sync problems on link down. to fix that, the following
		 * things need to be checked:
		 * 1) read serialink state register
		 * 2) read pcs status register to verify link down.
		 * 3) if link down and serial link == 0x03, then you need
		 *    to global reset the chip.
		 */
		if ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0) {
			/* should check to see if we're in a forced mode */
			stat = readl(cp->regs + REG_PCS_SERDES_STATE);
			if (stat == 0x03)
				return 1;
		}
	} else if (cp->lstate == link_down) {
		if (link_transition_timeout != 0 &&
		    cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
		    !cp->link_transition_jiffies_valid) {
			/* force a reset, as a workaround for the
			 * link-failure problem. May want to move
			 * this to a point a bit earlier in the
			 * sequence.
			 */
			retval = 1;
			cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
			cp->link_transition_jiffies = jiffies;
			cp->link_transition_jiffies_valid = 1;
		} else {
			cp->link_transition = LINK_TRANSITION_STILL_FAILED;
		}
	}

	return retval;
}
static int cas_pcs_interrupt(struct net_device *dev,
			     struct cas *cp, u32 status)
{
	u32 stat = readl(cp->regs + REG_PCS_INTR_STATUS);

	if ((stat & PCS_INTR_STATUS_LINK_CHANGE) == 0)
		return 0;
	return cas_pcs_link_check(cp);
}
static int cas_txmac_interrupt(struct net_device *dev,
			       struct cas *cp, u32 status)
{
	u32 txmac_stat = readl(cp->regs + REG_MAC_TX_STATUS);

	if (!txmac_stat)
		return 0;

	if (netif_msg_intr(cp))
		printk(KERN_DEBUG "%s: txmac interrupt, txmac_stat: 0x%x\n",
			cp->dev->name, txmac_stat);

	/* Defer timer expiration is quite normal,
	 * don't even log the event.
	 */
	if ((txmac_stat & MAC_TX_DEFER_TIMER) &&
	    !(txmac_stat & ~MAC_TX_DEFER_TIMER))
		return 0;

	spin_lock(&cp->stat_lock[0]);
	if (txmac_stat & MAC_TX_UNDERRUN) {
		printk(KERN_ERR "%s: TX MAC xmit underrun.\n",
		       dev->name);
		cp->net_stats[0].tx_fifo_errors++;
	}

	if (txmac_stat & MAC_TX_MAX_PACKET_ERR) {
		printk(KERN_ERR "%s: TX MAC max packet size error.\n",
		       dev->name);
		cp->net_stats[0].tx_errors++;
	}

	/* The rest are all cases of one of the 16-bit TX
	 * counters expiring.
	 */
	if (txmac_stat & MAC_TX_COLL_NORMAL)
		cp->net_stats[0].collisions += 0x10000;

	if (txmac_stat & MAC_TX_COLL_EXCESS) {
		cp->net_stats[0].tx_aborted_errors += 0x10000;
		cp->net_stats[0].collisions += 0x10000;
	}

	if (txmac_stat & MAC_TX_COLL_LATE) {
		cp->net_stats[0].tx_aborted_errors += 0x10000;
		cp->net_stats[0].collisions += 0x10000;
	}
	spin_unlock(&cp->stat_lock[0]);

	/* We do not keep track of MAC_TX_COLL_FIRST and
	 * MAC_TX_PEAK_ATTEMPTS events.
	 */
	return 0;
}
static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware)
{
	cas_hp_inst_t *inst;
	u32 val;
	int i;

	i = 0;
	while ((inst = firmware) && inst->note) {
		writel(i, cp->regs + REG_HP_INSTR_RAM_ADDR);

		val = CAS_BASE(HP_INSTR_RAM_HI_VAL, inst->val);
		val |= CAS_BASE(HP_INSTR_RAM_HI_MASK, inst->mask);
		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_HI);

		val = CAS_BASE(HP_INSTR_RAM_MID_OUTARG, inst->outarg >> 10);
		val |= CAS_BASE(HP_INSTR_RAM_MID_OUTOP, inst->outop);
		val |= CAS_BASE(HP_INSTR_RAM_MID_FNEXT, inst->fnext);
		val |= CAS_BASE(HP_INSTR_RAM_MID_FOFF, inst->foff);
		val |= CAS_BASE(HP_INSTR_RAM_MID_SNEXT, inst->snext);
		val |= CAS_BASE(HP_INSTR_RAM_MID_SOFF, inst->soff);
		val |= CAS_BASE(HP_INSTR_RAM_MID_OP, inst->op);
		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_MID);

		val = CAS_BASE(HP_INSTR_RAM_LOW_OUTMASK, inst->outmask);
		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTSHIFT, inst->outshift);
		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTEN, inst->outenab);
		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTARG, inst->outarg);
		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_LOW);

		firmware++;
		++i;
	}
}
static void cas_init_rx_dma(struct cas *cp)
{
	u64 desc_dma = cp->block_dvma;
	u32 val;
	int i, size;

	/* rx free descriptors */
	val = CAS_BASE(RX_CFG_SWIVEL, RX_SWIVEL_OFF_VAL);
	val |= CAS_BASE(RX_CFG_DESC_RING, RX_DESC_RINGN_INDEX(0));
	val |= CAS_BASE(RX_CFG_COMP_RING, RX_COMP_RINGN_INDEX(0));
	if ((N_RX_DESC_RINGS > 1) &&
	    (cp->cas_flags & CAS_FLAG_REG_PLUS))  /* do desc 2 */
		val |= CAS_BASE(RX_CFG_DESC_RING1, RX_DESC_RINGN_INDEX(1));
	writel(val, cp->regs + REG_RX_CFG);

	val = (unsigned long) cp->init_rxds[0] -
		(unsigned long) cp->init_block;
	writel((desc_dma + val) >> 32, cp->regs + REG_RX_DB_HI);
	writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_DB_LOW);
	writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);

	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		/* rx desc 2 is for IPSEC packets. however,
		 * we don't use it for that purpose.
		 */
		val = (unsigned long) cp->init_rxds[1] -
			(unsigned long) cp->init_block;
		writel((desc_dma + val) >> 32, cp->regs + REG_PLUS_RX_DB1_HI);
		writel((desc_dma + val) & 0xffffffff, cp->regs +
		       REG_PLUS_RX_DB1_LOW);
		writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs +
		       REG_PLUS_RX_KICK1);
	}

	/* rx completion registers */
	val = (unsigned long) cp->init_rxcs[0] -
		(unsigned long) cp->init_block;
	writel((desc_dma + val) >> 32, cp->regs + REG_RX_CB_HI);
	writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_CB_LOW);

	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		/* rx comp 2-4 */
		for (i = 1; i < MAX_RX_COMP_RINGS; i++) {
			val = (unsigned long) cp->init_rxcs[i] -
				(unsigned long) cp->init_block;
			writel((desc_dma + val) >> 32, cp->regs +
			       REG_PLUS_RX_CBN_HI(i));
			writel((desc_dma + val) & 0xffffffff, cp->regs +
			       REG_PLUS_RX_CBN_LOW(i));
		}
	}

	/* read selective clear regs to prevent spurious interrupts
	 * on reset because complete == kick.
	 * selective clear set up to prevent interrupts on resets
	 */
	readl(cp->regs + REG_INTR_STATUS_ALIAS);
	writel(INTR_RX_DONE | INTR_RX_BUF_UNAVAIL, cp->regs + REG_ALIAS_CLEAR);
	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		for (i = 1; i < N_RX_COMP_RINGS; i++)
			readl(cp->regs + REG_PLUS_INTRN_STATUS_ALIAS(i));

		/* 2 is different from 3 and 4 */
		if (N_RX_COMP_RINGS > 1)
			writel(INTR_RX_DONE_ALT | INTR_RX_BUF_UNAVAIL_1,
			       cp->regs + REG_PLUS_ALIASN_CLEAR(1));

		for (i = 2; i < N_RX_COMP_RINGS; i++)
			writel(INTR_RX_DONE_ALT,
			       cp->regs + REG_PLUS_ALIASN_CLEAR(i));
	}

	/* set up pause thresholds */
	val  = CAS_BASE(RX_PAUSE_THRESH_OFF,
			cp->rx_pause_off / RX_PAUSE_THRESH_QUANTUM);
	val |= CAS_BASE(RX_PAUSE_THRESH_ON,
			cp->rx_pause_on / RX_PAUSE_THRESH_QUANTUM);
	writel(val, cp->regs + REG_RX_PAUSE_THRESH);

	/* zero out dma reassembly buffers */
	for (i = 0; i < 64; i++) {
		writel(i, cp->regs + REG_RX_TABLE_ADDR);
		writel(0x0, cp->regs + REG_RX_TABLE_DATA_LOW);
		writel(0x0, cp->regs + REG_RX_TABLE_DATA_MID);
		writel(0x0, cp->regs + REG_RX_TABLE_DATA_HI);
	}

	/* make sure address register is 0 for normal operation */
	writel(0x0, cp->regs + REG_RX_CTRL_FIFO_ADDR);
	writel(0x0, cp->regs + REG_RX_IPP_FIFO_ADDR);

	/* interrupt mitigation */
#ifdef USE_RX_BLANK
	val  = CAS_BASE(RX_BLANK_INTR_TIME, RX_BLANK_INTR_TIME_VAL);
	val |= CAS_BASE(RX_BLANK_INTR_PKT, RX_BLANK_INTR_PKT_VAL);
	writel(val, cp->regs + REG_RX_BLANK);
#else
	writel(0x0, cp->regs + REG_RX_BLANK);
#endif

	/* interrupt generation as a function of low water marks for
	 * free desc and completion entries. these are used to trigger
	 * housekeeping for rx descs. we don't use the free interrupt
	 * as it's not very useful
	 */
	/* val = CAS_BASE(RX_AE_THRESH_FREE, RX_AE_FREEN_VAL(0)); */
	val = CAS_BASE(RX_AE_THRESH_COMP, RX_AE_COMP_VAL);
	writel(val, cp->regs + REG_RX_AE_THRESH);
	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		val = CAS_BASE(RX_AE1_THRESH_FREE, RX_AE_FREEN_VAL(1));
		writel(val, cp->regs + REG_PLUS_RX_AE1_THRESH);
	}

	/* Random early detect registers. useful for congestion avoidance.
	 * this should be tunable.
	 */
	writel(0x0, cp->regs + REG_RX_RED);

	/* receive page sizes. default == 2K (0x800) */
	val = 0;
	if (cp->page_size == 0x1000)
		val = 0x1;
	else if (cp->page_size == 0x2000)
		val = 0x2;
	else if (cp->page_size == 0x4000)
		val = 0x3;

	/* round mtu + offset. constrain to page size. */
	size = cp->dev->mtu + 64;
	if (size > cp->page_size)
		size = cp->page_size;

	if (size <= 0x400)
		i = 0x0;
	else if (size <= 0x800)
		i = 0x1;
	else if (size <= 0x1000)
		i = 0x2;
	else
		i = 0x3;

	cp->mtu_stride = 1 << (i + 10);
	val  = CAS_BASE(RX_PAGE_SIZE, val);
	val |= CAS_BASE(RX_PAGE_SIZE_MTU_STRIDE, i);
	val |= CAS_BASE(RX_PAGE_SIZE_MTU_COUNT, cp->page_size >> (i + 10));
	val |= CAS_BASE(RX_PAGE_SIZE_MTU_OFF, 0x1);
	writel(val, cp->regs + REG_RX_PAGE_SIZE);

	/* enable the header parser if desired */
	if (CAS_HP_FIRMWARE == cas_prog_null)
		return;

	val = CAS_BASE(HP_CFG_NUM_CPU, CAS_NCPUS > 63 ? 0 : CAS_NCPUS);
	val |= HP_CFG_PARSE_EN | HP_CFG_SYN_INC_MASK;
	val |= CAS_BASE(HP_CFG_TCP_THRESH, HP_TCP_THRESH_VAL);
	writel(val, cp->regs + REG_HP_CFG);
}
static inline void cas_rxc_init(struct cas_rx_comp *rxc)
{
	memset(rxc, 0, sizeof(*rxc));
	rxc->word4 = cpu_to_le64(RX_COMP4_ZERO);
}
/* NOTE: we use the ENC RX DESC ring for spares. the rx_page[0,1]
 * flipping is protected by the fact that the chip will not
 * hand back the same page index while it's being processed.
 */
static inline cas_page_t *cas_page_spare(struct cas *cp, const int index)
{
	cas_page_t *page = cp->rx_pages[1][index];
	cas_page_t *new;

	if (page_count(page->buffer) == 1)
		return page;

	new = cas_page_dequeue(cp);
	if (new) {
		spin_lock(&cp->rx_inuse_lock);
		list_add(&page->list, &cp->rx_inuse_list);
		spin_unlock(&cp->rx_inuse_lock);
	}
	return new;
}

/* this needs to be changed if we actually use the ENC RX DESC ring */
static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
				 const int index)
{
	cas_page_t **page0 = cp->rx_pages[0];
	cas_page_t **page1 = cp->rx_pages[1];

	/* swap if buffer is in use */
	if (page_count(page0[index]->buffer) > 1) {
		cas_page_t *new = cas_page_spare(cp, index);
		if (new) {
			page1[index] = page0[index];
			page0[index] = new;
		}
	}
	RX_USED_SET(page0[index], 0);
	return page0[index];
}
static void cas_clean_rxds(struct cas *cp)
{
	/* only clean ring 0 as ring 1 is used for spare buffers */
	struct cas_rx_desc *rxd = cp->init_rxds[0];
	int i, size;

	/* release all rx flows */
	for (i = 0; i < N_RX_FLOWS; i++) {
		struct sk_buff *skb;
		while ((skb = __skb_dequeue(&cp->rx_flows[i]))) {
			cas_skb_release(skb);
		}
	}

	/* initialize descriptors */
	size = RX_DESC_RINGN_SIZE(0);
	for (i = 0; i < size; i++) {
		cas_page_t *page = cas_page_swap(cp, 0, i);
		rxd[i].buffer = cpu_to_le64(page->dma_addr);
		rxd[i].index  = cpu_to_le64(CAS_BASE(RX_INDEX_NUM, i) |
					    CAS_BASE(RX_INDEX_RING, 0));
	}

	cp->rx_old[0]  = RX_DESC_RINGN_SIZE(0) - 4;
	cp->rx_last[0] = 0;
	cp->cas_flags &= ~CAS_FLAG_RXD_POST(0);
}
static void cas_clean_rxcs(struct cas *cp)
{
	int i, j;

	/* take ownership of rx comp descriptors */
	memset(cp->rx_cur, 0, sizeof(*cp->rx_cur)*N_RX_COMP_RINGS);
	memset(cp->rx_new, 0, sizeof(*cp->rx_new)*N_RX_COMP_RINGS);
	for (i = 0; i < N_RX_COMP_RINGS; i++) {
		struct cas_rx_comp *rxc = cp->init_rxcs[i];
		for (j = 0; j < RX_COMP_RINGN_SIZE(i); j++) {
			cas_rxc_init(rxc + j);
		}
	}
}
/* When we get a RX fifo overflow, the RX unit is probably hung
 * so we do the following.
 *
 * If any part of the reset goes wrong, we return 1 and that causes the
 * whole chip to be reset.
 */
static int cas_rxmac_reset(struct cas *cp)
{
	struct net_device *dev = cp->dev;
	int limit;
	u32 val;

	/* First, reset MAC RX. */
	writel(cp->mac_rx_cfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
	for (limit = 0; limit < STOP_TRIES; limit++) {
		if (!(readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN))
			break;
		udelay(10);
	}
	if (limit == STOP_TRIES) {
		printk(KERN_ERR "%s: RX MAC will not disable, resetting whole "
		       "chip.\n", dev->name);
		return 1;
	}

	/* Second, disable RX DMA. */
	writel(0, cp->regs + REG_RX_CFG);
	for (limit = 0; limit < STOP_TRIES; limit++) {
		if (!(readl(cp->regs + REG_RX_CFG) & RX_CFG_DMA_EN))
			break;
		udelay(10);
	}
	if (limit == STOP_TRIES) {
		printk(KERN_ERR "%s: RX DMA will not disable, resetting whole "
		       "chip.\n", dev->name);
		return 1;
	}

	/* Execute RX reset command. */
	writel(SW_RESET_RX, cp->regs + REG_SW_RESET);
	for (limit = 0; limit < STOP_TRIES; limit++) {
		if (!(readl(cp->regs + REG_SW_RESET) & SW_RESET_RX))
			break;
		udelay(10);
	}
	if (limit == STOP_TRIES) {
		printk(KERN_ERR "%s: RX reset command will not execute, "
		       "resetting whole chip.\n", dev->name);
		return 1;
	}

	/* reset driver rx state */
	cas_clean_rxds(cp);
	cas_clean_rxcs(cp);

	/* Now, reprogram the rest of RX unit. */
	cas_init_rx_dma(cp);

	/* re-enable */
	val = readl(cp->regs + REG_RX_CFG);
	writel(val | RX_CFG_DMA_EN, cp->regs + REG_RX_CFG);
	writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
	val = readl(cp->regs + REG_MAC_RX_CFG);
	writel(val | MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
	return 0;
}
static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp,
			       u32 status)
{
	u32 stat = readl(cp->regs + REG_MAC_RX_STATUS);

	if (!stat)
		return 0;

	if (netif_msg_intr(cp))
		printk(KERN_DEBUG "%s: rxmac interrupt, stat: 0x%x\n",
			cp->dev->name, stat);

	/* these are all rollovers */
	spin_lock(&cp->stat_lock[0]);
	if (stat & MAC_RX_ALIGN_ERR)
		cp->net_stats[0].rx_frame_errors += 0x10000;

	if (stat & MAC_RX_CRC_ERR)
		cp->net_stats[0].rx_crc_errors += 0x10000;

	if (stat & MAC_RX_LEN_ERR)
		cp->net_stats[0].rx_length_errors += 0x10000;

	if (stat & MAC_RX_OVERFLOW) {
		cp->net_stats[0].rx_over_errors++;
		cp->net_stats[0].rx_fifo_errors++;
	}

	/* We do not track MAC_RX_FRAME_COUNT and MAC_RX_VIOL_ERR
	 * events.
	 */
	spin_unlock(&cp->stat_lock[0]);
	return 0;
}
static int cas_mac_interrupt(struct net_device *dev, struct cas *cp,
			     u32 status)
{
	u32 stat = readl(cp->regs + REG_MAC_CTRL_STATUS);

	if (!stat)
		return 0;

	if (netif_msg_intr(cp))
		printk(KERN_DEBUG "%s: mac interrupt, stat: 0x%x\n",
			cp->dev->name, stat);

	/* This interrupt is just for pause frame and pause
	 * tracking. It is useful for diagnostics and debug
	 * but probably by default we will mask these events.
	 */
	if (stat & MAC_CTRL_PAUSE_STATE)
		cp->pause_entered++;

	if (stat & MAC_CTRL_PAUSE_RECEIVED)
		cp->pause_last_time_recvd = (stat >> 16);

	return 0;
}
/* Must be invoked under cp->lock. */
static inline int cas_mdio_link_not_up(struct cas *cp)
{
	u16 val;

	switch (cp->lstate) {
	case link_force_ret:
		if (netif_msg_link(cp))
			printk(KERN_INFO "%s: Autoneg failed again, keeping"
			       " forced mode\n", cp->dev->name);
		cas_phy_write(cp, MII_BMCR, cp->link_fcntl);
		cp->timer_ticks = 5;
		cp->lstate = link_force_ok;
		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
		break;

	case link_aneg:
		val = cas_phy_read(cp, MII_BMCR);

		/* Try forced modes. we try things in the following order:
		 * 1000 full -> 100 full/half -> 10 half
		 */
		val &= ~(BMCR_ANRESTART | BMCR_ANENABLE);
		val |= BMCR_FULLDPLX;
		val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
			CAS_BMCR_SPEED1000 : BMCR_SPEED100;
		cas_phy_write(cp, MII_BMCR, val);
		cp->timer_ticks = 5;
		cp->lstate = link_force_try;
		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
		break;

	case link_force_try:
		/* Downgrade from 1000 to 100 to 10 Mbps if necessary. */
		val = cas_phy_read(cp, MII_BMCR);
		cp->timer_ticks = 5;
		if (val & CAS_BMCR_SPEED1000) { /* gigabit */
			val &= ~CAS_BMCR_SPEED1000;
			val |= (BMCR_SPEED100 | BMCR_FULLDPLX);
			cas_phy_write(cp, MII_BMCR, val);
			break;
		}

		if (val & BMCR_SPEED100) {
			if (val & BMCR_FULLDPLX) /* fd failed */
				val &= ~BMCR_FULLDPLX;
			else { /* 100Mbps failed */
				val &= ~BMCR_SPEED100;
			}
			cas_phy_write(cp, MII_BMCR, val);
			break;
		}
	default:
		break;
	}
	return 0;
}
/* must be invoked with cp->lock held */
static int cas_mii_link_check(struct cas *cp, const u16 bmsr)
{
	int restart = 0;

	if (bmsr & BMSR_LSTATUS) {
		/* Ok, here we got a link. If we had it due to a forced
		 * fallback, and we were configured for autoneg, we
		 * retry a short autoneg pass. If you know your hub is
		 * broken, use ethtool ;)
		 */
		if ((cp->lstate == link_force_try) &&
		    (cp->link_cntl & BMCR_ANENABLE)) {
			cp->lstate = link_force_ret;
			cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
			cas_mif_poll(cp, 0);
			cp->link_fcntl = cas_phy_read(cp, MII_BMCR);
			cp->timer_ticks = 5;
			if (cp->opened && netif_msg_link(cp))
				printk(KERN_INFO "%s: Got link after fallback, retrying"
				       " autoneg once...\n", cp->dev->name);
			cas_phy_write(cp, MII_BMCR,
				      cp->link_fcntl | BMCR_ANENABLE |
				      BMCR_ANRESTART);
			cas_mif_poll(cp, 1);

		} else if (cp->lstate != link_up) {
			cp->lstate = link_up;
			cp->link_transition = LINK_TRANSITION_LINK_UP;

			if (cp->opened) {
				cas_set_link_modes(cp);
				netif_carrier_on(cp->dev);
			}
		}
		return 0;
	}

	/* link not up. if the link was previously up, we restart the
	 * whole process
	 */
	if (cp->lstate == link_up) {
		cp->lstate = link_down;
		cp->link_transition = LINK_TRANSITION_LINK_DOWN;

		netif_carrier_off(cp->dev);
		if (cp->opened && netif_msg_link(cp))
			printk(KERN_INFO "%s: Link down\n",
			       cp->dev->name);

		restart = 1;
	} else if (++cp->timer_ticks > 10)
		cas_mdio_link_not_up(cp);

	return restart;
}
static int cas_mif_interrupt(struct net_device *dev, struct cas *cp,
			     u32 status)
{
	u32 stat = readl(cp->regs + REG_MIF_STATUS);
	u16 bmsr;

	/* check for a link change */
	if (CAS_VAL(MIF_STATUS_POLL_STATUS, stat) == 0)
		return 0;

	bmsr = CAS_VAL(MIF_STATUS_POLL_DATA, stat);
	return cas_mii_link_check(cp, bmsr);
}
static int cas_pci_interrupt(struct net_device *dev, struct cas *cp,
			     u32 status)
{
	u32 stat = readl(cp->regs + REG_PCI_ERR_STATUS);

	if (!stat)
		return 0;

	printk(KERN_ERR "%s: PCI error [%04x:%04x] ", dev->name, stat,
	       readl(cp->regs + REG_BIM_DIAG));

	/* cassini+ has this reserved */
	if ((stat & PCI_ERR_BADACK) &&
	    ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0))
		printk("<No ACK64# during ABS64 cycle> ");

	if (stat & PCI_ERR_DTRTO)
		printk("<Delayed transaction timeout> ");
	if (stat & PCI_ERR_OTHER)
		printk("<other> ");
	if (stat & PCI_ERR_BIM_DMA_WRITE)
		printk("<BIM DMA 0 write req> ");
	if (stat & PCI_ERR_BIM_DMA_READ)
		printk("<BIM DMA 0 read req> ");
	printk("\n");

	if (stat & PCI_ERR_OTHER) {
		u16 cfg;

		/* Interrogate PCI config space for the
		 * true cause.
		 */
		pci_read_config_word(cp->pdev, PCI_STATUS, &cfg);
		printk(KERN_ERR "%s: Read PCI cfg space status [%04x]\n",
		       dev->name, cfg);
		if (cfg & PCI_STATUS_PARITY)
			printk(KERN_ERR "%s: PCI parity error detected.\n",
			       dev->name);
		if (cfg & PCI_STATUS_SIG_TARGET_ABORT)
			printk(KERN_ERR "%s: PCI target abort.\n",
			       dev->name);
		if (cfg & PCI_STATUS_REC_TARGET_ABORT)
			printk(KERN_ERR "%s: PCI master acks target abort.\n",
			       dev->name);
		if (cfg & PCI_STATUS_REC_MASTER_ABORT)
			printk(KERN_ERR "%s: PCI master abort.\n", dev->name);
		if (cfg & PCI_STATUS_SIG_SYSTEM_ERROR)
			printk(KERN_ERR "%s: PCI system error SERR#.\n",
			       dev->name);
		if (cfg & PCI_STATUS_DETECTED_PARITY)
			printk(KERN_ERR "%s: PCI parity error.\n",
			       dev->name);

		/* Write the error bits back to clear them. */
		cfg &= (PCI_STATUS_PARITY |
			PCI_STATUS_SIG_TARGET_ABORT |
			PCI_STATUS_REC_TARGET_ABORT |
			PCI_STATUS_REC_MASTER_ABORT |
			PCI_STATUS_SIG_SYSTEM_ERROR |
			PCI_STATUS_DETECTED_PARITY);
		pci_write_config_word(cp->pdev, PCI_STATUS, cfg);
	}

	/* For all PCI errors, we should reset the chip. */
	return 1;
}
/* All non-normal interrupt conditions get serviced here.
 * Returns non-zero if we should just exit the interrupt
 * handler right now (ie. if we reset the card which invalidates
 * all of the other original irq status bits).
 */
static int cas_abnormal_irq(struct net_device *dev, struct cas *cp,
			    u32 status)
{
	if (status & INTR_RX_TAG_ERROR) {
		/* corrupt RX tag framing */
		if (netif_msg_rx_err(cp))
			printk(KERN_DEBUG "%s: corrupt rx tag framing\n",
			       dev->name);
		spin_lock(&cp->stat_lock[0]);
		cp->net_stats[0].rx_errors++;
		spin_unlock(&cp->stat_lock[0]);
		goto do_reset;
	}

	if (status & INTR_RX_LEN_MISMATCH) {
		/* length mismatch. */
		if (netif_msg_rx_err(cp))
			printk(KERN_DEBUG "%s: length mismatch for rx frame\n",
			       dev->name);
		spin_lock(&cp->stat_lock[0]);
		cp->net_stats[0].rx_errors++;
		spin_unlock(&cp->stat_lock[0]);
		goto do_reset;
	}

	if (status & INTR_PCS_STATUS) {
		if (cas_pcs_interrupt(dev, cp, status))
			goto do_reset;
	}

	if (status & INTR_TX_MAC_STATUS) {
		if (cas_txmac_interrupt(dev, cp, status))
			goto do_reset;
	}

	if (status & INTR_RX_MAC_STATUS) {
		if (cas_rxmac_interrupt(dev, cp, status))
			goto do_reset;
	}

	if (status & INTR_MAC_CTRL_STATUS) {
		if (cas_mac_interrupt(dev, cp, status))
			goto do_reset;
	}

	if (status & INTR_MIF_STATUS) {
		if (cas_mif_interrupt(dev, cp, status))
			goto do_reset;
	}

	if (status & INTR_PCI_ERROR_STATUS) {
		if (cas_pci_interrupt(dev, cp, status))
			goto do_reset;
	}
	return 0;

do_reset:
#if 1
	atomic_inc(&cp->reset_task_pending);
	atomic_inc(&cp->reset_task_pending_all);
	printk(KERN_ERR "%s: reset called in cas_abnormal_irq [0x%x]\n",
	       dev->name, status);
	schedule_work(&cp->reset_task);
#else
	atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
	printk(KERN_ERR "reset called in cas_abnormal_irq\n");
	schedule_work(&cp->reset_task);
#endif
	return 1;
}
/* NOTE: CAS_TABORT returns 1 or 2 so that it can be used when
 * determining whether to do a netif_stop/wakeup
 */
#define CAS_TABORT(x)      (((x)->cas_flags & CAS_FLAG_TARGET_ABORT) ? 2 : 1)
#define CAS_ROUND_PAGE(x)  (((x) + PAGE_SIZE - 1) & PAGE_MASK)
static inline int cas_calc_tabort(struct cas *cp, const unsigned long addr,
				  const int len)
{
	unsigned long off = addr + len;

	if (CAS_TABORT(cp) == 1)
		return 0;
	if ((CAS_ROUND_PAGE(off) - off) > TX_TARGET_ABORT_LEN)
		return 0;
	return TX_TARGET_ABORT_LEN;
}
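
/* Worked example for cas_calc_tabort() (added commentary), assuming a
 * 4K PAGE_SIZE and a chip with CAS_FLAG_TARGET_ABORT set: a buffer
 * ending at offset 0xff0 leaves CAS_ROUND_PAGE(off) - off == 0x10 bytes
 * of slack before the page boundary. If that slack is at most
 * TX_TARGET_ABORT_LEN, the function returns TX_TARGET_ABORT_LEN so the
 * tail of the skb can be staged through a tiny buffer (see the
 * tx_tiny_use accounting in cas_tx_ringN() below) instead of letting
 * the DMA run right up to the boundary.
 */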
static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
{
	struct cas_tx_desc *txds;
	struct sk_buff **skbs;
	struct net_device *dev = cp->dev;
	int entry, count;

	spin_lock(&cp->tx_lock[ring]);
	txds = cp->init_txds[ring];
	skbs = cp->tx_skbs[ring];
	entry = cp->tx_old[ring];

	count = TX_BUFF_COUNT(ring, entry, limit);
	while (entry != limit) {
		struct sk_buff *skb = skbs[entry];
		dma_addr_t daddr;
		u32 dlen;
		int frag;

		if (!skb) {
			/* this should never occur */
			entry = TX_DESC_NEXT(ring, entry);
			continue;
		}

		/* however, we might get only a partial skb release. */
		count -= skb_shinfo(skb)->nr_frags +
			 cp->tx_tiny_use[ring][entry].nbufs + 1;
		if (count < 0)
			break;

		if (netif_msg_tx_done(cp))
			printk(KERN_DEBUG "%s: tx[%d] done, slot %d\n",
			       cp->dev->name, ring, entry);

		skbs[entry] = NULL;
		cp->tx_tiny_use[ring][entry].nbufs = 0;

		for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
			struct cas_tx_desc *txd = txds + entry;

			daddr = le64_to_cpu(txd->buffer);
			dlen = CAS_VAL(TX_DESC_BUFLEN,
				       le64_to_cpu(txd->control));
			pci_unmap_page(cp->pdev, daddr, dlen,
				       PCI_DMA_TODEVICE);
			entry = TX_DESC_NEXT(ring, entry);

			/* tiny buffer may follow */
			if (cp->tx_tiny_use[ring][entry].used) {
				cp->tx_tiny_use[ring][entry].used = 0;
				entry = TX_DESC_NEXT(ring, entry);
			}
		}

		spin_lock(&cp->stat_lock[ring]);
		cp->net_stats[ring].tx_packets++;
		cp->net_stats[ring].tx_bytes += skb->len;
		spin_unlock(&cp->stat_lock[ring]);
		dev_kfree_skb_irq(skb);
	}
	cp->tx_old[ring] = entry;

	/* this is wrong for multiple tx rings. the net device needs
	 * multiple queues for this to do the right thing.  we wait
	 * for 2*packets to be available when using tiny buffers
	 */
	if (netif_queue_stopped(dev) &&
	    (TX_BUFFS_AVAIL(cp, ring) > CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1)))
		netif_wake_queue(dev);
	spin_unlock(&cp->tx_lock[ring]);
}
static void cas_tx(struct net_device *dev, struct cas *cp,
		   u32 status)
{
	int limit, ring;
#ifdef USE_TX_COMPWB
	u64 compwb = le64_to_cpu(cp->init_block->tx_compwb);
#endif
	if (netif_msg_intr(cp))
		printk(KERN_DEBUG "%s: tx interrupt, status: 0x%x, %lx\n",
			cp->dev->name, status, compwb);
	/* process all the rings */
	for (ring = 0; ring < N_TX_RINGS; ring++) {
#ifdef USE_TX_COMPWB
		/* use the completion writeback registers */
		limit = (CAS_VAL(TX_COMPWB_MSB, compwb) << 8) |
			CAS_VAL(TX_COMPWB_LSB, compwb);
		compwb = TX_COMPWB_NEXT(compwb);
#else
		limit = readl(cp->regs + REG_TX_COMPN(ring));
#endif
		if (cp->tx_old[ring] != limit)
			cas_tx_ringN(cp, ring, limit);
	}
}
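
/* Sketch of the writeback decode above (added commentary, assuming the
 * layout implied by the CAS_VAL masks): tx_compwb is a single 64-bit
 * word the chip DMAs into host memory with one completion index per tx
 * ring, so
 *
 *	limit  = (CAS_VAL(TX_COMPWB_MSB, compwb) << 8) |
 *		  CAS_VAL(TX_COMPWB_LSB, compwb);
 *	compwb = TX_COMPWB_NEXT(compwb);
 *
 * walks the per-ring fields without any register reads, while the
 * !USE_TX_COMPWB path pays a readl() per ring instead.
 */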
static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
			      int entry, const u64 *words,
			      struct sk_buff **skbref)
{
	int dlen, hlen, len, i, alloclen;
	int off, swivel = RX_SWIVEL_OFF_VAL;
	struct cas_page *page;
	struct sk_buff *skb;
	void *addr, *crcaddr;
	char *p;

	hlen = CAS_VAL(RX_COMP2_HDR_SIZE, words[1]);
	dlen = CAS_VAL(RX_COMP1_DATA_SIZE, words[0]);
	len  = hlen + dlen;

	if (RX_COPY_ALWAYS || (words[2] & RX_COMP3_SMALL_PKT))
		alloclen = len;
	else
		alloclen = max(hlen, RX_COPY_MIN);

	skb = dev_alloc_skb(alloclen + swivel + cp->crc_size);
	if (skb == NULL)
		return -1;

	*skbref = skb;
	skb_reserve(skb, swivel);

	p = skb->data;
	addr = crcaddr = NULL;
	if (hlen) { /* always copy header pages */
		i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
		page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
		off = CAS_VAL(RX_COMP2_HDR_OFF, words[1]) * 0x100 +
			swivel;

		i = hlen;
		if (!dlen) /* attach FCS */
			i += cp->crc_size;
		pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
				    PCI_DMA_FROMDEVICE);
		addr = cas_page_map(page->buffer);
		memcpy(p, addr + off, i);
		pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
				    PCI_DMA_FROMDEVICE);
		cas_page_unmap(addr);
		RX_USED_ADD(page, 0x100);
		p += hlen;
	}

	if (alloclen < (hlen + dlen)) {
		skb_frag_t *frag = skb_shinfo(skb)->frags;

		/* normal or jumbo packets. we use frags */
		i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
		page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
		off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;

		hlen = min(cp->page_size - off, dlen);
		if (hlen < 0) {
			if (netif_msg_rx_err(cp)) {
				printk(KERN_DEBUG "%s: rx page overflow: "
				       "%d\n", cp->dev->name, hlen);
			}
			dev_kfree_skb_irq(skb);
			return -1;
		}
		i = hlen;
		if (i == dlen)  /* attach FCS */
			i += cp->crc_size;
		pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
				    PCI_DMA_FROMDEVICE);

		/* make sure we always copy a header */
		swivel = 0;
		if (p == (char *) skb->data) { /* not split */
			addr = cas_page_map(page->buffer);
			memcpy(p, addr + off, RX_COPY_MIN);
			pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
					PCI_DMA_FROMDEVICE);
			cas_page_unmap(addr);
			off += RX_COPY_MIN;
			swivel = RX_COPY_MIN;
			RX_USED_ADD(page, cp->mtu_stride);
		} else {
			RX_USED_ADD(page, hlen);
		}
		skb_put(skb, alloclen);

		skb_shinfo(skb)->nr_frags++;
		skb->data_len += hlen - swivel;
		skb->len      += hlen - swivel;

		get_page(page->buffer);
		frag->page = page->buffer;
		frag->page_offset = off;
		frag->size = hlen - swivel;

		/* any more data? */
		if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
			hlen = dlen;
			off = 0;

			i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
			page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
			pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
					    hlen + cp->crc_size,
					    PCI_DMA_FROMDEVICE);
			pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
					    hlen + cp->crc_size,
					    PCI_DMA_FROMDEVICE);

			skb_shinfo(skb)->nr_frags++;
			skb->data_len += hlen;
			skb->len      += hlen;
			frag++;

			get_page(page->buffer);
			frag->page = page->buffer;
			frag->page_offset = 0;
			frag->size = hlen;
			RX_USED_ADD(page, hlen + cp->crc_size);
		}

		if (cp->crc_size) {
			addr = cas_page_map(page->buffer);
			crcaddr = addr + off + hlen;
		}

	} else {
		/* copying packet */
		if (!dlen)
			goto end_copy_pkt;

		i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
		page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
		off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
		hlen = min(cp->page_size - off, dlen);
		if (hlen < 0) {
			if (netif_msg_rx_err(cp)) {
				printk(KERN_DEBUG "%s: rx page overflow: "
				       "%d\n", cp->dev->name, hlen);
			}
			dev_kfree_skb_irq(skb);
			return -1;
		}
		i = hlen;
		if (i == dlen) /* attach FCS */
			i += cp->crc_size;
		pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
				    PCI_DMA_FROMDEVICE);
		addr = cas_page_map(page->buffer);
		memcpy(p, addr + off, i);
		pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
				    PCI_DMA_FROMDEVICE);
		cas_page_unmap(addr);
		if (p == (char *) skb->data) /* not split */
			RX_USED_ADD(page, cp->mtu_stride);
		else
			RX_USED_ADD(page, i);

		/* any more data? */
		if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
			p += hlen;
			i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
			page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
			pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
					    dlen + cp->crc_size,
					    PCI_DMA_FROMDEVICE);
			addr = cas_page_map(page->buffer);
			memcpy(p, addr, dlen + cp->crc_size);
			pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
					    dlen + cp->crc_size,
					    PCI_DMA_FROMDEVICE);
			cas_page_unmap(addr);
			RX_USED_ADD(page, dlen + cp->crc_size);
		}

end_copy_pkt:
		if (cp->crc_size)
			crcaddr = skb->data + alloclen;

		skb_put(skb, alloclen);
	}

	i = CAS_VAL(RX_COMP4_TCP_CSUM, words[3]);
	if (cp->crc_size) {
		/* checksum includes FCS. strip it out. */
		i = csum_fold(csum_partial(crcaddr, cp->crc_size, i));
		if (addr)
			cas_page_unmap(addr);
	}
	skb->csum = ntohs(i ^ 0xffff);
	skb->ip_summed = CHECKSUM_HW;
	skb->protocol = eth_type_trans(skb, cp->dev);
	return len;
}
/* we can handle up to 64 rx flows at a time. we do the same thing
 * as nonreassm except that we batch up the buffers.
 * NOTE: we currently just treat each flow as a bunch of packets that
 *       we pass up. a better way would be to coalesce the packets
 *       into a jumbo packet. to do that, we need to do the following:
 *       1) the first packet will have a clean split between header and
 *          data. save both.
 *       2) each time the next flow packet comes in, extend the
 *          data length and merge the checksums.
 *       3) on flow release, fix up the header.
 *       4) make sure the higher layer doesn't care.
 * because packets get coalesced, we shouldn't run into fragment count
 * issues.
 */
static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words,
				   struct sk_buff *skb)
{
	int flowid = CAS_VAL(RX_COMP3_FLOWID, words[2]) & (N_RX_FLOWS - 1);
	struct sk_buff_head *flow = &cp->rx_flows[flowid];

	/* this is protected at a higher layer, so no need to
	 * do any additional locking here. stick the buffer
	 * at the end.
	 */
	__skb_insert(skb, flow->prev, (struct sk_buff *) flow, flow);
	if (words[0] & RX_COMP1_RELEASE_FLOW) {
		while ((skb = __skb_dequeue(flow)))
			cas_skb_release(skb);
	}
}

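/* A sketch of step 2 of the coalescing TODO above (illustrative only,
 * not driver code): merging two 16-bit ones'-complement partial sums
 * is just an end-around-carry add, so per-packet hardware checksums
 * could be folded into the head skb as a flow grows.
 */
#if 0
static u32 cas_csum_merge(u32 csum, u32 addend)	/* hypothetical helper */
{
	csum += addend;
	return csum + (csum < addend);	/* fold the carry back in */
}
#endif
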
/* put rx descriptor back on ring. if a buffer is in use by a higher
 * layer, this will need to put in a replacement.
 */
static void cas_post_page(struct cas *cp, const int ring, const int index)
{
	cas_page_t *new;
	int entry;

	entry = cp->rx_old[ring];

	new = cas_page_swap(cp, ring, index);
	cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr);
	cp->init_rxds[ring][entry].index =
		cpu_to_le64(CAS_BASE(RX_INDEX_NUM, index) |
			    CAS_BASE(RX_INDEX_RING, ring));

	entry = RX_DESC_ENTRY(ring, entry + 1);
	cp->rx_old[ring] = entry;

	/* only write the kick register once a full cluster of 4
	 * descriptors has been posted.
	 */
	if (entry % 4)
		return;

	if (ring == 0)
		writel(entry, cp->regs + REG_RX_KICK);
	else if ((N_RX_DESC_RINGS > 1) &&
		 (cp->cas_flags & CAS_FLAG_REG_PLUS))
		writel(entry, cp->regs + REG_PLUS_RX_KICK1);
}

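/* Illustrative sketch (not driver code): CAS_BASE() shifts a value into
 * a register bitfield and CAS_VAL() extracts it again, so the index
 * word written above round-trips like this:
 */
#if 0
	u64 word = CAS_BASE(RX_INDEX_NUM, index) |
		   CAS_BASE(RX_INDEX_RING, ring);
	/* CAS_VAL(RX_INDEX_NUM, word) == index,
	 * CAS_VAL(RX_INDEX_RING, word) == ring
	 */
#endif
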
/* only when things are bad */
static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
{
	unsigned int entry, last, count, released;
	int cluster;
	cas_page_t **page = cp->rx_pages[ring];

	entry = cp->rx_old[ring];

	if (netif_msg_intr(cp))
		printk(KERN_DEBUG "%s: rxd[%d] interrupt, done: %d\n",
		       cp->dev->name, ring, entry);

	cluster = -1;
	count = entry & 0x3;
	last = RX_DESC_ENTRY(ring, num ? entry + num - 4 : entry - 4);
	released = 0;
	while (entry != last) {
		/* make a new buffer if it's still in use */
		if (page_count(page[entry]->buffer) > 1) {
			cas_page_t *new = cas_page_dequeue(cp);
			if (!new) {
				/* let the timer know that we need to
				 * do this again
				 */
				cp->cas_flags |= CAS_FLAG_RXD_POST(ring);
				if (!timer_pending(&cp->link_timer))
					mod_timer(&cp->link_timer, jiffies +
						  CAS_LINK_FAST_TIMEOUT);
				cp->rx_old[ring] = entry;
				cp->rx_last[ring] = num ? num - released : 0;
				return -ENOMEM;
			}
			spin_lock(&cp->rx_inuse_lock);
			list_add(&page[entry]->list, &cp->rx_inuse_list);
			spin_unlock(&cp->rx_inuse_lock);
			cp->init_rxds[ring][entry].buffer =
				cpu_to_le64(new->dma_addr);
			page[entry] = new;
		}

		if (++count == 4) {
			cluster = entry;
			count = 0;
		}
		released++;
		entry = RX_DESC_ENTRY(ring, entry + 1);
	}
	cp->rx_old[ring] = entry;

	if (cluster < 0)
		return 0;

	if (ring == 0)
		writel(cluster, cp->regs + REG_RX_KICK);
	else if ((N_RX_DESC_RINGS > 1) &&
		 (cp->cas_flags & CAS_FLAG_REG_PLUS))
		writel(cluster, cp->regs + REG_PLUS_RX_KICK1);
	return 0;
}

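/* A worked example of the refill arithmetic above (illustrative,
 * values assumed): on a 64-entry ring with entry == 10 and num == 32,
 * count starts at 10 & 0x3 == 2 and last == (10 + 32 - 4) & 63 == 38,
 * so entries 10..37 are refilled and the kick register is written with
 * the last entry that completed a cluster of four.
 */
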
/* process a completion ring. packets are set up in three basic ways:
 * small packets: should be copied header + data in single buffer.
 * large packets: header and data in a single buffer.
 * split packets: header in a separate buffer from data.
 *                data may be in multiple pages. data may be > 256
 *                bytes but in a single page.
 *
 * NOTE: RX page posting is done in this routine as well. while there's
 *       the capability of using multiple RX completion rings, it isn't
 *       really worthwhile due to the fact that the page posting will
 *       force serialization on the single descriptor ring.
 */
static int cas_rx_ringN(struct cas *cp, int ring, int budget)
{
	struct cas_rx_comp *rxcs = cp->init_rxcs[ring];
	int entry, drops;
	int npackets = 0;

	if (netif_msg_intr(cp))
		printk(KERN_DEBUG "%s: rx[%d] interrupt, done: %d/%d\n",
		       cp->dev->name, ring,
		       readl(cp->regs + REG_RX_COMP_HEAD),
		       cp->rx_new[ring]);

	entry = cp->rx_new[ring];
	drops = 0;
	while (1) {
		struct cas_rx_comp *rxc = rxcs + entry;
		struct sk_buff *skb;
		int type, len;
		u64 words[4];
		int i, dring;

		words[0] = le64_to_cpu(rxc->word1);
		words[1] = le64_to_cpu(rxc->word2);
		words[2] = le64_to_cpu(rxc->word3);
		words[3] = le64_to_cpu(rxc->word4);

		/* don't touch if still owned by hw */
		type = CAS_VAL(RX_COMP1_TYPE, words[0]);
		if (type == 0)
			break;

		/* hw hasn't cleared the zero bit yet */
		if (words[3] & RX_COMP4_ZERO) {
			break;
		}

		/* get info on the packet */
		if (words[3] & (RX_COMP4_LEN_MISMATCH | RX_COMP4_BAD)) {
			spin_lock(&cp->stat_lock[ring]);
			cp->net_stats[ring].rx_errors++;
			if (words[3] & RX_COMP4_LEN_MISMATCH)
				cp->net_stats[ring].rx_length_errors++;
			if (words[3] & RX_COMP4_BAD)
				cp->net_stats[ring].rx_crc_errors++;
			spin_unlock(&cp->stat_lock[ring]);

			/* We'll just return it to Cassini. */
		drop_it:
			spin_lock(&cp->stat_lock[ring]);
			++cp->net_stats[ring].rx_dropped;
			spin_unlock(&cp->stat_lock[ring]);
			goto next;
		}

		len = cas_rx_process_pkt(cp, rxc, entry, words, &skb);
		if (len < 0) {
			++drops;
			goto drop_it;
		}

		/* see if it's a flow re-assembly or not. the driver
		 * itself handles release back up.
		 */
		if (RX_DONT_BATCH || (type == 0x2)) {
			/* non-reassm: these always get released */
			cas_skb_release(skb);
		} else {
			cas_rx_flow_pkt(cp, words, skb);
		}

		spin_lock(&cp->stat_lock[ring]);
		cp->net_stats[ring].rx_packets++;
		cp->net_stats[ring].rx_bytes += len;
		spin_unlock(&cp->stat_lock[ring]);
		cp->dev->last_rx = jiffies;

	next:
		npackets++;

		/* should it be released? */
		if (words[0] & RX_COMP1_RELEASE_HDR) {
			i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
			dring = CAS_VAL(RX_INDEX_RING, i);
			i = CAS_VAL(RX_INDEX_NUM, i);
			cas_post_page(cp, dring, i);
		}

		if (words[0] & RX_COMP1_RELEASE_DATA) {
			i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
			dring = CAS_VAL(RX_INDEX_RING, i);
			i = CAS_VAL(RX_INDEX_NUM, i);
			cas_post_page(cp, dring, i);
		}

		if (words[0] & RX_COMP1_RELEASE_NEXT) {
			i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
			dring = CAS_VAL(RX_INDEX_RING, i);
			i = CAS_VAL(RX_INDEX_NUM, i);
			cas_post_page(cp, dring, i);
		}

		/* skip to the next entry */
		entry = RX_COMP_ENTRY(ring, entry + 1 +
				      CAS_VAL(RX_COMP1_SKIP, words[0]));
		if (budget && (npackets >= budget))
			break;
	}
	cp->rx_new[ring] = entry;

	if (drops)
		printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
		       cp->dev->name);
	return npackets;
}

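/* Illustrative note (not driver code): RX_COMP1_SKIP lets the hardware
 * consume more than one completion slot per packet, so the advance at
 * the bottom of the loop can step past entries the chip marked as
 * skipped, e.g. with a skip count of 2:
 */
#if 0
	entry = RX_COMP_ENTRY(ring, entry + 1 + 2);	/* skip two slots */
#endif
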
/* put completion entries back on the ring */
static void cas_post_rxcs_ringN(struct net_device *dev,
				struct cas *cp, int ring)
{
	struct cas_rx_comp *rxc = cp->init_rxcs[ring];
	int last, entry;

	last = cp->rx_cur[ring];
	entry = cp->rx_new[ring];
	if (netif_msg_intr(cp))
		printk(KERN_DEBUG "%s: rxc[%d] interrupt, done: %d/%d\n",
		       dev->name, ring, readl(cp->regs + REG_RX_COMP_HEAD),
		       entry);

	/* zero and re-mark descriptors */
	while (last != entry) {
		cas_rxc_init(rxc + last);
		last = RX_COMP_ENTRY(ring, last + 1);
	}
	cp->rx_cur[ring] = last;

	if (ring == 0)
		writel(last, cp->regs + REG_RX_COMP_TAIL);
	else if (cp->cas_flags & CAS_FLAG_REG_PLUS)
		writel(last, cp->regs + REG_PLUS_RX_COMPN_TAIL(ring));
}

/* cassini can use all four PCI interrupts for the completion ring.
 * rings 3 and 4 are identical
 */
#if defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
static inline void cas_handle_irqN(struct net_device *dev,
				   struct cas *cp, const u32 status,
				   const int ring)
{
	if (status & (INTR_RX_COMP_FULL_ALT | INTR_RX_COMP_AF_ALT))
		cas_post_rxcs_ringN(dev, cp, ring);
}

static irqreturn_t cas_interruptN(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct cas *cp = netdev_priv(dev);
	unsigned long flags;
	int ring = (irq == cp->pci_irq_INTC) ? 2 : 3;
	u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring));

	/* check for shared irq */
	if (status == 0)
		return IRQ_NONE;

	spin_lock_irqsave(&cp->lock, flags);
	if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
#ifdef USE_NAPI
		cas_mask_intr(cp);
		netif_rx_schedule(dev);
#else
		cas_rx_ringN(cp, ring, 0);
#endif
		status &= ~INTR_RX_DONE_ALT;
	}

	if (status)
		cas_handle_irqN(dev, cp, status, ring);
	spin_unlock_irqrestore(&cp->lock, flags);
	return IRQ_HANDLED;
}
#endif

/* everything but rx packets */
static inline void cas_handle_irq1(struct cas *cp, const u32 status)
{
	if (status & INTR_RX_BUF_UNAVAIL_1) {
		/* Frame arrived, no free RX buffers available.
		 * NOTE: we can get this on a link transition.
		 */
		cas_post_rxds_ringN(cp, 1, 0);
		spin_lock(&cp->stat_lock[1]);
		cp->net_stats[1].rx_dropped++;
		spin_unlock(&cp->stat_lock[1]);
	}

	if (status & INTR_RX_BUF_AE_1)
		cas_post_rxds_ringN(cp, 1, RX_DESC_RINGN_SIZE(1) -
				    RX_AE_FREEN_VAL(1));

	if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
		cas_post_rxcs_ringN(cp->dev, cp, 1);
}

/* ring 2 handles a few more events than 3 and 4 */
static irqreturn_t cas_interrupt1(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct cas *cp = netdev_priv(dev);
	unsigned long flags;
	u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));

	/* check for shared interrupt */
	if (status == 0)
		return IRQ_NONE;

	spin_lock_irqsave(&cp->lock, flags);
	if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
#ifdef USE_NAPI
		cas_mask_intr(cp);
		netif_rx_schedule(dev);
#else
		cas_rx_ringN(cp, 1, 0);
#endif
		status &= ~INTR_RX_DONE_ALT;
	}
	if (status)
		cas_handle_irq1(cp, status);
	spin_unlock_irqrestore(&cp->lock, flags);
	return IRQ_HANDLED;
}

static inline void cas_handle_irq(struct net_device *dev,
				  struct cas *cp, const u32 status)
{
	/* housekeeping interrupts */
	if (status & INTR_ERROR_MASK)
		cas_abnormal_irq(dev, cp, status);

	if (status & INTR_RX_BUF_UNAVAIL) {
		/* Frame arrived, no free RX buffers available.
		 * NOTE: we can get this on a link transition.
		 */
		cas_post_rxds_ringN(cp, 0, 0);
		spin_lock(&cp->stat_lock[0]);
		cp->net_stats[0].rx_dropped++;
		spin_unlock(&cp->stat_lock[0]);
	} else if (status & INTR_RX_BUF_AE) {
		cas_post_rxds_ringN(cp, 0, RX_DESC_RINGN_SIZE(0) -
				    RX_AE_FREEN_VAL(0));
	}

	if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
		cas_post_rxcs_ringN(dev, cp, 0);
}

static irqreturn_t cas_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct cas *cp = netdev_priv(dev);
	unsigned long flags;
	u32 status = readl(cp->regs + REG_INTR_STATUS);

	/* check for shared irq */
	if (status == 0)
		return IRQ_NONE;

	spin_lock_irqsave(&cp->lock, flags);
	if (status & (INTR_TX_ALL | INTR_TX_INTME)) {
		cas_tx(dev, cp, status);
		status &= ~(INTR_TX_ALL | INTR_TX_INTME);
	}

	if (status & INTR_RX_DONE) {
#ifdef USE_NAPI
		cas_mask_intr(cp);
		netif_rx_schedule(dev);
#else
		cas_rx_ringN(cp, 0, 0);
#endif
		status &= ~INTR_RX_DONE;
	}

	if (status)
		cas_handle_irq(dev, cp, status);
	spin_unlock_irqrestore(&cp->lock, flags);
	return IRQ_HANDLED;
}

#ifdef USE_NAPI
static int cas_poll(struct net_device *dev, int *budget)
{
	struct cas *cp = netdev_priv(dev);
	int i, enable_intr, todo, credits;
	u32 status = readl(cp->regs + REG_INTR_STATUS);
	unsigned long flags;

	spin_lock_irqsave(&cp->lock, flags);
	cas_tx(dev, cp, status);
	spin_unlock_irqrestore(&cp->lock, flags);

	/* NAPI rx packets. we spread the credits across all of the
	 * rxc rings
	 */
	todo = min(*budget, dev->quota);

	/* to make sure we're fair with the work we loop through each
	 * ring N_RX_COMP_RINGS times with a request of
	 * todo / N_RX_COMP_RINGS
	 */
	enable_intr = 1;
	credits = 0;
	for (i = 0; i < N_RX_COMP_RINGS; i++) {
		int j;
		for (j = 0; j < N_RX_COMP_RINGS; j++) {
			credits += cas_rx_ringN(cp, j, todo / N_RX_COMP_RINGS);
			if (credits >= todo) {
				enable_intr = 0;
				goto rx_comp;
			}
		}
	}

rx_comp:
	*budget    -= credits;
	dev->quota -= credits;

	/* final rx completion */
	spin_lock_irqsave(&cp->lock, flags);
	if (status)
		cas_handle_irq(dev, cp, status);

#ifdef USE_PCI_INTB
	if (N_RX_COMP_RINGS > 1) {
		status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
		if (status)
			cas_handle_irq1(cp, status);
	}
#endif

#ifdef USE_PCI_INTC
	if (N_RX_COMP_RINGS > 2) {
		status = readl(cp->regs + REG_PLUS_INTRN_STATUS(2));
		if (status)
			cas_handle_irqN(dev, cp, status, 2);
	}
#endif

#ifdef USE_PCI_INTD
	if (N_RX_COMP_RINGS > 3) {
		status = readl(cp->regs + REG_PLUS_INTRN_STATUS(3));
		if (status)
			cas_handle_irqN(dev, cp, status, 3);
	}
#endif
	spin_unlock_irqrestore(&cp->lock, flags);
	if (enable_intr) {
		netif_rx_complete(dev);
		cas_unmask_intr(cp);
		return 0;
	}
	return 1;
}
#endif

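/* Worked example of the fairness split above (illustrative, values
 * assumed): with *budget == 64, dev->quota == 32 and
 * N_RX_COMP_RINGS == 4, todo == 32 and each inner call may reap at
 * most 32 / 4 == 8 completions, so every ring gets serviced before any
 * single ring can consume the whole quota.
 */
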
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cas_netpoll(struct net_device *dev)
{
	struct cas *cp = netdev_priv(dev);

	cas_disable_irq(cp, 0);
	cas_interrupt(cp->pdev->irq, dev, NULL);
	cas_enable_irq(cp, 0);

#ifdef USE_PCI_INTB
	if (N_RX_COMP_RINGS > 1) {
		/* cas_interrupt1(); */
	}
#endif
#ifdef USE_PCI_INTC
	if (N_RX_COMP_RINGS > 2) {
		/* cas_interruptN(); */
	}
#endif
#ifdef USE_PCI_INTD
	if (N_RX_COMP_RINGS > 3) {
		/* cas_interruptN(); */
	}
#endif
}
#endif

static void cas_tx_timeout(struct net_device *dev)
{
	struct cas *cp = netdev_priv(dev);

	printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
	if (!cp->hw_running) {
		printk(KERN_ERR "%s: hrm.. hw not running!\n", dev->name);
		return;
	}

	printk(KERN_ERR "%s: MIF_STATE[%08x]\n",
	       dev->name, readl(cp->regs + REG_MIF_STATE_MACHINE));

	printk(KERN_ERR "%s: MAC_STATE[%08x]\n",
	       dev->name, readl(cp->regs + REG_MAC_STATE_MACHINE));

	printk(KERN_ERR "%s: TX_STATE[%08x:%08x:%08x] "
	       "FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n",
	       dev->name,
	       readl(cp->regs + REG_TX_CFG),
	       readl(cp->regs + REG_MAC_TX_STATUS),
	       readl(cp->regs + REG_MAC_TX_CFG),
	       readl(cp->regs + REG_TX_FIFO_PKT_CNT),
	       readl(cp->regs + REG_TX_FIFO_WRITE_PTR),
	       readl(cp->regs + REG_TX_FIFO_READ_PTR),
	       readl(cp->regs + REG_TX_SM_1),
	       readl(cp->regs + REG_TX_SM_2));

	printk(KERN_ERR "%s: RX_STATE[%08x:%08x:%08x]\n",
	       dev->name,
	       readl(cp->regs + REG_RX_CFG),
	       readl(cp->regs + REG_MAC_RX_STATUS),
	       readl(cp->regs + REG_MAC_RX_CFG));

	printk(KERN_ERR "%s: HP_STATE[%08x:%08x:%08x:%08x]\n",
	       dev->name,
	       readl(cp->regs + REG_HP_STATE_MACHINE),
	       readl(cp->regs + REG_HP_STATUS0),
	       readl(cp->regs + REG_HP_STATUS1),
	       readl(cp->regs + REG_HP_STATUS2));

#if 1
	atomic_inc(&cp->reset_task_pending);
	atomic_inc(&cp->reset_task_pending_all);
	schedule_work(&cp->reset_task);
#else
	atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
	schedule_work(&cp->reset_task);
#endif
}

static inline int cas_intme(int ring, int entry)
{
	/* Algorithm: IRQ every 1/2 of descriptors. */
	if (!(entry & ((TX_DESC_RINGN_SIZE(ring) >> 1) - 1)))
		return 1;
	return 0;
}

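/* Worked example (illustrative, ring size assumed): with a 128-entry
 * TX ring the mask is (128 >> 1) - 1 == 63, so only entries 0 and 64
 * request an interrupt:
 */
#if 0
	cas_intme(ring, 0);	/* -> 1 */
	cas_intme(ring, 64);	/* -> 1 */
	cas_intme(ring, 65);	/* -> 0 */
#endif
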
static void cas_write_txd(struct cas *cp, int ring, int entry,
			  dma_addr_t mapping, int len, u64 ctrl, int last)
{
	struct cas_tx_desc *txd = cp->init_txds[ring] + entry;

	ctrl |= CAS_BASE(TX_DESC_BUFLEN, len);
	if (cas_intme(ring, entry))
		ctrl |= TX_DESC_INTME;
	if (last)
		ctrl |= TX_DESC_EOF;
	txd->control = cpu_to_le64(ctrl);
	txd->buffer = cpu_to_le64(mapping);
}

static inline void *tx_tiny_buf(struct cas *cp, const int ring,
				const int entry)
{
	return cp->tx_tiny_bufs[ring] + TX_TINY_BUF_LEN*entry;
}

static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring,
				     const int entry, const int tentry)
{
	cp->tx_tiny_use[ring][tentry].nbufs++;
	cp->tx_tiny_use[ring][entry].used = 1;
	return cp->tx_tiny_dvma[ring] + TX_TINY_BUF_LEN*entry;
}

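/* Illustrative note (not driver code): the "tiny" buffers above bounce
 * the tail of a buffer whose layout would trip the chip's target-abort
 * erratum. cas_calc_tabort() returns how many trailing bytes must be
 * bounced; the caller then splits one logical buffer into a DMA-mapped
 * head and a copied tail, roughly:
 */
#if 0
	tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len);
	if (tabort) {
		/* descriptor 1: first len - tabort bytes mapped in place */
		/* descriptor 2: last tabort bytes copied to a tiny buf */
	}
#endif
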
static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
				    struct sk_buff *skb)
{
	struct net_device *dev = cp->dev;
	int entry, nr_frags, frag, tabort, tentry;
	dma_addr_t mapping;
	unsigned long flags;
	u64 ctrl;
	u32 len;

	spin_lock_irqsave(&cp->tx_lock[ring], flags);

	/* This is a hard error, log it. */
	if (TX_BUFFS_AVAIL(cp, ring) <=
	    CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
		       "queue awake!\n", dev->name);
		return 1;
	}

	ctrl = 0;
	if (skb->ip_summed == CHECKSUM_HW) {
		u64 csum_start_off, csum_stuff_off;

		csum_start_off = (u64) (skb->h.raw - skb->data);
		csum_stuff_off = (u64) ((skb->h.raw + skb->csum) - skb->data);

		ctrl = TX_DESC_CSUM_EN |
		       CAS_BASE(TX_DESC_CSUM_START, csum_start_off) |
		       CAS_BASE(TX_DESC_CSUM_STUFF, csum_stuff_off);
	}

	entry = cp->tx_new[ring];
	cp->tx_skbs[ring][entry] = skb;

	nr_frags = skb_shinfo(skb)->nr_frags;
	len = skb_headlen(skb);
	mapping = pci_map_page(cp->pdev, virt_to_page(skb->data),
			       offset_in_page(skb->data), len,
			       PCI_DMA_TODEVICE);

	tentry = entry;
	tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len);
	if (unlikely(tabort)) {
		/* NOTE: len is always > tabort */
		cas_write_txd(cp, ring, entry, mapping, len - tabort,
			      ctrl | TX_DESC_SOF, 0);
		entry = TX_DESC_NEXT(ring, entry);

		memcpy(tx_tiny_buf(cp, ring, entry), skb->data +
		       len - tabort, tabort);
		mapping = tx_tiny_map(cp, ring, entry, tentry);
		cas_write_txd(cp, ring, entry, mapping, tabort, ctrl,
			      (nr_frags == 0));
	} else {
		cas_write_txd(cp, ring, entry, mapping, len, ctrl |
			      TX_DESC_SOF, (nr_frags == 0));
	}
	entry = TX_DESC_NEXT(ring, entry);

	for (frag = 0; frag < nr_frags; frag++) {
		skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];

		len = fragp->size;
		mapping = pci_map_page(cp->pdev, fragp->page,
				       fragp->page_offset, len,
				       PCI_DMA_TODEVICE);

		tabort = cas_calc_tabort(cp, fragp->page_offset, len);
		if (unlikely(tabort)) {
			void *addr;

			/* NOTE: len is always > tabort */
			cas_write_txd(cp, ring, entry, mapping, len - tabort,
				      ctrl, 0);
			entry = TX_DESC_NEXT(ring, entry);

			addr = cas_page_map(fragp->page);
			memcpy(tx_tiny_buf(cp, ring, entry),
			       addr + fragp->page_offset + len - tabort,
			       tabort);
			cas_page_unmap(addr);
			mapping = tx_tiny_map(cp, ring, entry, tentry);
			len = tabort;
		}

		cas_write_txd(cp, ring, entry, mapping, len, ctrl,
			      (frag + 1 == nr_frags));
		entry = TX_DESC_NEXT(ring, entry);
	}

	cp->tx_new[ring] = entry;
	if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))
		netif_stop_queue(dev);

	if (netif_msg_tx_queued(cp))
		printk(KERN_DEBUG "%s: tx[%d] queued, slot %d, skblen %d, "
		       "avail %d\n",
		       dev->name, ring, entry, skb->len,
		       TX_BUFFS_AVAIL(cp, ring));
	writel(entry, cp->regs + REG_TX_KICKN(ring));
	spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
	return 0;
}

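/* Worked example of the checksum-offload fields above (illustrative):
 * for an IPv4 TCP frame with a 14-byte Ethernet header and a 20-byte
 * IP header, skb->h.raw points at the TCP header, so
 * csum_start_off == 34, and with the checksum field 16 bytes into the
 * TCP header, csum_stuff_off == 34 + 16 == 50.
 */
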
static int cas_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct cas *cp = netdev_priv(dev);

	/* this is only used as a load-balancing hint, so it doesn't
	 * need to be SMP safe
	 */
	static int ring;

	skb = skb_padto(skb, cp->min_frame_size);
	if (!skb)
		return 0;

	/* XXX: we need some higher-level QoS hooks to steer packets to
	 *      individual queues.
	 */
	if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb))
		return 1;
	dev->trans_start = jiffies;
	return 0;
}

static void cas_init_tx_dma(struct cas *cp)
{
	u64 desc_dma = cp->block_dvma;
	unsigned long off;
	u64 val;
	int i;

	/* set up tx completion writeback registers. must be 8-byte aligned */
#ifdef USE_TX_COMPWB
	off = offsetof(struct cas_init_block, tx_compwb);
	writel((desc_dma + off) >> 32, cp->regs + REG_TX_COMPWB_DB_HI);
	writel((desc_dma + off) & 0xffffffff, cp->regs + REG_TX_COMPWB_DB_LOW);
#endif

	/* enable completion writebacks, enable paced mode,
	 * disable read pipe, and disable pre-interrupt compwbs
	 */
	val = TX_CFG_COMPWB_Q1 | TX_CFG_COMPWB_Q2 |
	      TX_CFG_COMPWB_Q3 | TX_CFG_COMPWB_Q4 |
	      TX_CFG_DMA_RDPIPE_DIS | TX_CFG_PACED_MODE |
	      TX_CFG_INTR_COMPWB_DIS;

	/* write out tx ring info and tx desc bases */
	for (i = 0; i < MAX_TX_RINGS; i++) {
		off = (unsigned long) cp->init_txds[i] -
			(unsigned long) cp->init_block;

		val |= CAS_TX_RINGN_BASE(i);
		writel((desc_dma + off) >> 32, cp->regs + REG_TX_DBN_HI(i));
		writel((desc_dma + off) & 0xffffffff, cp->regs +
		       REG_TX_DBN_LOW(i));
		/* don't zero out the kick register here as the system
		 * will wedge
		 */
	}
	writel(val, cp->regs + REG_TX_CFG);

	/* program max burst sizes. these numbers should be different
	 * if doing QoS.
	 */
#ifdef USE_QOS
	writel(0x800, cp->regs + REG_TX_MAXBURST_0);
	writel(0x1600, cp->regs + REG_TX_MAXBURST_1);
	writel(0x2400, cp->regs + REG_TX_MAXBURST_2);
	writel(0x4800, cp->regs + REG_TX_MAXBURST_3);
#else
	writel(0x800, cp->regs + REG_TX_MAXBURST_0);
	writel(0x800, cp->regs + REG_TX_MAXBURST_1);
	writel(0x800, cp->regs + REG_TX_MAXBURST_2);
	writel(0x800, cp->regs + REG_TX_MAXBURST_3);
#endif
}

/* Must be invoked under cp->lock. */
static inline void cas_init_dma(struct cas *cp)
{
	cas_init_tx_dma(cp);
	cas_init_rx_dma(cp);
}

/* Must be invoked under cp->lock. */
static u32 cas_setup_multicast(struct cas *cp)
{
	u32 rxcfg = 0;
	int i;

	if (cp->dev->flags & IFF_PROMISC) {
		rxcfg |= MAC_RX_CFG_PROMISC_EN;

	} else if (cp->dev->flags & IFF_ALLMULTI) {
		for (i = 0; i < 16; i++)
			writel(0xFFFF, cp->regs + REG_MAC_HASH_TABLEN(i));
		rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;

	} else {
		u16 hash_table[16];
		u32 crc;
		struct dev_mc_list *dmi = cp->dev->mc_list;

		/* use the alternate mac address registers for the
		 * first 15 multicast addresses
		 */
		for (i = 1; i <= CAS_MC_EXACT_MATCH_SIZE; i++) {
			if (!dmi) {
				writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 0));
				writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 1));
				writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 2));
				continue;
			}
			writel((dmi->dmi_addr[4] << 8) | dmi->dmi_addr[5],
			       cp->regs + REG_MAC_ADDRN(i*3 + 0));
			writel((dmi->dmi_addr[2] << 8) | dmi->dmi_addr[3],
			       cp->regs + REG_MAC_ADDRN(i*3 + 1));
			writel((dmi->dmi_addr[0] << 8) | dmi->dmi_addr[1],
			       cp->regs + REG_MAC_ADDRN(i*3 + 2));
			dmi = dmi->next;
		}

		/* use hw hash table for the next series of
		 * multicast addresses
		 */
		memset(hash_table, 0, sizeof(hash_table));
		while (dmi) {
			crc = ether_crc_le(ETH_ALEN, dmi->dmi_addr);
			crc >>= 24;
			hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
			dmi = dmi->next;
		}
		for (i = 0; i < 16; i++)
			writel(hash_table[i], cp->regs +
			       REG_MAC_HASH_TABLEN(i));
		rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
	}

	return rxcfg;
}

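/* Illustrative sketch (not driver code) of the hash filter above: the
 * high byte of the little-endian CRC of the address selects one of 256
 * filter bits, stored as 16 registers of 16 bits each ("addr" here is
 * a hypothetical u8[ETH_ALEN]):
 */
#if 0
	crc = ether_crc_le(ETH_ALEN, addr);
	crc >>= 24;				/* keep the high byte */
	hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
#endif
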
/* must be invoked under cp->stat_lock[N_TX_RINGS] */
static void cas_clear_mac_err(struct cas *cp)
{
	writel(0, cp->regs + REG_MAC_COLL_NORMAL);
	writel(0, cp->regs + REG_MAC_COLL_FIRST);
	writel(0, cp->regs + REG_MAC_COLL_EXCESS);
	writel(0, cp->regs + REG_MAC_COLL_LATE);
	writel(0, cp->regs + REG_MAC_TIMER_DEFER);
	writel(0, cp->regs + REG_MAC_ATTEMPTS_PEAK);
	writel(0, cp->regs + REG_MAC_RECV_FRAME);
	writel(0, cp->regs + REG_MAC_LEN_ERR);
	writel(0, cp->regs + REG_MAC_ALIGN_ERR);
	writel(0, cp->regs + REG_MAC_FCS_ERR);
	writel(0, cp->regs + REG_MAC_RX_CODE_ERR);
}

static void cas_mac_reset(struct cas *cp)
{
	int i;

	/* do both TX and RX reset */
	writel(0x1, cp->regs + REG_MAC_TX_RESET);
	writel(0x1, cp->regs + REG_MAC_RX_RESET);

	/* wait for TX */
	i = STOP_TRIES;
	while (i-- > 0) {
		if (readl(cp->regs + REG_MAC_TX_RESET) == 0)
			break;
		udelay(10);
	}

	/* wait for RX */
	i = STOP_TRIES;
	while (i-- > 0) {
		if (readl(cp->regs + REG_MAC_RX_RESET) == 0)
			break;
		udelay(10);
	}

	if (readl(cp->regs + REG_MAC_TX_RESET) |
	    readl(cp->regs + REG_MAC_RX_RESET))
		printk(KERN_ERR "%s: mac tx[%d]/rx[%d] reset failed [%08x]\n",
		       cp->dev->name, readl(cp->regs + REG_MAC_TX_RESET),
		       readl(cp->regs + REG_MAC_RX_RESET),
		       readl(cp->regs + REG_MAC_STATE_MACHINE));
}

/* Must be invoked under cp->lock. */
static void cas_init_mac(struct cas *cp)
{
	unsigned char *e = &cp->dev->dev_addr[0];
	int i;
#ifdef CONFIG_CASSINI_MULTICAST_REG_WRITE
	u32 rxcfg;
#endif
	cas_mac_reset(cp);

	/* setup core arbitration weight register */
	writel(CAWR_RR_DIS, cp->regs + REG_CAWR);

	/* XXX Use pci_dma_burst_advice() */
#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA)
	/* set the infinite burst register for chips that don't have
	 * pci issues.
	 */
	if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) == 0)
		writel(INF_BURST_EN, cp->regs + REG_INF_BURST);
#endif

	writel(0x1BF0, cp->regs + REG_MAC_SEND_PAUSE);

	writel(0x00, cp->regs + REG_MAC_IPG0);
	writel(0x08, cp->regs + REG_MAC_IPG1);
	writel(0x04, cp->regs + REG_MAC_IPG2);

	/* change later for 802.3z */
	writel(0x40, cp->regs + REG_MAC_SLOT_TIME);

	/* min frame + FCS */
	writel(ETH_ZLEN + 4, cp->regs + REG_MAC_FRAMESIZE_MIN);

	/* Ethernet payload + header + FCS + optional VLAN tag. NOTE: we
	 * specify the maximum frame size to prevent RX tag errors on
	 * oversized frames.
	 */
	writel(CAS_BASE(MAC_FRAMESIZE_MAX_BURST, 0x2000) |
	       CAS_BASE(MAC_FRAMESIZE_MAX_FRAME,
			(CAS_MAX_MTU + ETH_HLEN + 4 + 4)),
	       cp->regs + REG_MAC_FRAMESIZE_MAX);

	/* NOTE: crc_size is used as a surrogate for half-duplex.
	 * workaround saturn half-duplex issue by increasing preamble
	 * size to 65 bytes.
	 */
	if ((cp->cas_flags & CAS_FLAG_SATURN) && cp->crc_size)
		writel(0x41, cp->regs + REG_MAC_PA_SIZE);
	else
		writel(0x07, cp->regs + REG_MAC_PA_SIZE);
	writel(0x04, cp->regs + REG_MAC_JAM_SIZE);
	writel(0x10, cp->regs + REG_MAC_ATTEMPT_LIMIT);
	writel(0x8808, cp->regs + REG_MAC_CTRL_TYPE);

	writel((e[5] | (e[4] << 8)) & 0x3ff, cp->regs + REG_MAC_RANDOM_SEED);

	writel(0, cp->regs + REG_MAC_ADDR_FILTER0);
	writel(0, cp->regs + REG_MAC_ADDR_FILTER1);
	writel(0, cp->regs + REG_MAC_ADDR_FILTER2);
	writel(0, cp->regs + REG_MAC_ADDR_FILTER2_1_MASK);
	writel(0, cp->regs + REG_MAC_ADDR_FILTER0_MASK);

	/* setup mac address in perfect filter array */
	for (i = 0; i < 45; i++)
		writel(0x0, cp->regs + REG_MAC_ADDRN(i));

	writel((e[4] << 8) | e[5], cp->regs + REG_MAC_ADDRN(0));
	writel((e[2] << 8) | e[3], cp->regs + REG_MAC_ADDRN(1));
	writel((e[0] << 8) | e[1], cp->regs + REG_MAC_ADDRN(2));

	writel(0x0001, cp->regs + REG_MAC_ADDRN(42));
	writel(0xc200, cp->regs + REG_MAC_ADDRN(43));
	writel(0x0180, cp->regs + REG_MAC_ADDRN(44));

#ifndef CONFIG_CASSINI_MULTICAST_REG_WRITE
	cp->mac_rx_cfg = cas_setup_multicast(cp);
#else
	/* WTZ: Do what Adrian did in cas_set_multicast. Doing
	 * a writel does not seem to be necessary because Cassini
	 * seems to preserve the configuration when we do the reset.
	 * If the chip is in trouble, though, it is not clear if we
	 * can really count on this behavior. cas_set_multicast uses
	 * spin_lock_irqsave, but we are called only in cas_init_hw and
	 * cas_init_hw is protected by cas_lock_all, which calls
	 * spin_lock_irq (so it doesn't need to save the flags, and
	 * we should be OK for the writel, as that is the only
	 * difference).
	 */
	cp->mac_rx_cfg = rxcfg = cas_setup_multicast(cp);
	writel(rxcfg, cp->regs + REG_MAC_RX_CFG);
#endif
	spin_lock(&cp->stat_lock[N_TX_RINGS]);
	cas_clear_mac_err(cp);
	spin_unlock(&cp->stat_lock[N_TX_RINGS]);

	/* Setup MAC interrupts.  We want to get all of the interesting
	 * counter expiration events, but we do not want to hear about
	 * normal rx/tx as the DMA engine tells us that.
	 */
	writel(MAC_TX_FRAME_XMIT, cp->regs + REG_MAC_TX_MASK);
	writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);

	/* Don't enable even the PAUSE interrupts for now, we
	 * make no use of those events other than to record them.
	 */
	writel(0xffffffff, cp->regs + REG_MAC_CTRL_MASK);
}

/* Must be invoked under cp->lock. */
static void cas_init_pause_thresholds(struct cas *cp)
{
	/* Calculate pause thresholds. Setting the OFF threshold to the
	 * full RX fifo size effectively disables PAUSE generation
	 */
	if (cp->rx_fifo_size <= (2 * 1024)) {
		cp->rx_pause_off = cp->rx_pause_on = cp->rx_fifo_size;
	} else {
		int max_frame = (cp->dev->mtu + ETH_HLEN + 4 + 4 + 64) & ~63;
		if (max_frame * 3 > cp->rx_fifo_size) {
			cp->rx_pause_off = 7104;
			cp->rx_pause_on  = 960;
		} else {
			int off = (cp->rx_fifo_size - (max_frame * 2));
			int on  = off - max_frame;
			cp->rx_pause_off = off;
			cp->rx_pause_on = on;
		}
	}
}

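/* Worked example (illustrative, sizes assumed): with a 1500-byte MTU,
 * max_frame == (1500 + 14 + 4 + 4 + 64) & ~63 == 1536. For a 16 KB RX
 * FIFO, 3 * 1536 fits, so an XOFF pause frame goes out once the FIFO
 * fills past 16384 - 2 * 1536 == 13312 bytes, and an XON once it
 * drains below 13312 - 1536 == 11776.
 */
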
static int cas_vpd_match(const void __iomem *p, const char *str)
{
	int len = strlen(str) + 1;
	int i;

	for (i = 0; i < len; i++) {
		if (readb(p + i) != str[i])
			return 0;
	}
	return 1;
}


/* get the mac address by reading the vpd information in the rom.
 * also get the phy type and determine if there's an entropy generator.
 * NOTE: this is a bit convoluted for the following reasons:
 *       1) vpd info has order-dependent mac addresses for multinic cards
 *       2) the only way to determine the nic order is to use the slot
 *          number.
 *       3) fiber cards don't have bridges, so their slot numbers don't
 *          mean anything.
 *       4) we don't actually know we have a fiber card until after
 *          the mac addresses are parsed.
 */
static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
			    const int offset)
{
	void __iomem *p = cp->regs + REG_EXPANSION_ROM_RUN_START;
	void __iomem *base, *kstart;
	int i, len;
	int found = 0;
#define VPD_FOUND_MAC        0x01
#define VPD_FOUND_PHY        0x02

	int phy_type = CAS_PHY_MII_MDIO0; /* default phy type */
	int mac_off  = 0;

	/* give us access to the PROM */
	writel(BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_PAD,
	       cp->regs + REG_BIM_LOCAL_DEV_EN);

	/* check for an expansion rom */
	if (readb(p) != 0x55 || readb(p + 1) != 0xaa)
		goto use_random_mac_addr;

	/* search for beginning of vpd */
	base = NULL;
	for (i = 2; i < EXPANSION_ROM_SIZE; i++) {
		/* check for PCIR */
		if ((readb(p + i + 0) == 0x50) &&
		    (readb(p + i + 1) == 0x43) &&
		    (readb(p + i + 2) == 0x49) &&
		    (readb(p + i + 3) == 0x52)) {
			base = p + (readb(p + i + 8) |