Merge branch 'master' of github.com:davem330/net
David S. Miller [Thu, 22 Sep 2011 07:23:13 +0000 (03:23 -0400)]
Conflicts:
MAINTAINERS
drivers/net/Kconfig
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/wireless/iwlwifi/iwl-pci.c
drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
drivers/net/wireless/rt2x00/rt2800usb.c
drivers/net/wireless/wl12xx/main.c

73 files changed:
Documentation/networking/ip-sysctl.txt
MAINTAINERS
drivers/bcma/main.c
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/net/ethernet/aeroflex/greth.c
drivers/net/ethernet/aeroflex/greth.h
drivers/net/ethernet/amd/am79c961a.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/freescale/gianfar_ethtool.c
drivers/net/ethernet/ibm/ibmveth.c
drivers/net/ethernet/intel/e1000/e1000_hw.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/marvell/pxa168_eth.c
drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/sfc/efx.c
drivers/net/ethernet/sfc/io.h
drivers/net/ethernet/sfc/mcdi.c
drivers/net/ethernet/sfc/nic.c
drivers/net/ethernet/sfc/nic.h
drivers/net/ethernet/sfc/siena.c
drivers/net/ethernet/sfc/workarounds.h
drivers/net/ethernet/sun/cassini.c
drivers/net/ppp/ppp_generic.c
drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
drivers/net/wireless/ath/ath9k/ar9003_phy.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/carl9170/main.c
drivers/net/wireless/b43/main.c
drivers/net/wireless/ipw2x00/ipw2200.c
drivers/net/wireless/iwlegacy/iwl-3945-rs.c
drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
drivers/net/wireless/iwlwifi/iwl-agn.c
drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
drivers/net/wireless/rt2x00/rt2800lib.c
drivers/net/wireless/rt2x00/rt2800usb.c
drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
drivers/net/wireless/wl12xx/acx.c
drivers/net/wireless/wl12xx/sdio.c
drivers/scsi/fcoe/fcoe.c
include/linux/skbuff.h
include/net/cfg80211.h
include/net/tcp.h
net/bridge/br_if.c
net/can/af_can.c
net/core/dev.c
net/core/fib_rules.c
net/core/neighbour.c
net/core/netpoll.c
net/core/skbuff.c
net/ipv4/igmp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv6/addrconf.c
net/ipv6/raw.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/mac80211/main.c
net/mac80211/sta_info.c
net/sctp/sm_statefuns.c
net/socket.c
net/wireless/core.c
net/wireless/reg.c

diff --combined Documentation/networking/ip-sysctl.txt
@@@ -992,7 -992,7 +992,7 @@@ bindv6only - BOOLEA
                TRUE: disable IPv4-mapped address feature
                FALSE: enable IPv4-mapped address feature
  
-       Default: FALSE (as specified in RFC2553bis)
+       Default: FALSE (as specified in RFC3493)
  
  IPv6 Fragmentation:
  
@@@ -1045,11 -1045,6 +1045,11 @@@ conf/interface/*
  accept_ra - BOOLEAN
        Accept Router Advertisements; autoconfigure using them.
  
 +      It also determines whether or not to transmit Router
 +      Solicitations. If and only if the functional setting is to
 +      accept Router Advertisements, Router Solicitations will be
 +      transmitted.
 +
        Possible values are:
                0 Do not accept Router Advertisements.
                1 Accept Router Advertisements if forwarding is disabled.
@@@ -1120,14 -1115,14 +1120,14 @@@ forwarding - BOOLEA
        Possible values are:
                0 Forwarding disabled
                1 Forwarding enabled
 -              2 Forwarding enabled (Hybrid Mode)
  
        FALSE (0):
  
        By default, Host behaviour is assumed.  This means:
  
        1. IsRouter flag is not set in Neighbour Advertisements.
 -      2. Router Solicitations are being sent when necessary.
 +      2. If accept_ra is TRUE (default), transmit Router
 +         Solicitations.
        3. If accept_ra is TRUE (default), accept Router
           Advertisements (and do autoconfiguration).
        4. If accept_redirects is TRUE (default), accept Redirects.
        This means exactly the reverse from the above:
  
        1. IsRouter flag is set in Neighbour Advertisements.
 -      2. Router Solicitations are not sent.
 +      2. Router Solicitations are not sent unless accept_ra is 2.
        3. Router Advertisements are ignored unless accept_ra is 2.
        4. Redirects are ignored.
  
 -      TRUE (2):
 -
 -      Hybrid mode. Same behaviour as TRUE, except for:
 -
 -      2. Router Solicitations are being sent when necessary.
 -
        Default: 0 (disabled) if global forwarding is disabled (default),
                 otherwise 1 (enabled).
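
As an aside on the documentation hunk above (not part of the patch): a minimal userspace sketch, in plain C, of driving the per-interface accept_ra knob through procfs. The interface name eth0 and root privileges are assumptions; the value 2 accepts Router Advertisements even when forwarding is enabled, matching the text above.

/* Hypothetical illustration only -- equivalent to
 * "sysctl -w net.ipv6.conf.eth0.accept_ra=2". */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/net/ipv6/conf/eth0/accept_ra", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fputs("2\n", f);	/* 2: accept RAs even with forwarding enabled */
	fclose(f);
	return 0;
}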
  
diff --combined MAINTAINERS
@@@ -117,20 -117,20 +117,20 @@@ Maintainers List (try to look for most 
  M:    Philip Blundell <philb@gnu.org>
  L:    netdev@vger.kernel.org
  S:    Maintained
 -F:    drivers/net/3c505*
 +F:    drivers/net/ethernet/i825xx/3c505*
  
  3C59X NETWORK DRIVER
  M:    Steffen Klassert <klassert@mathematik.tu-chemnitz.de>
  L:    netdev@vger.kernel.org
  S:    Maintained
  F:    Documentation/networking/vortex.txt
 -F:    drivers/net/3c59x.c
 +F:    drivers/net/ethernet/3com/3c59x.c
  
  3CR990 NETWORK DRIVER
  M:    David Dillow <dave@thedillows.org>
  L:    netdev@vger.kernel.org
  S:    Maintained
 -F:    drivers/net/typhoon*
 +F:    drivers/net/ethernet/3com/typhoon*
  
  3WARE SAS/SATA-RAID SCSI DRIVERS (3W-XXXX, 3W-9XXX, 3W-SAS)
  M:    Adam Radford <linuxraid@lsi.com>
@@@ -156,7 -156,7 +156,7 @@@ M: Realtek linux nic maintainers <nic_s
  M:    Francois Romieu <romieu@fr.zoreil.com>
  L:    netdev@vger.kernel.org
  S:    Maintained
 -F:    drivers/net/r8169.c
 +F:    drivers/net/ethernet/realtek/r8169.c
  
  8250/16?50 (AND CLONE UARTS) SERIAL DRIVER
  M:    Greg Kroah-Hartman <gregkh@suse.de>
@@@ -170,7 -170,8 +170,7 @@@ F: include/linux/serial_8250.
  8390 NETWORK DRIVERS [WD80x3/SMC-ELITE, SMC-ULTRA, NE2000, 3C503, etc.]
  L:    netdev@vger.kernel.org
  S:    Orphan / Obsolete
 -F:    drivers/net/*8390*
 -F:    drivers/net/ax88796.c
 +F:    drivers/net/ethernet/8390/
  
  9P FILE SYSTEM
  M:    Eric Van Hensbergen <ericvh@gmail.com>
@@@ -213,7 -214,7 +213,7 @@@ ACENIC DRIVE
  M:    Jes Sorensen <jes@trained-monkey.org>
  L:    linux-acenic@sunsite.dk
  S:    Maintained
 -F:    drivers/net/acenic*
 +F:    drivers/net/ethernet/alteon/acenic*
  
  ACER ASPIRE ONE TEMPERATURE AND FAN DRIVER
  M:    Peter Feuerer <peter@piie.net>
@@@ -745,7 -746,7 +745,7 @@@ L: linux-arm-kernel@lists.infradead.or
  W:    http://www.arm.linux.org.uk/
  S:    Maintained
  F:    arch/arm/mach-ebsa110/
 -F:    drivers/net/arm/am79c961a.*
 +F:    drivers/net/ethernet/amd/am79c961a.*
  
  ARM/EZX SMARTPHONES (A780, A910, A1200, E680, ROKR E2 and ROKR E6)
  M:    Daniel Ribeiro <drwyrm@gmail.com>
@@@ -1014,8 -1015,7 +1014,8 @@@ F:      arch/arm/include/asm/hardware/ioc.
  F:    arch/arm/include/asm/hardware/iomd.h
  F:    arch/arm/include/asm/hardware/memc.h
  F:    arch/arm/mach-rpc/
 -F:    drivers/net/arm/ether*
 +F:    drivers/net/ethernet/i825xx/ether1*
 +F:    drivers/net/ethernet/seeq/ether3*
  F:    drivers/scsi/arm/
  
  ARM/SHARK MACHINE SUPPORT
@@@ -1127,7 -1127,7 +1127,7 @@@ F:      arch/arm/mach-nuc93x
  F:    drivers/input/keyboard/w90p910_keypad.c
  F:    drivers/input/touchscreen/w90p910_ts.c
  F:    drivers/watchdog/nuc900_wdt.c
 -F:    drivers/net/arm/w90p910_ether.c
 +F:    drivers/net/ethernet/nuvoton/w90p910_ether.c
  F:    drivers/mtd/nand/nuc900_nand.c
  F:    drivers/rtc/rtc-nuc900.c
  F:    drivers/spi/spi_nuc900.c
@@@ -1230,7 -1230,7 +1230,7 @@@ F:      Documentation/aoe
  F:    drivers/block/aoe/
  
  ATHEROS ATH GENERIC UTILITIES
 -M:    "Luis R. Rodriguez" <lrodriguez@atheros.com>
 +M:    "Luis R. Rodriguez" <mcgrof@qca.qualcomm.com>
  L:    linux-wireless@vger.kernel.org
  S:    Supported
  F:    drivers/net/wireless/ath/*
  ATHEROS ATH5K WIRELESS DRIVER
  M:    Jiri Slaby <jirislaby@gmail.com>
  M:    Nick Kossifidis <mickflemm@gmail.com>
 -M:    "Luis R. Rodriguez" <lrodriguez@atheros.com>
 +M:    "Luis R. Rodriguez" <mcgrof@qca.qualcomm.com>
  M:    Bob Copeland <me@bobcopeland.com>
  L:    linux-wireless@vger.kernel.org
  L:    ath5k-devel@lists.ath5k.org
@@@ -1246,19 -1246,11 +1246,19 @@@ W:   http://wireless.kernel.org/en/users/
  S:    Maintained
  F:    drivers/net/wireless/ath/ath5k/
  
 +ATHEROS ATH6KL WIRELESS DRIVER
 +M:    Kalle Valo <kvalo@qca.qualcomm.com>
 +L:    linux-wireless@vger.kernel.org
 +W:    http://wireless.kernel.org/en/users/Drivers/ath6kl
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath6kl.git
 +S:    Supported
 +F:    drivers/net/wireless/ath/ath6kl/
 +
  ATHEROS ATH9K WIRELESS DRIVER
 -M:    "Luis R. Rodriguez" <lrodriguez@atheros.com>
 -M:    Jouni Malinen <jmalinen@atheros.com>
 -M:    Vasanthakumar Thiagarajan <vasanth@atheros.com>
 -M:    Senthil Balasubramanian <senthilkumar@atheros.com>
 +M:    "Luis R. Rodriguez" <mcgrof@qca.qualcomm.com>
 +M:    Jouni Malinen <jouni@qca.qualcomm.com>
 +M:    Vasanthakumar Thiagarajan <vthiagar@qca.qualcomm.com>
 +M:    Senthil Balasubramanian <senthilb@qca.qualcomm.com>
  L:    linux-wireless@vger.kernel.org
  L:    ath9k-devel@lists.ath9k.org
  W:    http://wireless.kernel.org/en/users/Drivers/ath9k
@@@ -1286,12 -1278,11 +1286,11 @@@ F:   drivers/input/misc/ati_remote2.
  ATLX ETHERNET DRIVERS
  M:    Jay Cliburn <jcliburn@gmail.com>
  M:    Chris Snook <chris.snook@gmail.com>
- M:    Jie Yang <yangjie@qca.qualcomm.com>
  L:    netdev@vger.kernel.org
  W:    http://sourceforge.net/projects/atl1
  W:    http://atl1.sourceforge.net
  S:    Maintained
 -F:    drivers/net/atlx/
 +F:    drivers/net/ethernet/atheros/
  
  ATM
  M:    Chas Williams <chas@cmf.nrl.navy.mil>
@@@ -1331,7 -1322,7 +1330,7 @@@ F:      include/video/atmel_lcdc.
  ATMEL MACB ETHERNET DRIVER
  M:    Nicolas Ferre <nicolas.ferre@atmel.com>
  S:    Supported
 -F:    drivers/net/macb.*
 +F:    drivers/net/ethernet/cadence/
  
  ATMEL SPI DRIVER
  M:    Nicolas Ferre <nicolas.ferre@atmel.com>
@@@ -1454,7 -1445,7 +1453,7 @@@ BLACKFIN EMAC DRIVE
  L:    uclinux-dist-devel@blackfin.uclinux.org
  W:    http://blackfin.uclinux.org
  S:    Supported
 -F:    drivers/net/bfin_mac.*
 +F:    drivers/net/ethernet/adi/
  
  BLACKFIN RTC DRIVER
  M:    Mike Frysinger <vapier.adi@gmail.com>
@@@ -1535,27 -1526,27 +1534,27 @@@ BROADCOM B44 10/100 ETHERNET DRIVE
  M:    Gary Zambrano <zambrano@broadcom.com>
  L:    netdev@vger.kernel.org
  S:    Supported
 -F:    drivers/net/b44.*
 +F:    drivers/net/ethernet/broadcom/b44.*
  
  BROADCOM BNX2 GIGABIT ETHERNET DRIVER
  M:    Michael Chan <mchan@broadcom.com>
  L:    netdev@vger.kernel.org
  S:    Supported
 -F:    drivers/net/bnx2.*
 -F:    drivers/net/bnx2_*
 +F:    drivers/net/ethernet/broadcom/bnx2.*
 +F:    drivers/net/ethernet/broadcom/bnx2_*
  
  BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
  M:    Eilon Greenstein <eilong@broadcom.com>
  L:    netdev@vger.kernel.org
  S:    Supported
 -F:    drivers/net/bnx2x/
 +F:    drivers/net/ethernet/broadcom/bnx2x/
  
  BROADCOM TG3 GIGABIT ETHERNET DRIVER
  M:    Matt Carlson <mcarlson@broadcom.com>
  M:    Michael Chan <mchan@broadcom.com>
  L:    netdev@vger.kernel.org
  S:    Supported
 -F:    drivers/net/tg3.*
 +F:    drivers/net/ethernet/broadcom/tg3.*
  
  BROADCOM BRCM80211 IEEE802.11n WIRELESS DRIVER
  M:    Brett Rudley <brudley@broadcom.com>
@@@ -1582,10 -1573,9 +1581,9 @@@ F:     drivers/scsi/bfa
  
  BROCADE BNA 10 GIGABIT ETHERNET DRIVER
  M:    Rasesh Mody <rmody@brocade.com>
- M:    Debashis Dutt <ddutt@brocade.com>
  L:    netdev@vger.kernel.org
  S:    Supported
 -F:    drivers/net/bna/
 +F:    drivers/net/ethernet/brocade/bna/
  
  BSG (block layer generic sg v4 driver)
  M:    FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
@@@ -1766,17 -1756,16 +1764,16 @@@ F:   Documentation/zh_CN
  
  CISCO VIC ETHERNET NIC DRIVER
  M:    Christian Benvenuti <benve@cisco.com>
  M:    Roopa Prabhu <roprabhu@cisco.com>
  M:    David Wang <dwang2@cisco.com>
  S:    Supported
 -F:    drivers/net/enic/
 +F:    drivers/net/ethernet/cisco/enic/
  
  CIRRUS LOGIC EP93XX ETHERNET DRIVER
  M:    Hartley Sweeten <hsweeten@visionengravers.com>
  L:    netdev@vger.kernel.org
  S:    Maintained
 -F:    drivers/net/arm/ep93xx_eth.c
 +F:    drivers/net/ethernet/cirrus/ep93xx_eth.c
  
  CIRRUS LOGIC EP93XX OHCI USB HOST DRIVER
  M:    Lennert Buytenhek <kernel@wantstofly.org>
@@@ -1891,7 -1880,7 +1888,7 @@@ S:      Maintaine
  F:    drivers/connector/
  
  CONTROL GROUPS (CGROUPS)
- M:    Paul Menage <menage@google.com>
+ M:    Paul Menage <paul@paulmenage.org>
  M:    Li Zefan <lizf@cn.fujitsu.com>
  L:    containers@lists.linux-foundation.org
  S:    Maintained
@@@ -1916,7 -1905,7 +1913,7 @@@ CPMAC ETHERNET DRIVE
  M:    Florian Fainelli <florian@openwrt.org>
  L:    netdev@vger.kernel.org
  S:    Maintained
 -F:    drivers/net/cpmac.c
 +F:    drivers/net/ethernet/ti/cpmac.c
  
  CPU FREQUENCY DRIVERS
  M:    Dave Jones <davej@redhat.com>
@@@ -1940,7 -1929,7 +1937,7 @@@ S:      Maintaine
  F:    tools/power/cpupower
  
  CPUSETS
- M:    Paul Menage <menage@google.com>
+ M:    Paul Menage <paul@paulmenage.org>
  W:    http://www.bullopensource.org/cpuset/
  W:    http://oss.sgi.com/projects/cpusets/
  S:    Supported
@@@ -2003,7 -1992,7 +2000,7 @@@ M:      Divy Le Ray <divy@chelsio.com
  L:    netdev@vger.kernel.org
  W:    http://www.chelsio.com
  S:    Supported
 -F:    drivers/net/cxgb3/
 +F:    drivers/net/ethernet/chelsio/cxgb3/
  
  CXGB3 IWARP RNIC DRIVER (IW_CXGB3)
  M:    Steve Wise <swise@chelsio.com>
@@@ -2017,7 -2006,7 +2014,7 @@@ M:      Dimitris Michailidis <dm@chelsio.com
  L:    netdev@vger.kernel.org
  W:    http://www.chelsio.com
  S:    Supported
 -F:    drivers/net/cxgb4/
 +F:    drivers/net/ethernet/chelsio/cxgb4/
  
  CXGB4 IWARP RNIC DRIVER (IW_CXGB4)
  M:    Steve Wise <swise@chelsio.com>
@@@ -2031,14 -2020,14 +2028,14 @@@ M:   Casey Leedom <leedom@chelsio.com
  L:    netdev@vger.kernel.org
  W:    http://www.chelsio.com
  S:    Supported
 -F:    drivers/net/cxgb4vf/
 +F:    drivers/net/ethernet/chelsio/cxgb4vf/
  
  STMMAC ETHERNET DRIVER
  M:    Giuseppe Cavallaro <peppe.cavallaro@st.com>
  L:    netdev@vger.kernel.org
  W:    http://www.stlinux.com
  S:    Supported
 -F:    drivers/net/stmmac/
 +F:    drivers/net/ethernet/stmicro/stmmac/
  
  CYBERPRO FB DRIVER
  M:    Russell King <linux@arm.linux.org.uk>
@@@ -2082,7 -2071,7 +2079,7 @@@ DAVICOM FAST ETHERNET (DMFE) NETWORK DR
  L:    netdev@vger.kernel.org
  S:    Orphan
  F:    Documentation/networking/dmfe.txt
 -F:    drivers/net/tulip/dmfe.c
 +F:    drivers/net/ethernet/tulip/dmfe.c
  
  DC390/AM53C974 SCSI driver
  M:    Kurt Garloff <garloff@suse.de>
@@@ -2121,7 -2110,7 +2118,7 @@@ F:      net/decnet
  DEFXX FDDI NETWORK DRIVER
  M:    "Maciej W. Rozycki" <macro@linux-mips.org>
  S:    Maintained
 -F:    drivers/net/defxx.*
 +F:    drivers/net/fddi/defxx.*
  
  DELL LAPTOP DRIVER
  M:    Matthew Garrett <mjg59@srcf.ucam.org>
@@@ -2474,7 -2463,7 +2471,7 @@@ EHEA (IBM pSeries eHEA 10Gb ethernet ad
  M:    Breno Leitao <leitao@linux.vnet.ibm.com>
  L:    netdev@vger.kernel.org
  S:    Maintained
 -F:    drivers/net/ehea/
 +F:    drivers/net/ethernet/ibm/ehea/
  
  EMBEDDED LINUX
  M:    Paul Gortmaker <paul.gortmaker@windriver.com>
@@@ -2519,7 -2508,7 +2516,7 @@@ ETHEREXPRESS-16 NETWORK DRIVE
  M:    Philip Blundell <philb@gnu.org>
  L:    netdev@vger.kernel.org
  S:    Maintained
 -F:    drivers/net/eexpress.*
 +F:    drivers/net/ethernet/i825xx/eexpress.*
  
  ETHERNET BRIDGE
  M:    Stephen Hemminger <shemminger@linux-foundation.org>
@@@ -2533,7 -2522,7 +2530,7 @@@ F:      net/bridge
  ETHERTEAM 16I DRIVER
  M:    Mika Kuoppala <miku@iki.fi>
  S:    Maintained
 -F:    drivers/net/eth16i.c
 +F:    drivers/net/ethernet/fujitsu/eth16i.c
  
  EXT2 FILE SYSTEM
  M:    Jan Kara <jack@suse.cz>
@@@ -2657,11 -2646,11 +2654,11 @@@ F:   drivers/net/wan/dlci.
  F:    drivers/net/wan/sdla.c
  
  FRAMEBUFFER LAYER
- M:    Paul Mundt <lethal@linux-sh.org>
+ M:    Florian Tobias Schandinat <FlorianSchandinat@gmx.de>
  L:    linux-fbdev@vger.kernel.org
  W:    http://linux-fbdev.sourceforge.net/
  Q:    http://patchwork.kernel.org/project/linux-fbdev/list/
- T:    git git://git.kernel.org/pub/scm/linux/kernel/git/lethal/fbdev-2.6.git
+ T:    git git://github.com/schandinat/linux-2.6.git fbdev-next
  S:    Maintained
  F:    Documentation/fb/
  F:    Documentation/devicetree/bindings/fb/
@@@ -2697,7 -2686,7 +2694,7 @@@ M:      Vitaly Bordug <vbordug@ru.mvista.com
  L:    linuxppc-dev@lists.ozlabs.org
  L:    netdev@vger.kernel.org
  S:    Maintained
 -F:    drivers/net/fs_enet/
 +F:    drivers/net/ethernet/freescale/fs_enet/
  F:    include/linux/fs_enet_pd.h
  
  FREESCALE QUICC ENGINE LIBRARY
@@@ -2719,7 -2708,7 +2716,7 @@@ M:      Li Yang <leoli@freescale.com
  L:    netdev@vger.kernel.org
  L:    linuxppc-dev@lists.ozlabs.org
  S:    Maintained
 -F:    drivers/net/ucc_geth*
 +F:    drivers/net/ethernet/freescale/ucc_geth*
  
  FREESCALE QUICC ENGINE UCC UART DRIVER
  M:    Timur Tabi <timur@freescale.com>
@@@ -3057,7 -3046,6 +3054,7 @@@ S:      Maintaine
  F:    include/linux/hippidevice.h
  F:    include/linux/if_hippi.h
  F:    net/802/hippi.c
 +F:    drivers/net/hippi/
  
  HOST AP DRIVER
  M:    Jouni Malinen <j@w1.fi>
@@@ -3075,7 -3063,7 +3072,7 @@@ F:      drivers/platform/x86/tc1100-wmi.
  HP100:        Driver for HP 10/100 Mbit/s Voice Grade Network Adapter Series
  M:    Jaroslav Kysela <perex@perex.cz>
  S:    Maintained
 -F:    drivers/net/hp100.*
 +F:    drivers/net/ethernet/hp/hp100.*
  
  HPET: High Precision Event Timers driver
  M:    Clemens Ladisch <clemens@ladisch.de>
@@@ -3173,7 -3161,7 +3170,7 @@@ IBM Power Virtual Ethernet Device Drive
  M:    Santiago Leon <santil@linux.vnet.ibm.com>
  L:    netdev@vger.kernel.org
  S:    Supported
 -F:    drivers/net/ibmveth.*
 +F:    drivers/net/ethernet/ibm/ibmveth.*
  
  IBM ServeRAID RAID DRIVER
  P:    Jack Hammer
@@@ -3271,6 -3259,17 +3268,17 @@@ F:    Documentation/input/multi-touch-prot
  F:    drivers/input/input-mt.c
  K:    \b(ABS|SYN)_MT_
  
+ INTEL C600 SERIES SAS CONTROLLER DRIVER
+ M:    Intel SCU Linux support <intel-linux-scu@intel.com>
+ M:    Dan Williams <dan.j.williams@intel.com>
+ M:    Dave Jiang <dave.jiang@intel.com>
+ M:    Ed Nadolski <edmund.nadolski@intel.com>
+ L:    linux-scsi@vger.kernel.org
+ T:    git git://git.kernel.org/pub/scm/linux/kernel/git/djbw/isci.git
+ S:    Maintained
+ F:    drivers/scsi/isci/
+ F:    firmware/isci/
+ 
  INTEL IDLE DRIVER
  M:    Len Brown <lenb@kernel.org>
  L:    linux-pm@lists.linux-foundation.org
@@@ -3329,7 -3328,7 +3337,7 @@@ F:      arch/arm/mach-ixp4xx/include/mach/qm
  F:    arch/arm/mach-ixp4xx/include/mach/npe.h
  F:    arch/arm/mach-ixp4xx/ixp4xx_qmgr.c
  F:    arch/arm/mach-ixp4xx/ixp4xx_npe.c
 -F:    drivers/net/arm/ixp4xx_eth.c
 +F:    drivers/net/ethernet/xscale/ixp4xx_eth.c
  F:    drivers/net/wan/ixp4xx_hss.c
  
  INTEL IXP4XX RANDOM NUMBER GENERATOR SUPPORT
@@@ -3341,7 -3340,7 +3349,7 @@@ INTEL IXP2000 ETHERNET DRIVE
  M:    Lennert Buytenhek <kernel@wantstofly.org>
  L:    netdev@vger.kernel.org
  S:    Maintained
 -F:    drivers/net/ixp2000/
 +F:    drivers/net/ethernet/xscale/ixp2000/
  
  INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/igbvf/ixgb/ixgbe/ixgbevf)
  M:    Jeff Kirsher <jeffrey.t.kirsher@intel.com>
@@@ -3350,13 -3349,13 +3358,13 @@@ M:   Bruce Allan <bruce.w.allan@intel.com
  M:    Carolyn Wyborny <carolyn.wyborny@intel.com>
  M:    Don Skidmore <donald.c.skidmore@intel.com>
  M:    Greg Rose <gregory.v.rose@intel.com>
 -M:    PJ Waskiewicz <peter.p.waskiewicz.jr@intel.com>
 +M:    Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
  M:    Alex Duyck <alexander.h.duyck@intel.com>
  M:    John Ronciak <john.ronciak@intel.com>
  L:    e1000-devel@lists.sourceforge.net
  W:    http://e1000.sourceforge.net/
 -T:    git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-2.6.git
 -T:    git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next-2.6.git
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net.git
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next.git
  S:    Supported
  F:    Documentation/networking/e100.txt
  F:    Documentation/networking/e1000.txt
@@@ -3366,7 -3365,14 +3374,7 @@@ F:     Documentation/networking/igbvf.tx
  F:    Documentation/networking/ixgb.txt
  F:    Documentation/networking/ixgbe.txt
  F:    Documentation/networking/ixgbevf.txt
 -F:    drivers/net/e100.c
 -F:    drivers/net/e1000/
 -F:    drivers/net/e1000e/
 -F:    drivers/net/igb/
 -F:    drivers/net/igbvf/
 -F:    drivers/net/ixgb/
 -F:    drivers/net/ixgbe/
 -F:    drivers/net/ixgbevf/
 +F:    drivers/net/ethernet/intel/
  
  INTEL MRST PMU DRIVER
  M:    Len Brown <len.brown@intel.com>
@@@ -3434,7 -3440,7 +3442,7 @@@ IOC3 ETHERNET DRIVE
  M:    Ralf Baechle <ralf@linux-mips.org>
  L:    linux-mips@linux-mips.org
  S:    Maintained
 -F:    drivers/net/ioc3-eth.c
 +F:    drivers/net/ethernet/sgi/ioc3-eth.c
  
  IOC3 SERIAL DRIVER
  M:    Pat Gefre <pfg@sgi.com>
@@@ -3452,7 -3458,7 +3460,7 @@@ M:      Francois Romieu <romieu@fr.zoreil.co
  M:    Sorbica Shieh <sorbica@icplus.com.tw>
  L:    netdev@vger.kernel.org
  S:    Maintained
 -F:    drivers/net/ipg.*
 +F:    drivers/net/ethernet/icplus/ipg.*
  
  IPATH DRIVER
  M:    Mike Marciniszyn <infinipath@qlogic.com>
@@@ -3600,7 -3606,7 +3608,7 @@@ JME NETWORK DRIVE
  M:    Guo-Fu Tseng <cooldavid@cooldavid.org>
  L:    netdev@vger.kernel.org
  S:    Maintained
 -F:    drivers/net/jme.*
 +F:    drivers/net/ethernet/jme.*
  
  JOURNALLING FLASH FILE SYSTEM V2 (JFFS2)
  M:    David Woodhouse <dwmw2@infradead.org>
@@@ -4131,7 -4137,7 +4139,7 @@@ MARVELL MV643XX ETHERNET DRIVE
  M:    Lennert Buytenhek <buytenh@wantstofly.org>
  L:    netdev@vger.kernel.org
  S:    Maintained
 -F:    drivers/net/mv643xx_eth.*
 +F:    drivers/net/ethernet/marvell/mv643xx_eth.*
  F:    include/linux/mv643xx.h
  
  MARVELL MWIFIEX WIRELESS DRIVER
@@@ -4345,12 -4351,12 +4353,12 @@@ M:   Andrew Gallatin <gallatin@myri.com
  L:    netdev@vger.kernel.org
  W:    http://www.myri.com/scs/download-Myri10GE.html
  S:    Supported
 -F:    drivers/net/myri10ge/
 +F:    drivers/net/ethernet/myricom/myri10ge/
  
  NATSEMI ETHERNET DRIVER (DP8381x)
  M:    Tim Hockin <thockin@hockin.org>
  S:    Maintained
 -F:    drivers/net/natsemi.c
 +F:    drivers/net/ethernet/natsemi/natsemi.c
  
  NATIVE INSTRUMENTS USB SOUND INTERFACE DRIVER
  M:    Daniel Mack <zonque@gmail.com>
@@@ -4390,8 -4396,9 +4398,8 @@@ W:      http://trac.neterion.com/cgi-bin/tra
  W:    http://trac.neterion.com/cgi-bin/trac.cgi/wiki/X3100Linux?Anonymous
  S:    Supported
  F:    Documentation/networking/s2io.txt
 -F:    drivers/net/s2io*
  F:    Documentation/networking/vxge.txt
 -F:    drivers/net/vxge/
 +F:    drivers/net/ethernet/neterion/
  
  NETFILTER/IPTABLES/IPCHAINS
  P:    Rusty Russell
@@@ -4405,7 -4412,8 +4413,8 @@@ L:      netfilter@vger.kernel.or
  L:    coreteam@netfilter.org
  W:    http://www.netfilter.org/
  W:    http://www.iptables.org/
- T:    git git://git.kernel.org/pub/scm/linux/kernel/git/kaber/nf-2.6.git
+ T:    git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-2.6.git
+ T:    git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-next-2.6.git
  S:    Supported
  F:    include/linux/netfilter*
  F:    include/linux/netfilter/
@@@ -4451,8 -4459,8 +4460,8 @@@ M:      "David S. Miller" <davem@davemloft.n
  L:    netdev@vger.kernel.org
  W:    http://www.linuxfoundation.org/en/Net
  W:    http://patchwork.ozlabs.org/project/netdev/list/
- T:    git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.git
- T:    git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6.git
+ T:    git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
+ T:    git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
  S:    Maintained
  F:    net/
  F:    include/net/
@@@ -4504,23 -4512,11 +4513,23 @@@ F:   include/linux/if_
  F:    include/linux/*device.h
  
  NETXEN (1/10) GbE SUPPORT
 -M:    Amit Kumar Salecha <amit.salecha@qlogic.com>
 +M:    Sony Chacko <sony.chacko@qlogic.com>
 +M:    Rajesh Borundia <rajesh.borundia@qlogic.com>
  L:    netdev@vger.kernel.org
  W:    http://www.qlogic.com
  S:    Supported
 -F:    drivers/net/netxen/
 +F:    drivers/net/ethernet/qlogic/netxen/
 +
 +NFC SUBSYSTEM
 +M:    Lauro Ramos Venancio <lauro.venancio@openbossa.org>
 +M:    Aloisio Almeida Jr <aloisio.almeida@openbossa.org>
 +M:    Samuel Ortiz <sameo@linux.intel.com>
 +L:    linux-wireless@vger.kernel.org
 +S:    Maintained
 +F:    net/nfc/
 +F:    include/linux/nfc.h
 +F:    include/net/nfc.h
 +F:    drivers/nfc/
  
  NFS, SUNRPC, AND LOCKD CLIENTS
  M:    Trond Myklebust <Trond.Myklebust@netapp.com>
@@@ -4541,7 -4537,7 +4550,7 @@@ M:      Jan-Pascal van Best <janpascal@vanbe
  M:    Andreas Mohr <andi@lisas.de>
  L:    netdev@vger.kernel.org
  S:    Maintained
 -F:    drivers/net/ni5010.*
 +F:    drivers/net/ethernet/racal/ni5010.*
  
  NILFS2 FILESYSTEM
  M:    KONISHI Ryusuke <konishi.ryusuke@lab.ntt.co.jp>
@@@ -4617,7 -4613,7 +4626,7 @@@ F:      arch/arm/mach-omap2/clockdomain2xxx_
  F:    arch/arm/mach-omap2/clockdomain44xx.c
  
  OMAP AUDIO SUPPORT
- M:    Jarkko Nikula <jhnikula@gmail.com>
+ M:    Jarkko Nikula <jarkko.nikula@bitmer.com>
  L:    alsa-devel@alsa-project.org (subscribers-only)
  L:    linux-omap@vger.kernel.org
  S:    Maintained
@@@ -4787,7 -4783,7 +4796,7 @@@ F:      drivers/net/wireless/orinoco
  
  OSD LIBRARY and FILESYSTEM
  M:    Boaz Harrosh <bharrosh@panasas.com>
- M:    Benny Halevy <bhalevy@panasas.com>
+ M:    Benny Halevy <bhalevy@tonian.com>
  L:    osd-dev@open-osd.org
  W:    http://open-osd.org
  T:    git git://git.open-osd.org/open-osd.git
@@@ -4807,7 -4803,7 +4816,7 @@@ PA SEMI ETHERNET DRIVE
  M:    Olof Johansson <olof@lixom.net>
  L:    netdev@vger.kernel.org
  S:    Maintained
 -F:    drivers/net/pasemi_mac.*
 +F:    drivers/net/ethernet/pasemi/*
  
  PA SEMI SMBUS DRIVER
  M:    Olof Johansson <olof@lixom.net>
@@@ -4954,7 -4950,7 +4963,7 @@@ PCNET32 NETWORK DRIVE
  M:    Don Fry <pcnet32@frontier.com>
  L:    netdev@vger.kernel.org
  S:    Maintained
 -F:    drivers/net/pcnet32.c
 +F:    drivers/net/ethernet/amd/pcnet32.c
  
  PCRYPT PARALLEL CRYPTO ENGINE
  M:    Steffen Klassert <steffen.klassert@secunet.com>
@@@ -4984,7 -4980,7 +4993,7 @@@ M:      Paul Mackerras <paulus@samba.org
  M:    Ingo Molnar <mingo@elte.hu>
  M:    Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
  S:    Supported
- F:    kernel/perf_event*.c
+ F:    kernel/events/*
  F:    include/linux/perf_event.h
  F:    arch/*/kernel/perf_event*.c
  F:    arch/*/kernel/*/perf_event*.c
@@@ -5086,7 -5082,7 +5095,7 @@@ PPP PROTOCOL DRIVERS AND COMPRESSOR
  M:    Paul Mackerras <paulus@samba.org>
  L:    linux-ppp@vger.kernel.org
  S:    Maintained
 -F:    drivers/net/ppp_*
 +F:    drivers/net/ppp/ppp_*
  
  PPP OVER ATM (RFC 2364)
  M:    Mitchell Blank Jr <mitch@sfgoth.com>
@@@ -5097,8 -5093,8 +5106,8 @@@ F:      include/linux/atmppp.
  PPP OVER ETHERNET
  M:    Michal Ostrowski <mostrows@earthlink.net>
  S:    Maintained
 -F:    drivers/net/pppoe.c
 -F:    drivers/net/pppox.c
 +F:    drivers/net/ppp/pppoe.c
 +F:    drivers/net/ppp/pppox.c
  
  PPP OVER L2TP
  M:    James Chapman <jchapman@katalix.com>
@@@ -5119,7 -5115,7 +5128,7 @@@ PPTP DRIVE
  M:    Dmitry Kozlov <xeb@mail.ru>
  L:    netdev@vger.kernel.org
  S:    Maintained
 -F:    drivers/net/pptp.c
 +F:    drivers/net/ppp/pptp.c
  W:    http://sourceforge.net/projects/accel-pptp
  
  PREEMPTIBLE KERNEL
@@@ -5148,7 -5144,7 +5157,7 @@@ M:      Geoff Levand <geoff@infradead.org
  L:    netdev@vger.kernel.org
  L:    cbe-oss-dev@lists.ozlabs.org
  S:    Maintained
 -F:    drivers/net/ps3_gelic_net.*
 +F:    drivers/net/ethernet/toshiba/ps3_gelic_net.*
  
  PS3 PLATFORM SUPPORT
  M:    Geoff Levand <geoff@infradead.org>
@@@ -5266,24 -5262,23 +5275,24 @@@ M:   linux-driver@qlogic.co
  L:    netdev@vger.kernel.org
  S:    Supported
  F:    Documentation/networking/LICENSE.qla3xxx
 -F:    drivers/net/qla3xxx.*
 +F:    drivers/net/ethernet/qlogic/qla3xxx.*
  
  QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
 -M:    Amit Kumar Salecha <amit.salecha@qlogic.com>
  M:    Anirban Chakraborty <anirban.chakraborty@qlogic.com>
 +M:    Sony Chacko <sony.chacko@qlogic.com>
  M:    linux-driver@qlogic.com
  L:    netdev@vger.kernel.org
  S:    Supported
 -F:    drivers/net/qlcnic/
 +F:    drivers/net/ethernet/qlogic/qlcnic/
  
  QLOGIC QLGE 10Gb ETHERNET DRIVER
 +M:    Anirban Chakraborty <anirban.chakraborty@qlogic.com>
  M:    Jitendra Kalsaria <jitendra.kalsaria@qlogic.com>
  M:    Ron Mercer <ron.mercer@qlogic.com>
  M:    linux-driver@qlogic.com
  L:    netdev@vger.kernel.org
  S:    Supported
 -F:    drivers/net/qlge/
 +F:    drivers/net/ethernet/qlogic/qlge/
  
  QNX4 FILESYSTEM
  M:    Anders Larsen <al@alarsen.net>
@@@ -5365,7 -5360,7 +5374,7 @@@ RDC R6040 FAST ETHERNET DRIVE
  M:    Florian Fainelli <florian@openwrt.org>
  L:    netdev@vger.kernel.org
  S:    Maintained
 -F:    drivers/net/r6040.c
 +F:    drivers/net/ethernet/rdc/r6040.c
  
  RDS - RELIABLE DATAGRAM SOCKETS
  M:    Andy Grover <andy.grover@oracle.com>
@@@ -5546,6 -5541,7 +5555,7 @@@ F:      include/media/*7146
  
  SAMSUNG AUDIO (ASoC) DRIVERS
  M:    Jassi Brar <jassisinghbrar@gmail.com>
+ M:    Sangbeom Kim <sbkim73@samsung.com>
  L:    alsa-devel@alsa-project.org (moderated for non-subscribers)
  S:    Supported
  F:    sound/soc/samsung
@@@ -5768,7 -5764,7 +5778,7 @@@ M:      Ajit Khaparde <ajit.khaparde@emulex.
  L:    netdev@vger.kernel.org
  W:    http://www.emulex.com
  S:    Supported
 -F:    drivers/net/benet/
 +F:    drivers/net/ethernet/emulex/benet/
  
  SFC NETWORK DRIVER
  M:    Solarflare linux maintainers <linux-net-drivers@solarflare.com>
@@@ -5776,7 -5772,7 +5786,7 @@@ M:      Steve Hodgson <shodgson@solarflare.c
  M:    Ben Hutchings <bhutchings@solarflare.com>
  L:    netdev@vger.kernel.org
  S:    Supported
 -F:    drivers/net/sfc/
 +F:    drivers/net/ethernet/sfc/
  
  SGI GRU DRIVER
  M:    Jack Steiner <steiner@sgi.com>
@@@ -5842,14 -5838,14 +5852,14 @@@ SIS 190 ETHERNET DRIVE
  M:    Francois Romieu <romieu@fr.zoreil.com>
  L:    netdev@vger.kernel.org
  S:    Maintained
 -F:    drivers/net/sis190.c
 +F:    drivers/net/ethernet/sis/sis190.c
  
  SIS 900/7016 FAST ETHERNET DRIVER
  M:    Daniele Venzano <venza@brownhat.org>
  W:    http://www.brownhat.org/sis900.html
  L:    netdev@vger.kernel.org
  S:    Maintained
 -F:    drivers/net/sis900.*
 +F:    drivers/net/ethernet/sis/sis900.*
  
  SIS 96X I2C/SMBUS DRIVER
  M:    "Mark M. Hoffman" <mhoffman@lightlink.com>
@@@ -5876,7 -5872,8 +5886,7 @@@ SKGE, SKY2 10/100/1000 GIGABIT ETHERNE
  M:    Stephen Hemminger <shemminger@linux-foundation.org>
  L:    netdev@vger.kernel.org
  S:    Maintained
 -F:    drivers/net/skge.*
 -F:    drivers/net/sky2.*
 +F:    drivers/net/ethernet/marvell/sk*
  
  SLAB ALLOCATOR
  M:    Christoph Lameter <cl@linux-foundation.org>
@@@ -5890,7 -5887,7 +5900,7 @@@ F:      mm/sl?b.
  SMC91x ETHERNET DRIVER
  M:    Nicolas Pitre <nico@fluxnic.net>
  S:    Odd Fixes
 -F:    drivers/net/smc91x.*
 +F:    drivers/net/ethernet/smsc/smc91x.*
  
  SMM665 HARDWARE MONITOR DRIVER
  M:    Guenter Roeck <linux@roeck-us.net>
@@@ -5925,13 -5922,13 +5935,13 @@@ M:   Steve Glendinning <steve.glendinning
  L:    netdev@vger.kernel.org
  S:    Supported
  F:    include/linux/smsc911x.h
 -F:    drivers/net/smsc911x.*
 +F:    drivers/net/ethernet/smsc/smsc911x.*
  
  SMSC9420 PCI ETHERNET DRIVER
  M:    Steve Glendinning <steve.glendinning@smsc.com>
  L:    netdev@vger.kernel.org
  S:    Supported
 -F:    drivers/net/smsc9420.*
 +F:    drivers/net/ethernet/smsc/smsc9420.*
  
  SN-IA64 (Itanium) SUB-PLATFORM
  M:    Jes Sorensen <jes@sgi.com>
@@@ -5965,7 -5962,7 +5975,7 @@@ SONIC NETWORK DRIVE
  M:    Thomas Bogendoerfer <tsbogend@alpha.franken.de>
  L:    netdev@vger.kernel.org
  S:    Maintained
 -F:    drivers/net/sonic.*
 +F:    drivers/net/ethernet/natsemi/sonic.*
  
  SONICS SILICON BACKPLANE DRIVER (SSB)
  M:    Michael Buesch <m@bues.ch>
@@@ -6106,7 -6103,7 +6116,7 @@@ M:      Jens Osterkamp <jens@de.ibm.com
  L:    netdev@vger.kernel.org
  S:    Supported
  F:    Documentation/networking/spider_net.txt
 -F:    drivers/net/spider_net*
 +F:    drivers/net/ethernet/toshiba/spider_net*
  
  SPU FILE SYSTEM
  M:    Jeremy Kerr <jk@ozlabs.org>
@@@ -6153,6 -6150,12 +6163,6 @@@ M:     Jakub Schmidtke <sjakub@gmail.com
  S:    Odd Fixes
  F:    drivers/staging/asus_oled/
  
 -STAGING - ATHEROS ATH6KL WIRELESS DRIVER
 -M:    Luis R. Rodriguez <mcgrof@gmail.com>
 -M:    Naveen Singh <nsingh@atheros.com>
 -S:    Odd Fixes
 -F:    drivers/staging/ath6kl/
 -
  STAGING - COMEDI
  M:    Ian Abbott <abbotti@mev.co.uk>
  M:    Mori Hess <fmhess@users.sourceforge.net>
@@@ -6278,7 -6281,7 +6288,7 @@@ F:      drivers/staging/xgifb
  STARFIRE/DURALAN NETWORK DRIVER
  M:    Ion Badulescu <ionut@badula.org>
  S:    Odd Fixes
 -F:    drivers/net/starfire*
 +F:    drivers/net/ethernet/adaptec/starfire*
  
  SUN3/3X
  M:    Sam Creasey <sammy@sammy.net>
@@@ -6287,7 -6290,6 +6297,7 @@@ S:      Maintaine
  F:    arch/m68k/kernel/*sun3*
  F:    arch/m68k/sun3*/
  F:    arch/m68k/include/asm/sun3*
 +F:    drivers/net/ethernet/i825xx/sun3*
  
  SUPERH
  M:    Paul Mundt <lethal@linux-sh.org>
@@@ -6376,7 -6378,7 +6386,7 @@@ M:      Alexander Indenbaum <baum@tehutinetw
  M:    Andy Gospodarek <andy@greyhouse.net>
  L:    netdev@vger.kernel.org
  S:    Supported
 -F:    drivers/net/tehuti*
 +F:    drivers/net/ethernet/tehuti/*
  
  Telecom Clock Driver for MCPL0010
  M:    Mark Gross <mark.gross@intel.com>
@@@ -6427,7 -6429,7 +6437,7 @@@ W:      http://www.tilera.com/scm
  S:    Supported
  F:    arch/tile/
  F:    drivers/tty/hvc/hvc_tile.c
 -F:    drivers/net/tile/
 +F:    drivers/net/ethernet/tile/
  F:    drivers/edac/tile_edac.c
  
  TLAN NETWORK DRIVER
@@@ -6436,7 -6438,7 +6446,7 @@@ L:      tlan-devel@lists.sourceforge.net (su
  W:    http://sourceforge.net/projects/tlan/
  S:    Maintained
  F:    Documentation/networking/tlan.txt
 -F:    drivers/net/tlan.*
 +F:    drivers/net/ethernet/ti/tlan.*
  
  TOMOYO SECURITY MODULE
  M:    Kentaro Takeda <takedakn@nttdata.co.jp>
@@@ -6530,7 -6532,7 +6540,7 @@@ TULIP NETWORK DRIVER
  M:    Grant Grundler <grundler@parisc-linux.org>
  L:    netdev@vger.kernel.org
  S:    Maintained
 -F:    drivers/net/tulip/
 +F:    drivers/net/ethernet/tulip/
  
  TUN/TAP driver
  M:    Maxim Krasnyansky <maxk@qualcomm.com>
@@@ -6576,7 -6578,7 +6586,7 @@@ W:      http://uclinux-h8.sourceforge.jp
  S:    Supported
  F:    arch/h8300/
  F:    drivers/ide/ide-h8300.c
 -F:    drivers/net/ne-h8300.c
 +F:    drivers/net/ethernet/8390/ne-h8300.c
  
  UDF FILESYSTEM
  M:    Jan Kara <jack@suse.cz>
@@@ -7004,7 -7006,7 +7014,7 @@@ F:      include/linux/vhost.
  VIA RHINE NETWORK DRIVER
  M:    Roger Luethi <rl@hellgate.ch>
  S:    Maintained
 -F:    drivers/net/via-rhine.c
 +F:    drivers/net/ethernet/via/via-rhine.c
  
  VIAPRO SMBUS DRIVER
  M:    Jean Delvare <khali@linux-fr.org>
@@@ -7032,7 -7034,7 +7042,7 @@@ VIA VELOCITY NETWORK DRIVE
  M:    Francois Romieu <romieu@fr.zoreil.com>
  L:    netdev@vger.kernel.org
  S:    Maintained
 -F:    drivers/net/via-velocity.*
 +F:    drivers/net/ethernet/via/via-velocity.*
  
  VLAN (802.1Q)
  M:    Patrick McHardy <kaber@trash.net>
@@@ -7095,7 -7097,7 +7105,7 @@@ S:      Supporte
  F:    drivers/mmc/host/vub300.c
  
  W1 DALLAS'S 1-WIRE BUS
- M:    Evgeniy Polyakov <johnpol@2ka.mipt.ru>
+ M:    Evgeniy Polyakov <zbr@ioremap.net>
  S:    Maintained
  F:    Documentation/w1/
  F:    drivers/w1/
@@@ -7207,6 -7209,9 +7217,9 @@@ W:      http://opensource.wolfsonmicro.com/c
  S:    Supported
  F:    Documentation/hwmon/wm83??
  F:    drivers/leds/leds-wm83*.c
+ F:    drivers/input/misc/wm831x-on.c
+ F:    drivers/input/touchscreen/wm831x-ts.c
+ F:    drivers/input/touchscreen/wm97*.c
  F:    drivers/mfd/wm8*.c
  F:    drivers/power/wm83*.c
  F:    drivers/rtc/rtc-wm83*.c
@@@ -7216,6 -7221,7 +7229,7 @@@ F:      drivers/watchdog/wm83*_wdt.
  F:    include/linux/mfd/wm831x/
  F:    include/linux/mfd/wm8350/
  F:    include/linux/mfd/wm8400*
+ F:    include/linux/wm97xx.h
  F:    include/sound/wm????.h
  F:    sound/soc/codecs/wm*
  
diff --combined drivers/bcma/main.c
@@@ -15,6 -15,7 +15,7 @@@ MODULE_LICENSE("GPL")
  static int bcma_bus_match(struct device *dev, struct device_driver *drv);
  static int bcma_device_probe(struct device *dev);
  static int bcma_device_remove(struct device *dev);
+ static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env);
  
  static ssize_t manuf_show(struct device *dev, struct device_attribute *attr, char *buf)
  {
@@@ -49,6 -50,7 +50,7 @@@ static struct bus_type bcma_bus_type = 
        .match          = bcma_bus_match,
        .probe          = bcma_device_probe,
        .remove         = bcma_device_remove,
+       .uevent         = bcma_device_uevent,
        .dev_attrs      = bcma_device_attrs,
  };
  
@@@ -66,10 -68,6 +68,10 @@@ static struct bcma_device *bcma_find_co
  static void bcma_release_core_dev(struct device *dev)
  {
        struct bcma_device *core = container_of(dev, struct bcma_device, dev);
 +      if (core->io_addr)
 +              iounmap(core->io_addr);
 +      if (core->io_wrap)
 +              iounmap(core->io_wrap);
        kfree(core);
  }
  
@@@ -84,7 -82,6 +86,7 @@@ static int bcma_register_cores(struct b
                case BCMA_CORE_CHIPCOMMON:
                case BCMA_CORE_PCI:
                case BCMA_CORE_PCIE:
 +              case BCMA_CORE_MIPS_74K:
                        continue;
                }
  
                        core->dma_dev = &bus->host_pci->dev;
                        core->irq = bus->host_pci->irq;
                        break;
 -              case BCMA_HOSTTYPE_NONE:
 +              case BCMA_HOSTTYPE_SOC:
 +                      core->dev.dma_mask = &core->dev.coherent_dma_mask;
 +                      core->dma_dev = &core->dev;
 +                      break;
                case BCMA_HOSTTYPE_SDIO:
                        break;
                }
@@@ -148,13 -142,6 +150,13 @@@ int bcma_bus_register(struct bcma_bus *
                bcma_core_chipcommon_init(&bus->drv_cc);
        }
  
 +      /* Init MIPS core */
 +      core = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
 +      if (core) {
 +              bus->drv_mips.core = core;
 +              bcma_core_mips_init(&bus->drv_mips);
 +      }
 +
        /* Init PCIE core */
        core = bcma_find_core(bus, BCMA_CORE_PCIE);
        if (core) {
@@@ -184,59 -171,6 +186,59 @@@ void bcma_bus_unregister(struct bcma_bu
        bcma_unregister_cores(bus);
  }
  
 +int __init bcma_bus_early_register(struct bcma_bus *bus,
 +                                 struct bcma_device *core_cc,
 +                                 struct bcma_device *core_mips)
 +{
 +      int err;
 +      struct bcma_device *core;
 +      struct bcma_device_id match;
 +
 +      bcma_init_bus(bus);
 +
 +      match.manuf = BCMA_MANUF_BCM;
 +      match.id = BCMA_CORE_CHIPCOMMON;
 +      match.class = BCMA_CL_SIM;
 +      match.rev = BCMA_ANY_REV;
 +
 +      /* Scan for chip common core */
 +      err = bcma_bus_scan_early(bus, &match, core_cc);
 +      if (err) {
 +              pr_err("Failed to scan for common core: %d\n", err);
 +              return -1;
 +      }
 +
 +      match.manuf = BCMA_MANUF_MIPS;
 +      match.id = BCMA_CORE_MIPS_74K;
 +      match.class = BCMA_CL_SIM;
 +      match.rev = BCMA_ANY_REV;
 +
 +      /* Scan for mips core */
 +      err = bcma_bus_scan_early(bus, &match, core_mips);
 +      if (err) {
 +              pr_err("Failed to scan for mips core: %d\n", err);
 +              return -1;
 +      }
 +
 +      /* Init CC core */
 +      core = bcma_find_core(bus, BCMA_CORE_CHIPCOMMON);
 +      if (core) {
 +              bus->drv_cc.core = core;
 +              bcma_core_chipcommon_init(&bus->drv_cc);
 +      }
 +
 +      /* Init MIPS core */
 +      core = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
 +      if (core) {
 +              bus->drv_mips.core = core;
 +              bcma_core_mips_init(&bus->drv_mips);
 +      }
 +
 +      pr_info("Early bus registered\n");
 +
 +      return 0;
 +}
 +
  int __bcma_driver_register(struct bcma_driver *drv, struct module *owner)
  {
        drv->drv.name = drv->name;
@@@ -295,6 -229,16 +297,16 @@@ static int bcma_device_remove(struct de
        return 0;
  }
  
+ static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env)
+ {
+       struct bcma_device *core = container_of(dev, struct bcma_device, dev);
+ 
+       return add_uevent_var(env,
+                             "MODALIAS=bcma:m%04Xid%04Xrev%02Xcl%02X",
+                             core->id.manuf, core->id.id,
+                             core->id.rev, core->id.class);
+ }
+ 
  static int __init bcma_modinit(void)
  {
        int err;
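
As an aside on the bcma_device_uevent() hook added above (not part of the patch): a small stand-alone C snippet showing the MODALIAS string that format produces. The manuf/id/rev/class values below are made-up placeholders, not real core IDs.

/* Hypothetical illustration only -- format string copied from the hunk above,
 * numeric values are placeholders. */
#include <stdio.h>

int main(void)
{
	unsigned int manuf = 0x4bf, id = 0x812, rev = 0x17, class = 0x0;

	printf("MODALIAS=bcma:m%04Xid%04Xrev%02Xcl%02X\n",
	       manuf, id, rev, class);
	return 0;
}
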
diff --combined drivers/infiniband/ulp/ipoib/ipoib_main.c
@@@ -717,11 -717,13 +717,13 @@@ static int ipoib_start_xmit(struct sk_b
  {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_neigh *neigh;
-       struct neighbour *n;
+       struct neighbour *n = NULL;
        unsigned long flags;
  
-       n = dst_get_neighbour(skb_dst(skb));
-       if (likely(skb_dst(skb) && n)) {
+       if (likely(skb_dst(skb)))
+               n = dst_get_neighbour(skb_dst(skb));
+       if (likely(n)) {
                if (unlikely(!*to_ipoib_neigh(n))) {
                        ipoib_path_lookup(skb, dev);
                        return NETDEV_TX_OK;
@@@ -996,7 -998,7 +998,7 @@@ static const struct net_device_ops ipoi
        .ndo_fix_features        = ipoib_fix_features,
        .ndo_start_xmit          = ipoib_start_xmit,
        .ndo_tx_timeout          = ipoib_timeout,
 -      .ndo_set_multicast_list  = ipoib_set_mcast_list,
 +      .ndo_set_rx_mode         = ipoib_set_mcast_list,
        .ndo_neigh_setup         = ipoib_neigh_setup_dev,
  };
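
As an aside on the first ipoib_start_xmit() hunk above (not part of the patch): it reorders the checks so dst_get_neighbour() is only called once skb_dst() is known to be non-NULL. A self-contained toy sketch of that ordering, using stand-in types rather than the kernel API:

/* Hypothetical illustration only -- stand-in types, not kernel code. */
#include <stdio.h>
#include <stddef.h>

struct neighbour { int id; };
struct dst_entry { struct neighbour *neigh; };

static struct neighbour *get_neighbour(struct dst_entry *dst)
{
	return dst->neigh;		/* would crash on a NULL dst */
}

static struct neighbour *lookup(struct dst_entry *dst)
{
	struct neighbour *n = NULL;

	if (dst)			/* check the dst first ... */
		n = get_neighbour(dst);	/* ... then it is safe to use it */
	return n;
}

int main(void)
{
	struct neighbour a = { 1 };
	struct dst_entry d = { &a };

	printf("%p %p\n", (void *)lookup(&d), (void *)lookup(NULL));
	return 0;
}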
  
diff --combined drivers/net/ethernet/aeroflex/greth.c
index bc3bd34,0000000..6715bf5
mode 100644,000000..100644
--- /dev/null
@@@ -1,1633 -1,0 +1,1641 @@@
 +/*
 + * Aeroflex Gaisler GRETH 10/100/1G Ethernet MAC.
 + *
 + * 2005-2010 (c) Aeroflex Gaisler AB
 + *
 + * This driver supports GRETH 10/100 and GRETH 10/100/1G Ethernet MACs
 + * available in the GRLIB VHDL IP core library.
 + *
 + * Full documentation of both cores can be found here:
 + * http://www.gaisler.com/products/grlib/grip.pdf
 + *
 + * The Gigabit version supports scatter/gather DMA, any alignment of
 + * buffers and checksum offloading.
 + *
 + * This program is free software; you can redistribute it and/or modify it
 + * under the terms of the GNU General Public License as published by the
 + * Free Software Foundation; either version 2 of the License, or (at your
 + * option) any later version.
 + *
 + * Contributors: Kristoffer Glembo
 + *               Daniel Hellstrom
 + *               Marko Isomaki
 + */
 +
 +#include <linux/dma-mapping.h>
 +#include <linux/module.h>
 +#include <linux/uaccess.h>
 +#include <linux/init.h>
 +#include <linux/interrupt.h>
 +#include <linux/netdevice.h>
 +#include <linux/etherdevice.h>
 +#include <linux/ethtool.h>
 +#include <linux/skbuff.h>
 +#include <linux/io.h>
 +#include <linux/crc32.h>
 +#include <linux/mii.h>
 +#include <linux/of_device.h>
 +#include <linux/of_platform.h>
 +#include <linux/slab.h>
 +#include <asm/cacheflush.h>
 +#include <asm/byteorder.h>
 +
 +#ifdef CONFIG_SPARC
 +#include <asm/idprom.h>
 +#endif
 +
 +#include "greth.h"
 +
 +#define GRETH_DEF_MSG_ENABLE    \
 +      (NETIF_MSG_DRV          | \
 +       NETIF_MSG_PROBE        | \
 +       NETIF_MSG_LINK         | \
 +       NETIF_MSG_IFDOWN       | \
 +       NETIF_MSG_IFUP         | \
 +       NETIF_MSG_RX_ERR       | \
 +       NETIF_MSG_TX_ERR)
 +
 +static int greth_debug = -1;  /* -1 == use GRETH_DEF_MSG_ENABLE as value */
 +module_param(greth_debug, int, 0);
 +MODULE_PARM_DESC(greth_debug, "GRETH bitmapped debugging message enable value");
 +
 +/* Accept MAC address of the form macaddr=0x08,0x00,0x20,0x30,0x40,0x50 */
 +static int macaddr[6];
 +module_param_array(macaddr, int, NULL, 0);
 +MODULE_PARM_DESC(macaddr, "GRETH Ethernet MAC address");
 +
 +static int greth_edcl = 1;
 +module_param(greth_edcl, int, 0);
 +MODULE_PARM_DESC(greth_edcl, "GRETH EDCL usage indicator. Set to 1 if EDCL is used.");
 +
 +static int greth_open(struct net_device *dev);
 +static netdev_tx_t greth_start_xmit(struct sk_buff *skb,
 +         struct net_device *dev);
 +static netdev_tx_t greth_start_xmit_gbit(struct sk_buff *skb,
 +         struct net_device *dev);
 +static int greth_rx(struct net_device *dev, int limit);
 +static int greth_rx_gbit(struct net_device *dev, int limit);
 +static void greth_clean_tx(struct net_device *dev);
 +static void greth_clean_tx_gbit(struct net_device *dev);
 +static irqreturn_t greth_interrupt(int irq, void *dev_id);
 +static int greth_close(struct net_device *dev);
 +static int greth_set_mac_add(struct net_device *dev, void *p);
 +static void greth_set_multicast_list(struct net_device *dev);
 +
 +#define GRETH_REGLOAD(a)          (be32_to_cpu(__raw_readl(&(a))))
 +#define GRETH_REGSAVE(a, v)         (__raw_writel(cpu_to_be32(v), &(a)))
 +#define GRETH_REGORIN(a, v)         (GRETH_REGSAVE(a, (GRETH_REGLOAD(a) | (v))))
 +#define GRETH_REGANDIN(a, v)        (GRETH_REGSAVE(a, (GRETH_REGLOAD(a) & (v))))
 +
 +#define NEXT_TX(N)      (((N) + 1) & GRETH_TXBD_NUM_MASK)
 +#define SKIP_TX(N, C)   (((N) + C) & GRETH_TXBD_NUM_MASK)
 +#define NEXT_RX(N)      (((N) + 1) & GRETH_RXBD_NUM_MASK)
 +
 +static void greth_print_rx_packet(void *addr, int len)
 +{
 +      print_hex_dump(KERN_DEBUG, "RX: ", DUMP_PREFIX_OFFSET, 16, 1,
 +                      addr, len, true);
 +}
 +
 +static void greth_print_tx_packet(struct sk_buff *skb)
 +{
 +      int i;
 +      int length;
 +
 +      if (skb_shinfo(skb)->nr_frags == 0)
 +              length = skb->len;
 +      else
 +              length = skb_headlen(skb);
 +
 +      print_hex_dump(KERN_DEBUG, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
 +                      skb->data, length, true);
 +
 +      for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 +
 +              print_hex_dump(KERN_DEBUG, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
 +                             skb_frag_address(&skb_shinfo(skb)->frags[i]),
 +                             skb_shinfo(skb)->frags[i].size, true);
 +      }
 +}
 +
 +static inline void greth_enable_tx(struct greth_private *greth)
 +{
 +      wmb();
 +      GRETH_REGORIN(greth->regs->control, GRETH_TXEN);
 +}
 +
 +static inline void greth_disable_tx(struct greth_private *greth)
 +{
 +      GRETH_REGANDIN(greth->regs->control, ~GRETH_TXEN);
 +}
 +
 +static inline void greth_enable_rx(struct greth_private *greth)
 +{
 +      wmb();
 +      GRETH_REGORIN(greth->regs->control, GRETH_RXEN);
 +}
 +
 +static inline void greth_disable_rx(struct greth_private *greth)
 +{
 +      GRETH_REGANDIN(greth->regs->control, ~GRETH_RXEN);
 +}
 +
 +static inline void greth_enable_irqs(struct greth_private *greth)
 +{
 +      GRETH_REGORIN(greth->regs->control, GRETH_RXI | GRETH_TXI);
 +}
 +
 +static inline void greth_disable_irqs(struct greth_private *greth)
 +{
 +      GRETH_REGANDIN(greth->regs->control, ~(GRETH_RXI|GRETH_TXI));
 +}
 +
 +static inline void greth_write_bd(u32 *bd, u32 val)
 +{
 +      __raw_writel(cpu_to_be32(val), bd);
 +}
 +
 +static inline u32 greth_read_bd(u32 *bd)
 +{
 +      return be32_to_cpu(__raw_readl(bd));
 +}
 +
 +static void greth_clean_rings(struct greth_private *greth)
 +{
 +      int i;
 +      struct greth_bd *rx_bdp = greth->rx_bd_base;
 +      struct greth_bd *tx_bdp = greth->tx_bd_base;
 +
 +      if (greth->gbit_mac) {
 +
 +              /* Free and unmap RX buffers */
 +              for (i = 0; i < GRETH_RXBD_NUM; i++, rx_bdp++) {
 +                      if (greth->rx_skbuff[i] != NULL) {
 +                              dev_kfree_skb(greth->rx_skbuff[i]);
 +                              dma_unmap_single(greth->dev,
 +                                               greth_read_bd(&rx_bdp->addr),
 +                                               MAX_FRAME_SIZE+NET_IP_ALIGN,
 +                                               DMA_FROM_DEVICE);
 +                      }
 +              }
 +
 +              /* TX buffers */
 +              while (greth->tx_free < GRETH_TXBD_NUM) {
 +
 +                      struct sk_buff *skb = greth->tx_skbuff[greth->tx_last];
 +                      int nr_frags = skb_shinfo(skb)->nr_frags;
 +                      tx_bdp = greth->tx_bd_base + greth->tx_last;
 +                      greth->tx_last = NEXT_TX(greth->tx_last);
 +
 +                      dma_unmap_single(greth->dev,
 +                                       greth_read_bd(&tx_bdp->addr),
 +                                       skb_headlen(skb),
 +                                       DMA_TO_DEVICE);
 +
 +                      for (i = 0; i < nr_frags; i++) {
 +                              skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 +                              tx_bdp = greth->tx_bd_base + greth->tx_last;
 +
 +                              dma_unmap_page(greth->dev,
 +                                             greth_read_bd(&tx_bdp->addr),
 +                                             frag->size,
 +                                             DMA_TO_DEVICE);
 +
 +                              greth->tx_last = NEXT_TX(greth->tx_last);
 +                      }
 +                      greth->tx_free += nr_frags+1;
 +                      dev_kfree_skb(skb);
 +              }
 +
 +
 +      } else { /* 10/100 Mbps MAC */
 +
 +              for (i = 0; i < GRETH_RXBD_NUM; i++, rx_bdp++) {
 +                      kfree(greth->rx_bufs[i]);
 +                      dma_unmap_single(greth->dev,
 +                                       greth_read_bd(&rx_bdp->addr),
 +                                       MAX_FRAME_SIZE,
 +                                       DMA_FROM_DEVICE);
 +              }
 +              for (i = 0; i < GRETH_TXBD_NUM; i++, tx_bdp++) {
 +                      kfree(greth->tx_bufs[i]);
 +                      dma_unmap_single(greth->dev,
 +                                       greth_read_bd(&tx_bdp->addr),
 +                                       MAX_FRAME_SIZE,
 +                                       DMA_TO_DEVICE);
 +              }
 +      }
 +}
 +
 +static int greth_init_rings(struct greth_private *greth)
 +{
 +      struct sk_buff *skb;
 +      struct greth_bd *rx_bd, *tx_bd;
 +      u32 dma_addr;
 +      int i;
 +
 +      rx_bd = greth->rx_bd_base;
 +      tx_bd = greth->tx_bd_base;
 +
 +      /* Initialize descriptor rings and buffers */
 +      if (greth->gbit_mac) {
 +
 +              for (i = 0; i < GRETH_RXBD_NUM; i++) {
 +                      skb = netdev_alloc_skb(greth->netdev, MAX_FRAME_SIZE+NET_IP_ALIGN);
 +                      if (skb == NULL) {
 +                              if (netif_msg_ifup(greth))
 +                                      dev_err(greth->dev, "Error allocating DMA ring.\n");
 +                              goto cleanup;
 +                      }
 +                      skb_reserve(skb, NET_IP_ALIGN);
 +                      dma_addr = dma_map_single(greth->dev,
 +                                                skb->data,
 +                                                MAX_FRAME_SIZE+NET_IP_ALIGN,
 +                                                DMA_FROM_DEVICE);
 +
 +                      if (dma_mapping_error(greth->dev, dma_addr)) {
 +                              if (netif_msg_ifup(greth))
 +                                      dev_err(greth->dev, "Could not create initial DMA mapping\n");
 +                              goto cleanup;
 +                      }
 +                      greth->rx_skbuff[i] = skb;
 +                      greth_write_bd(&rx_bd[i].addr, dma_addr);
 +                      greth_write_bd(&rx_bd[i].stat, GRETH_BD_EN | GRETH_BD_IE);
 +              }
 +
 +      } else {
 +
 +              /* 10/100 MAC uses a fixed set of buffers and copy to/from SKBs */
 +              for (i = 0; i < GRETH_RXBD_NUM; i++) {
 +
 +                      greth->rx_bufs[i] = kmalloc(MAX_FRAME_SIZE, GFP_KERNEL);
 +
 +                      if (greth->rx_bufs[i] == NULL) {
 +                              if (netif_msg_ifup(greth))
 +                                      dev_err(greth->dev, "Error allocating DMA ring.\n");
 +                              goto cleanup;
 +                      }
 +
 +                      dma_addr = dma_map_single(greth->dev,
 +                                                greth->rx_bufs[i],
 +                                                MAX_FRAME_SIZE,
 +                                                DMA_FROM_DEVICE);
 +
 +                      if (dma_mapping_error(greth->dev, dma_addr)) {
 +                              if (netif_msg_ifup(greth))
 +                                      dev_err(greth->dev, "Could not create initial DMA mapping\n");
 +                              goto cleanup;
 +                      }
 +                      greth_write_bd(&rx_bd[i].addr, dma_addr);
 +                      greth_write_bd(&rx_bd[i].stat, GRETH_BD_EN | GRETH_BD_IE);
 +              }
 +              for (i = 0; i < GRETH_TXBD_NUM; i++) {
 +
 +                      greth->tx_bufs[i] = kmalloc(MAX_FRAME_SIZE, GFP_KERNEL);
 +
 +                      if (greth->tx_bufs[i] == NULL) {
 +                              if (netif_msg_ifup(greth))
 +                                      dev_err(greth->dev, "Error allocating DMA ring.\n");
 +                              goto cleanup;
 +                      }
 +
 +                      dma_addr = dma_map_single(greth->dev,
 +                                                greth->tx_bufs[i],
 +                                                MAX_FRAME_SIZE,
 +                                                DMA_TO_DEVICE);
 +
 +                      if (dma_mapping_error(greth->dev, dma_addr)) {
 +                              if (netif_msg_ifup(greth))
 +                                      dev_err(greth->dev, "Could not create initial DMA mapping\n");
 +                              goto cleanup;
 +                      }
 +                      greth_write_bd(&tx_bd[i].addr, dma_addr);
 +                      greth_write_bd(&tx_bd[i].stat, 0);
 +              }
 +      }
 +      greth_write_bd(&rx_bd[GRETH_RXBD_NUM - 1].stat,
 +                     greth_read_bd(&rx_bd[GRETH_RXBD_NUM - 1].stat) | GRETH_BD_WR);
 +
 +      /* Initialize pointers. */
 +      greth->rx_cur = 0;
 +      greth->tx_next = 0;
 +      greth->tx_last = 0;
 +      greth->tx_free = GRETH_TXBD_NUM;
 +
 +      /* Initialize descriptor base address */
 +      GRETH_REGSAVE(greth->regs->tx_desc_p, greth->tx_bd_base_phys);
 +      GRETH_REGSAVE(greth->regs->rx_desc_p, greth->rx_bd_base_phys);
 +
 +      return 0;
 +
 +cleanup:
 +      greth_clean_rings(greth);
 +      return -ENOMEM;
 +}
 +
 +static int greth_open(struct net_device *dev)
 +{
 +      struct greth_private *greth = netdev_priv(dev);
 +      int err;
 +
 +      err = greth_init_rings(greth);
 +      if (err) {
 +              if (netif_msg_ifup(greth))
 +                      dev_err(&dev->dev, "Could not allocate memory for DMA rings\n");
 +              return err;
 +      }
 +
 +      err = request_irq(greth->irq, greth_interrupt, 0, "eth", (void *) dev);
 +      if (err) {
 +              if (netif_msg_ifup(greth))
 +                      dev_err(&dev->dev, "Could not allocate interrupt %d\n", dev->irq);
 +              greth_clean_rings(greth);
 +              return err;
 +      }
 +
 +      if (netif_msg_ifup(greth))
 +              dev_dbg(&dev->dev, " starting queue\n");
 +      netif_start_queue(dev);
 +
 +      GRETH_REGSAVE(greth->regs->status, 0xFF);
 +
 +      napi_enable(&greth->napi);
 +
 +      greth_enable_irqs(greth);
 +      greth_enable_tx(greth);
 +      greth_enable_rx(greth);
 +      return 0;
 +
 +}
 +
 +static int greth_close(struct net_device *dev)
 +{
 +      struct greth_private *greth = netdev_priv(dev);
 +
 +      napi_disable(&greth->napi);
 +
 +      greth_disable_irqs(greth);
 +      greth_disable_tx(greth);
 +      greth_disable_rx(greth);
 +
 +      netif_stop_queue(dev);
 +
 +      free_irq(greth->irq, (void *) dev);
 +
 +      greth_clean_rings(greth);
 +
 +      return 0;
 +}
 +
 +static netdev_tx_t
 +greth_start_xmit(struct sk_buff *skb, struct net_device *dev)
 +{
 +      struct greth_private *greth = netdev_priv(dev);
 +      struct greth_bd *bdp;
 +      int err = NETDEV_TX_OK;
 +      u32 status, dma_addr, ctrl;
 +      unsigned long flags;
 +
 +      /* Clean TX Ring */
 +      greth_clean_tx(greth->netdev);
 +
 +      if (unlikely(greth->tx_free <= 0)) {
 +              spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/
 +              ctrl = GRETH_REGLOAD(greth->regs->control);
 +              /* Enable TX IRQ only if not already in poll() routine */
 +              if (ctrl & GRETH_RXI)
 +                      GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
 +              netif_stop_queue(dev);
 +              spin_unlock_irqrestore(&greth->devlock, flags);
 +              return NETDEV_TX_BUSY;
 +      }
 +
 +      if (netif_msg_pktdata(greth))
 +              greth_print_tx_packet(skb);
 +
 +
 +      if (unlikely(skb->len > MAX_FRAME_SIZE)) {
 +              dev->stats.tx_errors++;
 +              goto out;
 +      }
 +
 +      bdp = greth->tx_bd_base + greth->tx_next;
 +      dma_addr = greth_read_bd(&bdp->addr);
 +
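 +      /* The non-gigabit MAC uses preallocated bounce buffers that were
 +       * mapped when the rings were set up; copy the frame into the buffer
 +       * backing this descriptor and sync it for the device.
 +       */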
 +      memcpy((unsigned char *) phys_to_virt(dma_addr), skb->data, skb->len);
 +
 +      dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE);
 +
 +      status = GRETH_BD_EN | GRETH_BD_IE | (skb->len & GRETH_BD_LEN);
++      greth->tx_bufs_length[greth->tx_next] = skb->len & GRETH_BD_LEN;
 +
 +      /* Wrap around descriptor ring */
 +      if (greth->tx_next == GRETH_TXBD_NUM_MASK) {
 +              status |= GRETH_BD_WR;
 +      }
 +
 +      greth->tx_next = NEXT_TX(greth->tx_next);
 +      greth->tx_free--;
 +
 +      /* Write descriptor control word and enable transmission */
 +      greth_write_bd(&bdp->stat, status);
 +      spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
 +      greth_enable_tx(greth);
 +      spin_unlock_irqrestore(&greth->devlock, flags);
 +
 +out:
 +      dev_kfree_skb(skb);
 +      return err;
 +}
 +
 +
 +static netdev_tx_t
 +greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
 +{
 +      struct greth_private *greth = netdev_priv(dev);
 +      struct greth_bd *bdp;
 +      u32 status = 0, dma_addr, ctrl;
 +      int curr_tx, nr_frags, i, err = NETDEV_TX_OK;
 +      unsigned long flags;
 +
 +      nr_frags = skb_shinfo(skb)->nr_frags;
 +
 +      /* Clean TX Ring */
 +      greth_clean_tx_gbit(dev);
 +
 +      if (greth->tx_free < nr_frags + 1) {
 +              spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/
 +              ctrl = GRETH_REGLOAD(greth->regs->control);
 +              /* Enable TX IRQ only if not already in poll() routine */
 +              if (ctrl & GRETH_RXI)
 +                      GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
 +              netif_stop_queue(dev);
 +              spin_unlock_irqrestore(&greth->devlock, flags);
 +              err = NETDEV_TX_BUSY;
 +              goto out;
 +      }
 +
 +      if (netif_msg_pktdata(greth))
 +              greth_print_tx_packet(skb);
 +
 +      if (unlikely(skb->len > MAX_FRAME_SIZE)) {
 +              dev->stats.tx_errors++;
 +              goto out;
 +      }
 +
 +      /* Save skb pointer. */
 +      greth->tx_skbuff[greth->tx_next] = skb;
 +
 +      /* Linear buf */
 +      if (nr_frags != 0)
 +              status = GRETH_TXBD_MORE;
 +
-       status |= GRETH_TXBD_CSALL;
++      if (skb->ip_summed == CHECKSUM_PARTIAL)
++              status |= GRETH_TXBD_CSALL;
 +      status |= skb_headlen(skb) & GRETH_BD_LEN;
 +      if (greth->tx_next == GRETH_TXBD_NUM_MASK)
 +              status |= GRETH_BD_WR;
 +
 +
 +      bdp = greth->tx_bd_base + greth->tx_next;
 +      greth_write_bd(&bdp->stat, status);
 +      dma_addr = dma_map_single(greth->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
 +
 +      if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
 +              goto map_error;
 +
 +      greth_write_bd(&bdp->addr, dma_addr);
 +
 +      curr_tx = NEXT_TX(greth->tx_next);
 +
 +      /* Frags */
 +      for (i = 0; i < nr_frags; i++) {
 +              skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 +              greth->tx_skbuff[curr_tx] = NULL;
 +              bdp = greth->tx_bd_base + curr_tx;
 +
-               status = GRETH_TXBD_CSALL | GRETH_BD_EN;
++              status = GRETH_BD_EN;
++              if (skb->ip_summed == CHECKSUM_PARTIAL)
++                      status |= GRETH_TXBD_CSALL;
 +              status |= frag->size & GRETH_BD_LEN;
 +
 +              /* Wrap around descriptor ring */
 +              if (curr_tx == GRETH_TXBD_NUM_MASK)
 +                      status |= GRETH_BD_WR;
 +
 +              /* More fragments left */
 +              if (i < nr_frags - 1)
 +                      status |= GRETH_TXBD_MORE;
 +              else
 +                      status |= GRETH_BD_IE; /* enable IRQ on last fragment */
 +
 +              greth_write_bd(&bdp->stat, status);
 +
 +              dma_addr = skb_frag_dma_map(greth->dev, frag, 0, frag->size,
 +                                          DMA_TO_DEVICE);
 +
 +              if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
 +                      goto frag_map_error;
 +
 +              greth_write_bd(&bdp->addr, dma_addr);
 +
 +              curr_tx = NEXT_TX(curr_tx);
 +      }
 +
 +      wmb();
 +
 +      /* Enable the descriptor chain by enabling the first descriptor */
 +      bdp = greth->tx_bd_base + greth->tx_next;
 +      greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN);
 +      greth->tx_next = curr_tx;
 +      greth->tx_free -= nr_frags + 1;
 +
 +      wmb();
 +
 +      spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
 +      greth_enable_tx(greth);
 +      spin_unlock_irqrestore(&greth->devlock, flags);
 +
 +      return NETDEV_TX_OK;
 +
 +frag_map_error:
 +      /* Unmap SKB mappings that succeeded and disable descriptor */
 +      for (i = 0; greth->tx_next + i != curr_tx; i++) {
 +              bdp = greth->tx_bd_base + greth->tx_next + i;
 +              dma_unmap_single(greth->dev,
 +                               greth_read_bd(&bdp->addr),
 +                               greth_read_bd(&bdp->stat) & GRETH_BD_LEN,
 +                               DMA_TO_DEVICE);
 +              greth_write_bd(&bdp->stat, 0);
 +      }
 +map_error:
 +      if (net_ratelimit())
 +              dev_warn(greth->dev, "Could not create TX DMA mapping\n");
 +      dev_kfree_skb(skb);
 +out:
 +      return err;
 +}
 +
 +static irqreturn_t greth_interrupt(int irq, void *dev_id)
 +{
 +      struct net_device *dev = dev_id;
 +      struct greth_private *greth;
 +      u32 status, ctrl;
 +      irqreturn_t retval = IRQ_NONE;
 +
 +      greth = netdev_priv(dev);
 +
 +      spin_lock(&greth->devlock);
 +
 +      /* Get the interrupt events that caused us to be here. */
 +      status = GRETH_REGLOAD(greth->regs->status);
 +
 +      /* We must also check whether interrupts are enabled: the INT_TX|INT_RX
 +       * flags may be set regardless of whether the IRQ is enabled. This is
 +       * especially important when the IRQ is shared.
 +       */
 +      ctrl = GRETH_REGLOAD(greth->regs->control);
 +
 +      /* Handle rx and tx interrupts through poll */
 +      if (((status & (GRETH_INT_RE | GRETH_INT_RX)) && (ctrl & GRETH_RXI)) ||
 +          ((status & (GRETH_INT_TE | GRETH_INT_TX)) && (ctrl & GRETH_TXI))) {
 +              retval = IRQ_HANDLED;
 +
 +              /* Disable interrupts and schedule poll() */
 +              greth_disable_irqs(greth);
 +              napi_schedule(&greth->napi);
 +      }
 +
 +      mmiowb();
 +      spin_unlock(&greth->devlock);
 +
 +      return retval;
 +}
 +
 +static void greth_clean_tx(struct net_device *dev)
 +{
 +      struct greth_private *greth;
 +      struct greth_bd *bdp;
 +      u32 stat;
 +
 +      greth = netdev_priv(dev);
 +
 +      while (1) {
 +              bdp = greth->tx_bd_base + greth->tx_last;
 +              GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
 +              mb();
 +              stat = greth_read_bd(&bdp->stat);
 +
 +              if (unlikely(stat & GRETH_BD_EN))
 +                      break;
 +
 +              if (greth->tx_free == GRETH_TXBD_NUM)
 +                      break;
 +
 +              /* Check status for errors */
 +              if (unlikely(stat & GRETH_TXBD_STATUS)) {
 +                      dev->stats.tx_errors++;
 +                      if (stat & GRETH_TXBD_ERR_AL)
 +                              dev->stats.tx_aborted_errors++;
 +                      if (stat & GRETH_TXBD_ERR_UE)
 +                              dev->stats.tx_fifo_errors++;
 +              }
 +              dev->stats.tx_packets++;
++              dev->stats.tx_bytes += greth->tx_bufs_length[greth->tx_last];
 +              greth->tx_last = NEXT_TX(greth->tx_last);
 +              greth->tx_free++;
 +      }
 +
 +      if (greth->tx_free > 0) {
 +              netif_wake_queue(dev);
 +      }
 +
 +}
 +
 +static inline void greth_update_tx_stats(struct net_device *dev, u32 stat)
 +{
 +      /* Check status for errors */
 +      if (unlikely(stat & GRETH_TXBD_STATUS)) {
 +              dev->stats.tx_errors++;
 +              if (stat & GRETH_TXBD_ERR_AL)
 +                      dev->stats.tx_aborted_errors++;
 +              if (stat & GRETH_TXBD_ERR_UE)
 +                      dev->stats.tx_fifo_errors++;
 +              if (stat & GRETH_TXBD_ERR_LC)
 +                      dev->stats.tx_aborted_errors++;
 +      }
 +      dev->stats.tx_packets++;
 +}
 +
 +static void greth_clean_tx_gbit(struct net_device *dev)
 +{
 +      struct greth_private *greth;
 +      struct greth_bd *bdp, *bdp_last_frag;
 +      struct sk_buff *skb;
 +      u32 stat;
 +      int nr_frags, i;
 +
 +      greth = netdev_priv(dev);
 +
 +      while (greth->tx_free < GRETH_TXBD_NUM) {
 +
 +              skb = greth->tx_skbuff[greth->tx_last];
 +
 +              nr_frags = skb_shinfo(skb)->nr_frags;
 +
 +              /* We only clean fully completed SKBs */
 +              bdp_last_frag = greth->tx_bd_base + SKIP_TX(greth->tx_last, nr_frags);
 +
 +              GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
 +              mb();
 +              stat = greth_read_bd(&bdp_last_frag->stat);
 +
 +              if (stat & GRETH_BD_EN)
 +                      break;
 +
 +              greth->tx_skbuff[greth->tx_last] = NULL;
 +
 +              greth_update_tx_stats(dev, stat);
++              dev->stats.tx_bytes += skb->len;
 +
 +              bdp = greth->tx_bd_base + greth->tx_last;
 +
 +              greth->tx_last = NEXT_TX(greth->tx_last);
 +
 +              dma_unmap_single(greth->dev,
 +                               greth_read_bd(&bdp->addr),
 +                               skb_headlen(skb),
 +                               DMA_TO_DEVICE);
 +
 +              for (i = 0; i < nr_frags; i++) {
 +                      skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 +                      bdp = greth->tx_bd_base + greth->tx_last;
 +
 +                      dma_unmap_page(greth->dev,
 +                                     greth_read_bd(&bdp->addr),
 +                                     frag->size,
 +                                     DMA_TO_DEVICE);
 +
 +                      greth->tx_last = NEXT_TX(greth->tx_last);
 +              }
 +              greth->tx_free += nr_frags+1;
 +              dev_kfree_skb(skb);
 +      }
 +
 +      if (netif_queue_stopped(dev) && (greth->tx_free > (MAX_SKB_FRAGS+1)))
 +              netif_wake_queue(dev);
 +}
 +
 +static int greth_rx(struct net_device *dev, int limit)
 +{
 +      struct greth_private *greth;
 +      struct greth_bd *bdp;
 +      struct sk_buff *skb;
 +      int pkt_len;
 +      int bad, count;
 +      u32 status, dma_addr;
 +      unsigned long flags;
 +
 +      greth = netdev_priv(dev);
 +
 +      for (count = 0; count < limit; ++count) {
 +
 +              bdp = greth->rx_bd_base + greth->rx_cur;
 +              GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
 +              mb();
 +              status = greth_read_bd(&bdp->stat);
 +
 +              if (unlikely(status & GRETH_BD_EN)) {
 +                      break;
 +              }
 +
 +              dma_addr = greth_read_bd(&bdp->addr);
 +              bad = 0;
 +
 +              /* Check status for errors. */
 +              if (unlikely(status & GRETH_RXBD_STATUS)) {
 +                      if (status & GRETH_RXBD_ERR_FT) {
 +                              dev->stats.rx_length_errors++;
 +                              bad = 1;
 +                      }
 +                      if (status & (GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE)) {
 +                              dev->stats.rx_frame_errors++;
 +                              bad = 1;
 +                      }
 +                      if (status & GRETH_RXBD_ERR_CRC) {
 +                              dev->stats.rx_crc_errors++;
 +                              bad = 1;
 +                      }
 +              }
 +              if (unlikely(bad)) {
 +                      dev->stats.rx_errors++;
 +
 +              } else {
 +
 +                      pkt_len = status & GRETH_BD_LEN;
 +
 +                      skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
 +
 +                      if (unlikely(skb == NULL)) {
 +
 +                              if (net_ratelimit())
 +                                      dev_warn(&dev->dev, "low on memory - packet dropped\n");
 +
 +                              dev->stats.rx_dropped++;
 +
 +                      } else {
 +                              skb_reserve(skb, NET_IP_ALIGN);
 +                              skb->dev = dev;
 +
 +                              dma_sync_single_for_cpu(greth->dev,
 +                                                      dma_addr,
 +                                                      pkt_len,
 +                                                      DMA_FROM_DEVICE);
 +
 +                              if (netif_msg_pktdata(greth))
 +                                      greth_print_rx_packet(phys_to_virt(dma_addr), pkt_len);
 +
 +                              memcpy(skb_put(skb, pkt_len), phys_to_virt(dma_addr), pkt_len);
 +
 +                              skb->protocol = eth_type_trans(skb, dev);
++                              dev->stats.rx_bytes += pkt_len;
 +                              dev->stats.rx_packets++;
 +                              netif_receive_skb(skb);
 +                      }
 +              }
 +
 +              status = GRETH_BD_EN | GRETH_BD_IE;
 +              if (greth->rx_cur == GRETH_RXBD_NUM_MASK) {
 +                      status |= GRETH_BD_WR;
 +              }
 +
 +              wmb();
 +              greth_write_bd(&bdp->stat, status);
 +
 +              dma_sync_single_for_device(greth->dev, dma_addr, MAX_FRAME_SIZE, DMA_FROM_DEVICE);
 +
 +              spin_lock_irqsave(&greth->devlock, flags); /* save from XMIT */
 +              greth_enable_rx(greth);
 +              spin_unlock_irqrestore(&greth->devlock, flags);
 +
 +              greth->rx_cur = NEXT_RX(greth->rx_cur);
 +      }
 +
 +      return count;
 +}
 +
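 +/* The hardware RX checksum result is trusted only for non-fragmented
 + * frames with none of the IP/UDP/TCP checksum-error bits set.
 + */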
 +static inline int hw_checksummed(u32 status)
 +{
 +
 +      if (status & GRETH_RXBD_IP_FRAG)
 +              return 0;
 +
 +      if (status & GRETH_RXBD_IP && status & GRETH_RXBD_IP_CSERR)
 +              return 0;
 +
 +      if (status & GRETH_RXBD_UDP && status & GRETH_RXBD_UDP_CSERR)
 +              return 0;
 +
 +      if (status & GRETH_RXBD_TCP && status & GRETH_RXBD_TCP_CSERR)
 +              return 0;
 +
 +      return 1;
 +}
 +
 +static int greth_rx_gbit(struct net_device *dev, int limit)
 +{
 +      struct greth_private *greth;
 +      struct greth_bd *bdp;
 +      struct sk_buff *skb, *newskb;
 +      int pkt_len;
 +      int bad, count = 0;
 +      u32 status, dma_addr;
 +      unsigned long flags;
 +
 +      greth = netdev_priv(dev);
 +
 +      for (count = 0; count < limit; ++count) {
 +
 +              bdp = greth->rx_bd_base + greth->rx_cur;
 +              skb = greth->rx_skbuff[greth->rx_cur];
 +              GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
 +              mb();
 +              status = greth_read_bd(&bdp->stat);
 +              bad = 0;
 +
 +              if (status & GRETH_BD_EN)
 +                      break;
 +
 +              /* Check status for errors. */
 +              if (unlikely(status & GRETH_RXBD_STATUS)) {
 +
 +                      if (status & GRETH_RXBD_ERR_FT) {
 +                              dev->stats.rx_length_errors++;
 +                              bad = 1;
 +                      } else if (status &
 +                                 (GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE | GRETH_RXBD_ERR_LE)) {
 +                              dev->stats.rx_frame_errors++;
 +                              bad = 1;
 +                      } else if (status & GRETH_RXBD_ERR_CRC) {
 +                              dev->stats.rx_crc_errors++;
 +                              bad = 1;
 +                      }
 +              }
 +
 +              /* Allocate new skb to replace current, not needed if the
 +               * current skb can be reused */
 +              if (!bad && (newskb=netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN))) {
 +                      skb_reserve(newskb, NET_IP_ALIGN);
 +
 +                      dma_addr = dma_map_single(greth->dev,
 +                                                    newskb->data,
 +                                                    MAX_FRAME_SIZE + NET_IP_ALIGN,
 +                                                    DMA_FROM_DEVICE);
 +
 +                      if (!dma_mapping_error(greth->dev, dma_addr)) {
 +                              /* Process the incoming frame. */
 +                              pkt_len = status & GRETH_BD_LEN;
 +
 +                              dma_unmap_single(greth->dev,
 +                                               greth_read_bd(&bdp->addr),
 +                                               MAX_FRAME_SIZE + NET_IP_ALIGN,
 +                                               DMA_FROM_DEVICE);
 +
 +                              if (netif_msg_pktdata(greth))
 +                                      greth_print_rx_packet(phys_to_virt(greth_read_bd(&bdp->addr)), pkt_len);
 +
 +                              skb_put(skb, pkt_len);
 +
 +                              if (dev->features & NETIF_F_RXCSUM && hw_checksummed(status))
 +                                      skb->ip_summed = CHECKSUM_UNNECESSARY;
 +                              else
 +                                      skb_checksum_none_assert(skb);
 +
 +                              skb->protocol = eth_type_trans(skb, dev);
 +                              dev->stats.rx_packets++;
++                              dev->stats.rx_bytes += pkt_len;
 +                              netif_receive_skb(skb);
 +
 +                              greth->rx_skbuff[greth->rx_cur] = newskb;
 +                              greth_write_bd(&bdp->addr, dma_addr);
 +                      } else {
 +                              if (net_ratelimit())
 +                                      dev_warn(greth->dev, "Could not create DMA mapping, dropping packet\n");
 +                              dev_kfree_skb(newskb);
 +                              /* reusing current skb, so it is a drop */
 +                              dev->stats.rx_dropped++;
 +                      }
 +              } else if (bad) {
 +                      /* Bad Frame transfer, the skb is reused */
 +                      dev->stats.rx_dropped++;
 +              } else {
 +                      /* Failed to allocate a new skb. Not ideal, but the
 +                       * current "filled" skb is reused, as if the transfer
 +                       * had failed. One could argue that RX descriptor
 +                       * table handling should be split into cleaning and
 +                       * refilling, as in the TX part of the driver.
 +                       */
 +                      if (net_ratelimit())
 +                              dev_warn(greth->dev, "Could not allocate SKB, dropping packet\n");
 +                      /* reusing current skb, so it is a drop */
 +                      dev->stats.rx_dropped++;
 +              }
 +
 +              status = GRETH_BD_EN | GRETH_BD_IE;
 +              if (greth->rx_cur == GRETH_RXBD_NUM_MASK) {
 +                      status |= GRETH_BD_WR;
 +              }
 +
 +              wmb();
 +              greth_write_bd(&bdp->stat, status);
 +              spin_lock_irqsave(&greth->devlock, flags);
 +              greth_enable_rx(greth);
 +              spin_unlock_irqrestore(&greth->devlock, flags);
 +              greth->rx_cur = NEXT_RX(greth->rx_cur);
 +      }
 +
 +      return count;
 +
 +}
 +
 +static int greth_poll(struct napi_struct *napi, int budget)
 +{
 +      struct greth_private *greth;
 +      int work_done = 0;
 +      unsigned long flags;
 +      u32 mask, ctrl;
 +      greth = container_of(napi, struct greth_private, napi);
 +
 +restart_txrx_poll:
 +      if (netif_queue_stopped(greth->netdev)) {
 +              if (greth->gbit_mac)
 +                      greth_clean_tx_gbit(greth->netdev);
 +              else
 +                      greth_clean_tx(greth->netdev);
 +      }
 +
 +      if (greth->gbit_mac) {
 +              work_done += greth_rx_gbit(greth->netdev, budget - work_done);
 +      } else {
 +              work_done += greth_rx(greth->netdev, budget - work_done);
 +      }
 +
 +      if (work_done < budget) {
 +
 +              spin_lock_irqsave(&greth->devlock, flags);
 +
 +              ctrl = GRETH_REGLOAD(greth->regs->control);
 +              if (netif_queue_stopped(greth->netdev)) {
 +                      GRETH_REGSAVE(greth->regs->control,
 +                                      ctrl | GRETH_TXI | GRETH_RXI);
 +                      mask = GRETH_INT_RX | GRETH_INT_RE |
 +                             GRETH_INT_TX | GRETH_INT_TE;
 +              } else {
 +                      GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_RXI);
 +                      mask = GRETH_INT_RX | GRETH_INT_RE;
 +              }
 +
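 +              /* Re-check the status register: if events are already pending,
 +               * back out the IRQ enable and keep polling rather than
 +               * completing NAPI.
 +               */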
 +              if (GRETH_REGLOAD(greth->regs->status) & mask) {
 +                      GRETH_REGSAVE(greth->regs->control, ctrl);
 +                      spin_unlock_irqrestore(&greth->devlock, flags);
 +                      goto restart_txrx_poll;
 +              } else {
 +                      __napi_complete(napi);
 +                      spin_unlock_irqrestore(&greth->devlock, flags);
 +              }
 +      }
 +
 +      return work_done;
 +}
 +
 +static int greth_set_mac_add(struct net_device *dev, void *p)
 +{
 +      struct sockaddr *addr = p;
 +      struct greth_private *greth;
 +      struct greth_regs *regs;
 +
 +      greth = netdev_priv(dev);
 +      regs = (struct greth_regs *) greth->regs;
 +
 +      if (!is_valid_ether_addr(addr->sa_data))
 +              return -EINVAL;
 +
 +      memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
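 +      /* The station address is split across two registers: esa_msb holds
 +       * the two most significant bytes, esa_lsb the remaining four.
 +       */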
 +      GRETH_REGSAVE(regs->esa_msb, dev->dev_addr[0] << 8 | dev->dev_addr[1]);
 +      GRETH_REGSAVE(regs->esa_lsb, dev->dev_addr[2] << 24 | dev->dev_addr[3] << 16 |
 +                    dev->dev_addr[4] << 8 | dev->dev_addr[5]);
 +
 +      return 0;
 +}
 +
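 +/* 64-bit multicast hash filter: the low six bits of the Ethernet CRC of the
 + * address select the filter bit (bit 5 picks the MSB/LSB hash register).
 + */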
 +static u32 greth_hash_get_index(__u8 *addr)
 +{
 +      return (ether_crc(6, addr)) & 0x3F;
 +}
 +
 +static void greth_set_hash_filter(struct net_device *dev)
 +{
 +      struct netdev_hw_addr *ha;
 +      struct greth_private *greth = netdev_priv(dev);
 +      struct greth_regs *regs = (struct greth_regs *) greth->regs;
 +      u32 mc_filter[2];
 +      unsigned int bitnr;
 +
 +      mc_filter[0] = mc_filter[1] = 0;
 +
 +      netdev_for_each_mc_addr(ha, dev) {
 +              bitnr = greth_hash_get_index(ha->addr);
 +              mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
 +      }
 +
 +      GRETH_REGSAVE(regs->hash_msb, mc_filter[1]);
 +      GRETH_REGSAVE(regs->hash_lsb, mc_filter[0]);
 +}
 +
 +static void greth_set_multicast_list(struct net_device *dev)
 +{
 +      int cfg;
 +      struct greth_private *greth = netdev_priv(dev);
 +      struct greth_regs *regs = (struct greth_regs *) greth->regs;
 +
 +      cfg = GRETH_REGLOAD(regs->control);
 +      if (dev->flags & IFF_PROMISC)
 +              cfg |= GRETH_CTRL_PR;
 +      else
 +              cfg &= ~GRETH_CTRL_PR;
 +
 +      if (greth->multicast) {
 +              if (dev->flags & IFF_ALLMULTI) {
 +                      GRETH_REGSAVE(regs->hash_msb, -1);
 +                      GRETH_REGSAVE(regs->hash_lsb, -1);
 +                      cfg |= GRETH_CTRL_MCEN;
 +                      GRETH_REGSAVE(regs->control, cfg);
 +                      return;
 +              }
 +
 +              if (netdev_mc_empty(dev)) {
 +                      cfg &= ~GRETH_CTRL_MCEN;
 +                      GRETH_REGSAVE(regs->control, cfg);
 +                      return;
 +              }
 +
 +              /* Setup multicast filter */
 +              greth_set_hash_filter(dev);
 +              cfg |= GRETH_CTRL_MCEN;
 +      }
 +      GRETH_REGSAVE(regs->control, cfg);
 +}
 +
 +static u32 greth_get_msglevel(struct net_device *dev)
 +{
 +      struct greth_private *greth = netdev_priv(dev);
 +      return greth->msg_enable;
 +}
 +
 +static void greth_set_msglevel(struct net_device *dev, u32 value)
 +{
 +      struct greth_private *greth = netdev_priv(dev);
 +      greth->msg_enable = value;
 +}
 +static int greth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 +{
 +      struct greth_private *greth = netdev_priv(dev);
 +      struct phy_device *phy = greth->phy;
 +
 +      if (!phy)
 +              return -ENODEV;
 +
 +      return phy_ethtool_gset(phy, cmd);
 +}
 +
 +static int greth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 +{
 +      struct greth_private *greth = netdev_priv(dev);
 +      struct phy_device *phy = greth->phy;
 +
 +      if (!phy)
 +              return -ENODEV;
 +
 +      return phy_ethtool_sset(phy, cmd);
 +}
 +
 +static int greth_get_regs_len(struct net_device *dev)
 +{
 +      return sizeof(struct greth_regs);
 +}
 +
 +static void greth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 +{
 +      struct greth_private *greth = netdev_priv(dev);
 +
 +      strncpy(info->driver, dev_driver_string(greth->dev), 32);
 +      strncpy(info->version, "revision: 1.0", 32);
 +      strncpy(info->bus_info, greth->dev->bus->name, 32);
 +      strncpy(info->fw_version, "N/A", 32);
 +      info->eedump_len = 0;
 +      info->regdump_len = sizeof(struct greth_regs);
 +}
 +
 +static void greth_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
 +{
 +      int i;
 +      struct greth_private *greth = netdev_priv(dev);
 +      u32 __iomem *greth_regs = (u32 __iomem *) greth->regs;
 +      u32 *buff = p;
 +
 +      for (i = 0; i < sizeof(struct greth_regs) / sizeof(u32); i++)
 +              buff[i] = greth_read_bd(&greth_regs[i]);
 +}
 +
 +static const struct ethtool_ops greth_ethtool_ops = {
 +      .get_msglevel           = greth_get_msglevel,
 +      .set_msglevel           = greth_set_msglevel,
 +      .get_settings           = greth_get_settings,
 +      .set_settings           = greth_set_settings,
 +      .get_drvinfo            = greth_get_drvinfo,
 +      .get_regs_len           = greth_get_regs_len,
 +      .get_regs               = greth_get_regs,
 +      .get_link               = ethtool_op_get_link,
 +};
 +
 +static struct net_device_ops greth_netdev_ops = {
 +      .ndo_open               = greth_open,
 +      .ndo_stop               = greth_close,
 +      .ndo_start_xmit         = greth_start_xmit,
 +      .ndo_set_mac_address    = greth_set_mac_add,
 +      .ndo_validate_addr      = eth_validate_addr,
 +};
 +
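 +/* Busy-wait until the MDIO interface is idle; returns 0 on timeout
 + * (roughly 40 ms, i.e. 4*HZ/100 jiffies) and 1 on success.
 + */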
 +static inline int wait_for_mdio(struct greth_private *greth)
 +{
 +      unsigned long timeout = jiffies + 4*HZ/100;
 +      while (GRETH_REGLOAD(greth->regs->mdio) & GRETH_MII_BUSY) {
 +              if (time_after(jiffies, timeout))
 +                      return 0;
 +      }
 +      return 1;
 +}
 +
 +static int greth_mdio_read(struct mii_bus *bus, int phy, int reg)
 +{
 +      struct greth_private *greth = bus->priv;
 +      int data;
 +
 +      if (!wait_for_mdio(greth))
 +              return -EBUSY;
 +
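 +      /* MDIO register layout: data in bits 31:16, PHY address in 15:11,
 +       * register address in 10:6, bit 1 = read strobe, bit 0 = write strobe.
 +       */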
 +      GRETH_REGSAVE(greth->regs->mdio, ((phy & 0x1F) << 11) | ((reg & 0x1F) << 6) | 2);
 +
 +      if (!wait_for_mdio(greth))
 +              return -EBUSY;
 +
 +      if (!(GRETH_REGLOAD(greth->regs->mdio) & GRETH_MII_NVALID)) {
 +              data = (GRETH_REGLOAD(greth->regs->mdio) >> 16) & 0xFFFF;
 +              return data;
 +
 +      } else {
 +              return -1;
 +      }
 +}
 +
 +static int greth_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
 +{
 +      struct greth_private *greth = bus->priv;
 +
 +      if (!wait_for_mdio(greth))
 +              return -EBUSY;
 +
 +      GRETH_REGSAVE(greth->regs->mdio,
 +                    ((val & 0xFFFF) << 16) | ((phy & 0x1F) << 11) | ((reg & 0x1F) << 6) | 1);
 +
 +      if (!wait_for_mdio(greth))
 +              return -EBUSY;
 +
 +      return 0;
 +}
 +
 +static int greth_mdio_reset(struct mii_bus *bus)
 +{
 +      return 0;
 +}
 +
 +static void greth_link_change(struct net_device *dev)
 +{
 +      struct greth_private *greth = netdev_priv(dev);
 +      struct phy_device *phydev = greth->phy;
 +      unsigned long flags;
 +      int status_change = 0;
 +      u32 ctrl;
 +
 +      spin_lock_irqsave(&greth->devlock, flags);
 +
 +      if (phydev->link) {
 +
 +              if ((greth->speed != phydev->speed) || (greth->duplex != phydev->duplex)) {
 +                      ctrl = GRETH_REGLOAD(greth->regs->control) &
 +                             ~(GRETH_CTRL_FD | GRETH_CTRL_SP | GRETH_CTRL_GB);
 +
 +                      if (phydev->duplex)
 +                              ctrl |= GRETH_CTRL_FD;
 +
 +                      if (phydev->speed == SPEED_100)
 +                              ctrl |= GRETH_CTRL_SP;
 +                      else if (phydev->speed == SPEED_1000)
 +                              ctrl |= GRETH_CTRL_GB;
 +
 +                      GRETH_REGSAVE(greth->regs->control, ctrl);
 +                      greth->speed = phydev->speed;
 +                      greth->duplex = phydev->duplex;
 +                      status_change = 1;
 +              }
 +      }
 +
 +      if (phydev->link != greth->link) {
 +              if (!phydev->link) {
 +                      greth->speed = 0;
 +                      greth->duplex = -1;
 +              }
 +              greth->link = phydev->link;
 +
 +              status_change = 1;
 +      }
 +
 +      spin_unlock_irqrestore(&greth->devlock, flags);
 +
 +      if (status_change) {
 +              if (phydev->link)
 +                      pr_debug("%s: link up (%d/%s)\n",
 +                              dev->name, phydev->speed,
 +                              DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
 +              else
 +                      pr_debug("%s: link down\n", dev->name);
 +      }
 +}
 +
 +static int greth_mdio_probe(struct net_device *dev)
 +{
 +      struct greth_private *greth = netdev_priv(dev);
 +      struct phy_device *phy = NULL;
 +      int ret;
 +
 +      /* Find the first PHY */
 +      phy = phy_find_first(greth->mdio);
 +
 +      if (!phy) {
 +              if (netif_msg_probe(greth))
 +                      dev_err(&dev->dev, "no PHY found\n");
 +              return -ENXIO;
 +      }
 +
 +      ret = phy_connect_direct(dev, phy, &greth_link_change,
 +                      0, greth->gbit_mac ?
 +                      PHY_INTERFACE_MODE_GMII :
 +                      PHY_INTERFACE_MODE_MII);
 +      if (ret) {
 +              if (netif_msg_ifup(greth))
 +                      dev_err(&dev->dev, "could not attach to PHY\n");
 +              return ret;
 +      }
 +
 +      if (greth->gbit_mac)
 +              phy->supported &= PHY_GBIT_FEATURES;
 +      else
 +              phy->supported &= PHY_BASIC_FEATURES;
 +
 +      phy->advertising = phy->supported;
 +
 +      greth->link = 0;
 +      greth->speed = 0;
 +      greth->duplex = -1;
 +      greth->phy = phy;
 +
 +      return 0;
 +}
 +
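 +/* Read the PHY's BMSR and report whether autonegotiation has completed
 + * (a negative value indicates a read error).
 + */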
 +static inline int phy_aneg_done(struct phy_device *phydev)
 +{
 +      int retval;
 +
 +      retval = phy_read(phydev, MII_BMSR);
 +
 +      return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE);
 +}
 +
 +static int greth_mdio_init(struct greth_private *greth)
 +{
 +      int ret, phy;
 +      unsigned long timeout;
 +
 +      greth->mdio = mdiobus_alloc();
 +      if (!greth->mdio) {
 +              return -ENOMEM;
 +      }
 +
 +      greth->mdio->name = "greth-mdio";
 +      snprintf(greth->mdio->id, MII_BUS_ID_SIZE, "%s-%d", greth->mdio->name, greth->irq);
 +      greth->mdio->read = greth_mdio_read;
 +      greth->mdio->write = greth_mdio_write;
 +      greth->mdio->reset = greth_mdio_reset;
 +      greth->mdio->priv = greth;
 +
 +      greth->mdio->irq = greth->mdio_irqs;
 +
 +      for (phy = 0; phy < PHY_MAX_ADDR; phy++)
 +              greth->mdio->irq[phy] = PHY_POLL;
 +
 +      ret = mdiobus_register(greth->mdio);
 +      if (ret) {
 +              goto error;
 +      }
 +
 +      ret = greth_mdio_probe(greth->netdev);
 +      if (ret) {
 +              if (netif_msg_probe(greth))
 +                      dev_err(&greth->netdev->dev, "failed to probe MDIO bus\n");
 +              goto unreg_mdio;
 +      }
 +
 +      phy_start(greth->phy);
 +
 +      /* If the Ethernet debug link (EDCL) is used, make autoneg happen right away */
 +      if (greth->edcl && greth_edcl == 1) {
 +              phy_start_aneg(greth->phy);
 +              timeout = jiffies + 6*HZ;
 +              while (!phy_aneg_done(greth->phy) && time_before(jiffies, timeout)) {
 +              }
 +              genphy_read_status(greth->phy);
 +              greth_link_change(greth->netdev);
 +      }
 +
 +      return 0;
 +
 +unreg_mdio:
 +      mdiobus_unregister(greth->mdio);
 +error:
 +      mdiobus_free(greth->mdio);
 +      return ret;
 +}
 +
 +/* Initialize the GRETH MAC */
 +static int __devinit greth_of_probe(struct platform_device *ofdev)
 +{
 +      struct net_device *dev;
 +      struct greth_private *greth;
 +      struct greth_regs *regs;
 +
 +      int i;
 +      int err;
 +      int tmp;
 +      unsigned long timeout;
 +
 +      dev = alloc_etherdev(sizeof(struct greth_private));
 +
 +      if (dev == NULL)
 +              return -ENOMEM;
 +
 +      greth = netdev_priv(dev);
 +      greth->netdev = dev;
 +      greth->dev = &ofdev->dev;
 +
 +      if (greth_debug > 0)
 +              greth->msg_enable = greth_debug;
 +      else
 +              greth->msg_enable = GRETH_DEF_MSG_ENABLE;
 +
 +      spin_lock_init(&greth->devlock);
 +
 +      greth->regs = of_ioremap(&ofdev->resource[0], 0,
 +                               resource_size(&ofdev->resource[0]),
 +                               "grlib-greth regs");
 +
 +      if (greth->regs == NULL) {
 +              if (netif_msg_probe(greth))
 +                      dev_err(greth->dev, "ioremap failure.\n");
 +              err = -EIO;
 +              goto error1;
 +      }
 +
 +      regs = (struct greth_regs *) greth->regs;
 +      greth->irq = ofdev->archdata.irqs[0];
 +
 +      dev_set_drvdata(greth->dev, dev);
 +      SET_NETDEV_DEV(dev, greth->dev);
 +
 +      if (netif_msg_probe(greth))
 +              dev_dbg(greth->dev, "resetting controller.\n");
 +
 +      /* Reset the controller. */
 +      GRETH_REGSAVE(regs->control, GRETH_RESET);
 +
 +      /* Wait for MAC to reset itself */
 +      timeout = jiffies + HZ/100;
 +      while (GRETH_REGLOAD(regs->control) & GRETH_RESET) {
 +              if (time_after(jiffies, timeout)) {
 +                      err = -EIO;
 +                      if (netif_msg_probe(greth))
 +                              dev_err(greth->dev, "timeout when waiting for reset.\n");
 +                      goto error2;
 +              }
 +      }
 +
 +      /* Get default PHY address  */
 +      greth->phyaddr = (GRETH_REGLOAD(regs->mdio) >> 11) & 0x1F;
 +
 +      /* Check if we have GBIT capable MAC */
 +      tmp = GRETH_REGLOAD(regs->control);
 +      greth->gbit_mac = (tmp >> 27) & 1;
 +
 +      /* Check for multicast capability */
 +      greth->multicast = (tmp >> 25) & 1;
 +
 +      greth->edcl = (tmp >> 31) & 1;
 +
 +      /* If we have EDCL we disable the EDCL speed-duplex FSM so
 +       * it doesn't interfere with the software */
 +      if (greth->edcl != 0)
 +              GRETH_REGORIN(regs->control, GRETH_CTRL_DISDUPLEX);
 +
 +      /* Check if MAC can handle MDIO interrupts */
 +      greth->mdio_int_en = (tmp >> 26) & 1;
 +
 +      err = greth_mdio_init(greth);
 +      if (err) {
 +              if (netif_msg_probe(greth))
 +                      dev_err(greth->dev, "failed to register MDIO bus\n");
 +              goto error2;
 +      }
 +
 +      /* Allocate TX descriptor ring in coherent memory */
 +      greth->tx_bd_base = (struct greth_bd *) dma_alloc_coherent(greth->dev,
 +                                                                 1024,
 +                                                                 &greth->tx_bd_base_phys,
 +                                                                 GFP_KERNEL);
 +
 +      if (!greth->tx_bd_base) {
 +              if (netif_msg_probe(greth))
 +                      dev_err(&dev->dev, "could not allocate descriptor memory.\n");
 +              err = -ENOMEM;
 +              goto error3;
 +      }
 +
 +      memset(greth->tx_bd_base, 0, 1024);
 +
 +      /* Allocate RX descriptor ring in coherent memory */
 +      greth->rx_bd_base = (struct greth_bd *) dma_alloc_coherent(greth->dev,
 +                                                                 1024,
 +                                                                 &greth->rx_bd_base_phys,
 +                                                                 GFP_KERNEL);
 +
 +      if (!greth->rx_bd_base) {
 +              if (netif_msg_probe(greth))
 +                      dev_err(greth->dev, "could not allocate descriptor memory.\n");
 +              err = -ENOMEM;
 +              goto error4;
 +      }
 +
 +      memset(greth->rx_bd_base, 0, 1024);
 +
 +      /* Get MAC address from: module param, OF property or ID prom */
 +      for (i = 0; i < 6; i++) {
 +              if (macaddr[i] != 0)
 +                      break;
 +      }
 +      if (i == 6) {
 +              const unsigned char *addr;
 +              int len;
 +              addr = of_get_property(ofdev->dev.of_node, "local-mac-address",
 +                                      &len);
 +              if (addr != NULL && len == 6) {
 +                      for (i = 0; i < 6; i++)
 +                              macaddr[i] = (unsigned int) addr[i];
 +              } else {
 +#ifdef CONFIG_SPARC
 +                      for (i = 0; i < 6; i++)
 +                              macaddr[i] = (unsigned int) idprom->id_ethaddr[i];
 +#endif
 +              }
 +      }
 +
 +      for (i = 0; i < 6; i++)
 +              dev->dev_addr[i] = macaddr[i];
 +
 +      macaddr[5]++;
 +
 +      if (!is_valid_ether_addr(&dev->dev_addr[0])) {
 +              if (netif_msg_probe(greth))
 +                      dev_err(greth->dev, "no valid ethernet address, aborting.\n");
 +              err = -EINVAL;
 +              goto error5;
 +      }
 +
 +      GRETH_REGSAVE(regs->esa_msb, dev->dev_addr[0] << 8 | dev->dev_addr[1]);
 +      GRETH_REGSAVE(regs->esa_lsb, dev->dev_addr[2] << 24 | dev->dev_addr[3] << 16 |
 +                    dev->dev_addr[4] << 8 | dev->dev_addr[5]);
 +
 +      /* Clear all pending interrupts except PHY irq */
 +      GRETH_REGSAVE(regs->status, 0xFF);
 +
 +      if (greth->gbit_mac) {
 +              dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
 +                      NETIF_F_RXCSUM;
 +              dev->features = dev->hw_features | NETIF_F_HIGHDMA;
 +              greth_netdev_ops.ndo_start_xmit = greth_start_xmit_gbit;
 +      }
 +
 +      if (greth->multicast) {
 +              greth_netdev_ops.ndo_set_rx_mode = greth_set_multicast_list;
 +              dev->flags |= IFF_MULTICAST;
 +      } else {
 +              dev->flags &= ~IFF_MULTICAST;
 +      }
 +
 +      dev->netdev_ops = &greth_netdev_ops;
 +      dev->ethtool_ops = &greth_ethtool_ops;
 +
 +      err = register_netdev(dev);
 +      if (err) {
 +              if (netif_msg_probe(greth))
 +                      dev_err(greth->dev, "netdevice registration failed.\n");
 +              goto error5;
 +      }
 +
 +      /* setup NAPI */
 +      netif_napi_add(dev, &greth->napi, greth_poll, 64);
 +
 +      return 0;
 +
 +error5:
 +      dma_free_coherent(greth->dev, 1024, greth->rx_bd_base, greth->rx_bd_base_phys);
 +error4:
 +      dma_free_coherent(greth->dev, 1024, greth->tx_bd_base, greth->tx_bd_base_phys);
 +error3:
 +      mdiobus_unregister(greth->mdio);
 +error2:
 +      of_iounmap(&ofdev->resource[0], greth->regs, resource_size(&ofdev->resource[0]));
 +error1:
 +      free_netdev(dev);
 +      return err;
 +}
 +
 +static int __devexit greth_of_remove(struct platform_device *of_dev)
 +{
 +      struct net_device *ndev = dev_get_drvdata(&of_dev->dev);
 +      struct greth_private *greth = netdev_priv(ndev);
 +
 +      /* Free descriptor areas */
 +      dma_free_coherent(&of_dev->dev, 1024, greth->rx_bd_base, greth->rx_bd_base_phys);
 +
 +      dma_free_coherent(&of_dev->dev, 1024, greth->tx_bd_base, greth->tx_bd_base_phys);
 +
 +      dev_set_drvdata(&of_dev->dev, NULL);
 +
 +      if (greth->phy)
 +              phy_stop(greth->phy);
 +      mdiobus_unregister(greth->mdio);
 +
 +      unregister_netdev(ndev);
 +      free_netdev(ndev);
 +
 +      of_iounmap(&of_dev->resource[0], greth->regs, resource_size(&of_dev->resource[0]));
 +
 +      return 0;
 +}
 +
 +static struct of_device_id greth_of_match[] = {
 +      {
 +       .name = "GAISLER_ETHMAC",
 +       },
 +      {
 +       .name = "01_01d",
 +       },
 +      {},
 +};
 +
 +MODULE_DEVICE_TABLE(of, greth_of_match);
 +
 +static struct platform_driver greth_of_driver = {
 +      .driver = {
 +              .name = "grlib-greth",
 +              .owner = THIS_MODULE,
 +              .of_match_table = greth_of_match,
 +      },
 +      .probe = greth_of_probe,
 +      .remove = __devexit_p(greth_of_remove),
 +};
 +
 +static int __init greth_init(void)
 +{
 +      return platform_driver_register(&greth_of_driver);
 +}
 +
 +static void __exit greth_cleanup(void)
 +{
 +      platform_driver_unregister(&greth_of_driver);
 +}
 +
 +module_init(greth_init);
 +module_exit(greth_cleanup);
 +
 +MODULE_AUTHOR("Aeroflex Gaisler AB.");
 +MODULE_DESCRIPTION("Aeroflex Gaisler Ethernet MAC driver");
 +MODULE_LICENSE("GPL");
@@@ -103,6 -103,7 +103,7 @@@ struct greth_private 
  
        unsigned char *tx_bufs[GRETH_TXBD_NUM];
        unsigned char *rx_bufs[GRETH_RXBD_NUM];
+       u16 tx_bufs_length[GRETH_TXBD_NUM];
  
        u16 tx_next;
        u16 tx_last;
index c2b630c,0000000..7d5ded8
mode 100644,000000..100644
--- /dev/null
@@@ -1,767 -1,0 +1,770 @@@
 +/*
 + *  linux/drivers/net/am79c961.c
 + *
 + *  by Russell King <rmk@arm.linux.org.uk> 1995-2001.
 + *
 + * This program is free software; you can redistribute it and/or modify
 + * it under the terms of the GNU General Public License version 2 as
 + * published by the Free Software Foundation.
 + *
 + * Derived from various things including skeleton.c
 + *
 + * This is a special driver for the am79c961A Lance chip used in the
 + * Intel (formerly Digital Equipment Corp) EBSA110 platform.  Please
 + * note that this cannot be built as a module (it doesn't make sense).
 + */
 +#include <linux/kernel.h>
 +#include <linux/types.h>
 +#include <linux/interrupt.h>
 +#include <linux/ioport.h>
 +#include <linux/slab.h>
 +#include <linux/string.h>
 +#include <linux/errno.h>
 +#include <linux/netdevice.h>
 +#include <linux/etherdevice.h>
 +#include <linux/delay.h>
 +#include <linux/init.h>
 +#include <linux/crc32.h>
 +#include <linux/bitops.h>
 +#include <linux/platform_device.h>
 +#include <linux/io.h>
 +
 +#include <mach/hardware.h>
 +#include <asm/system.h>
 +
 +#define TX_BUFFERS 15
 +#define RX_BUFFERS 25
 +
 +#include "am79c961a.h"
 +
 +static irqreturn_t
 +am79c961_interrupt (int irq, void *dev_id);
 +
 +static unsigned int net_debug = NET_DEBUG;
 +
 +static const char version[] =
 +      "am79c961 ethernet driver (C) 1995-2001 Russell King v0.04\n";
 +
 +/* --------------------------------------------------------------------------- */
 +
 +#ifdef __arm__
 +static void write_rreg(u_long base, u_int reg, u_int val)
 +{
 +      asm volatile(
 +      "str%?h %1, [%2]        @ NET_RAP\n\t"
 +      "str%?h %0, [%2, #-4]   @ NET_RDP"
 +      :
 +      : "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464));
 +}
 +
 +static inline unsigned short read_rreg(u_long base_addr, u_int reg)
 +{
 +      unsigned short v;
 +      asm volatile(
 +      "str%?h %1, [%2]        @ NET_RAP\n\t"
 +      "ldr%?h %0, [%2, #-4]   @ NET_RDP"
 +      : "=r" (v)
 +      : "r" (reg), "r" (ISAIO_BASE + 0x0464));
 +      return v;
 +}
 +
 +static inline void write_ireg(u_long base, u_int reg, u_int val)
 +{
 +      asm volatile(
 +      "str%?h %1, [%2]        @ NET_RAP\n\t"
 +      "str%?h %0, [%2, #8]    @ NET_IDP"
 +      :
 +      : "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464));
 +}
 +
 +static inline unsigned short read_ireg(u_long base_addr, u_int reg)
 +{
 +      u_short v;
 +      asm volatile(
 +      "str%?h %1, [%2]        @ NET_RAP\n\t"
 +      "ldr%?h %0, [%2, #8]    @ NET_IDP\n\t"
 +      : "=r" (v)
 +      : "r" (reg), "r" (ISAIO_BASE + 0x0464));
 +      return v;
 +}
 +
 +#define am_writeword(dev,off,val) __raw_writew(val, ISAMEM_BASE + ((off) << 1))
 +#define am_readword(dev,off)      __raw_readw(ISAMEM_BASE + ((off) << 1))
 +
 +static void
 +am_writebuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned int length)
 +{
 +      offset = ISAMEM_BASE + (offset << 1);
 +      length = (length + 1) & ~1;
 +      if ((int)buf & 2) {
 +              asm volatile("str%?h    %2, [%0], #4"
 +               : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8)));
 +              buf += 2;
 +              length -= 2;
 +      }
 +      while (length > 8) {
 +              register unsigned int tmp asm("r2"), tmp2 asm("r3");
 +              asm volatile(
 +                      "ldm%?ia        %0!, {%1, %2}"
 +                      : "+r" (buf), "=&r" (tmp), "=&r" (tmp2));
 +              length -= 8;
 +              asm volatile(
 +                      "str%?h %1, [%0], #4\n\t"
 +                      "mov%?  %1, %1, lsr #16\n\t"
 +                      "str%?h %1, [%0], #4\n\t"
 +                      "str%?h %2, [%0], #4\n\t"
 +                      "mov%?  %2, %2, lsr #16\n\t"
 +                      "str%?h %2, [%0], #4"
 +              : "+r" (offset), "=&r" (tmp), "=&r" (tmp2));
 +      }
 +      while (length > 0) {
 +              asm volatile("str%?h    %2, [%0], #4"
 +               : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8)));
 +              buf += 2;
 +              length -= 2;
 +      }
 +}
 +
 +static void
 +am_readbuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned int length)
 +{
 +      offset = ISAMEM_BASE + (offset << 1);
 +      length = (length + 1) & ~1;
 +      if ((int)buf & 2) {
 +              unsigned int tmp;
 +              asm volatile(
 +                      "ldr%?h %2, [%0], #4\n\t"
 +                      "str%?b %2, [%1], #1\n\t"
 +                      "mov%?  %2, %2, lsr #8\n\t"
 +                      "str%?b %2, [%1], #1"
 +              : "=&r" (offset), "=&r" (buf), "=r" (tmp): "0" (offset), "1" (buf));
 +              length -= 2;
 +      }
 +      while (length > 8) {
 +              register unsigned int tmp asm("r2"), tmp2 asm("r3"), tmp3;
 +              asm volatile(
 +                      "ldr%?h %2, [%0], #4\n\t"
 +                      "ldr%?h %4, [%0], #4\n\t"
 +                      "ldr%?h %3, [%0], #4\n\t"
 +                      "orr%?  %2, %2, %4, lsl #16\n\t"
 +                      "ldr%?h %4, [%0], #4\n\t"
 +                      "orr%?  %3, %3, %4, lsl #16\n\t"
 +                      "stm%?ia        %1!, {%2, %3}"
 +              : "=&r" (offset), "=&r" (buf), "=r" (tmp), "=r" (tmp2), "=r" (tmp3)
 +              : "0" (offset), "1" (buf));
 +              length -= 8;
 +      }
 +      while (length > 0) {
 +              unsigned int tmp;
 +              asm volatile(
 +                      "ldr%?h %2, [%0], #4\n\t"
 +                      "str%?b %2, [%1], #1\n\t"
 +                      "mov%?  %2, %2, lsr #8\n\t"
 +                      "str%?b %2, [%1], #1"
 +              : "=&r" (offset), "=&r" (buf), "=r" (tmp) : "0" (offset), "1" (buf));
 +              length -= 2;
 +      }
 +}
 +#else
 +#error Not compatible
 +#endif
 +
 +static int
 +am79c961_ramtest(struct net_device *dev, unsigned int val)
 +{
 +      unsigned char *buffer = kmalloc (65536, GFP_KERNEL);
 +      int i, error = 0, errorcount = 0;
 +
 +      if (!buffer)
 +              return 0;
 +      memset (buffer, val, 65536);
 +      am_writebuffer(dev, 0, buffer, 65536);
 +      memset (buffer, val ^ 255, 65536);
 +      am_readbuffer(dev, 0, buffer, 65536);
 +      for (i = 0; i < 65536; i++) {
 +              if (buffer[i] != val && !error) {
 +                      printk ("%s: buffer error (%02X %02X) %05X - ", dev->name, val, buffer[i], i);
 +                      error = 1;
 +                      errorcount ++;
 +              } else if (error && buffer[i] == val) {
 +                      printk ("%05X\n", i);
 +                      error = 0;
 +              }
 +      }
 +      if (error)
 +              printk ("10000\n");
 +      kfree (buffer);
 +      return errorcount;
 +}
 +
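 +/* Logical address filter: the top six bits of the little-endian CRC of the
 + * address select one of 64 hash bits, spread across four 16-bit LADR words.
 + */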
 +static void am79c961_mc_hash(char *addr, u16 *hash)
 +{
 +      int idx, bit;
 +      u32 crc;
 +
 +      crc = ether_crc_le(ETH_ALEN, addr);
 +
 +      idx = crc >> 30;
 +      bit = (crc >> 26) & 15;
 +
 +      hash[idx] |= 1 << bit;
 +}
 +
 +static unsigned int am79c961_get_rx_mode(struct net_device *dev, u16 *hash)
 +{
 +      unsigned int mode = MODE_PORT_10BT;
 +
 +      if (dev->flags & IFF_PROMISC) {
 +              mode |= MODE_PROMISC;
 +              memset(hash, 0xff, 4 * sizeof(*hash));
 +      } else if (dev->flags & IFF_ALLMULTI) {
 +              memset(hash, 0xff, 4 * sizeof(*hash));
 +      } else {
 +              struct netdev_hw_addr *ha;
 +
 +              memset(hash, 0, 4 * sizeof(*hash));
 +
 +              netdev_for_each_mc_addr(ha, dev)
 +                      am79c961_mc_hash(ha->addr, hash);
 +      }
 +
 +      return mode;
 +}
 +
 +static void
 +am79c961_init_for_open(struct net_device *dev)
 +{
 +      struct dev_priv *priv = netdev_priv(dev);
 +      unsigned long flags;
 +      unsigned char *p;
 +      u_int hdr_addr, first_free_addr;
 +      u16 multi_hash[4], mode = am79c961_get_rx_mode(dev, multi_hash);
 +      int i;
 +
 +      /*
 +       * Stop the chip.
 +       */
 +      spin_lock_irqsave(&priv->chip_lock, flags);
 +      write_rreg (dev->base_addr, CSR0, CSR0_BABL|CSR0_CERR|CSR0_MISS|CSR0_MERR|CSR0_TINT|CSR0_RINT|CSR0_STOP);
 +      spin_unlock_irqrestore(&priv->chip_lock, flags);
 +
 +      write_ireg (dev->base_addr, 5, 0x00a0); /* Receive address LED */
 +      write_ireg (dev->base_addr, 6, 0x0081); /* Collision LED */
 +      write_ireg (dev->base_addr, 7, 0x0090); /* XMIT LED */
 +      write_ireg (dev->base_addr, 2, 0x0000); /* MODE register selects media */
 +
 +      for (i = LADRL; i <= LADRH; i++)
 +              write_rreg (dev->base_addr, i, multi_hash[i - LADRL]);
 +
 +      for (i = PADRL, p = dev->dev_addr; i <= PADRH; i++, p += 2)
 +              write_rreg (dev->base_addr, i, p[0] | (p[1] << 8));
 +
 +      write_rreg (dev->base_addr, MODE, mode);
 +      write_rreg (dev->base_addr, POLLINT, 0);
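 +      /* Ring sizes are programmed as negative (two's complement) values. */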
 +      write_rreg (dev->base_addr, SIZERXR, -RX_BUFFERS);
 +      write_rreg (dev->base_addr, SIZETXR, -TX_BUFFERS);
 +
 +      first_free_addr = RX_BUFFERS * 8 + TX_BUFFERS * 8 + 16;
 +      hdr_addr = 0;
 +
 +      priv->rxhead = 0;
 +      priv->rxtail = 0;
 +      priv->rxhdr = hdr_addr;
 +
 +      for (i = 0; i < RX_BUFFERS; i++) {
 +              priv->rxbuffer[i] = first_free_addr;
 +              am_writeword (dev, hdr_addr, first_free_addr);
 +              am_writeword (dev, hdr_addr + 2, RMD_OWN);
 +              am_writeword (dev, hdr_addr + 4, (-1600));
 +              am_writeword (dev, hdr_addr + 6, 0);
 +              first_free_addr += 1600;
 +              hdr_addr += 8;
 +      }
 +      priv->txhead = 0;
 +      priv->txtail = 0;
 +      priv->txhdr = hdr_addr;
 +      for (i = 0; i < TX_BUFFERS; i++) {
 +              priv->txbuffer[i] = first_free_addr;
 +              am_writeword (dev, hdr_addr, first_free_addr);
 +              am_writeword (dev, hdr_addr + 2, TMD_STP|TMD_ENP);
 +              am_writeword (dev, hdr_addr + 4, 0xf000);
 +              am_writeword (dev, hdr_addr + 6, 0);
 +              first_free_addr += 1600;
 +              hdr_addr += 8;
 +      }
 +
 +      write_rreg (dev->base_addr, BASERXL, priv->rxhdr);
 +      write_rreg (dev->base_addr, BASERXH, 0);
 +      write_rreg (dev->base_addr, BASETXL, priv->txhdr);
 +      write_rreg (dev->base_addr, BASERXH, 0);
 +      write_rreg (dev->base_addr, CSR0, CSR0_STOP);
 +      write_rreg (dev->base_addr, CSR3, CSR3_IDONM|CSR3_BABLM|CSR3_DXSUFLO);
 +      write_rreg (dev->base_addr, CSR4, CSR4_APAD_XMIT|CSR4_MFCOM|CSR4_RCVCCOM|CSR4_TXSTRTM|CSR4_JABM);
 +      write_rreg (dev->base_addr, CSR0, CSR0_IENA|CSR0_STRT);
 +}
 +
 +static void am79c961_timer(unsigned long data)
 +{
 +      struct net_device *dev = (struct net_device *)data;
 +      struct dev_priv *priv = netdev_priv(dev);
 +      unsigned int lnkstat, carrier;
++      unsigned long flags;
 +
++      spin_lock_irqsave(&priv->chip_lock, flags);
 +      lnkstat = read_ireg(dev->base_addr, ISALED0) & ISALED0_LNKST;
++      spin_unlock_irqrestore(&priv->chip_lock, flags);
 +      carrier = netif_carrier_ok(dev);
 +
 +      if (lnkstat && !carrier) {
 +              netif_carrier_on(dev);
 +              printk("%s: link up\n", dev->name);
 +      } else if (!lnkstat && carrier) {
 +              netif_carrier_off(dev);
 +              printk("%s: link down\n", dev->name);
 +      }
 +
 +      mod_timer(&priv->timer, jiffies + msecs_to_jiffies(500));
 +}
 +
 +/*
 + * Open/initialize the board.
 + */
 +static int
 +am79c961_open(struct net_device *dev)
 +{
 +      struct dev_priv *priv = netdev_priv(dev);
 +      int ret;
 +
 +      ret = request_irq(dev->irq, am79c961_interrupt, 0, dev->name, dev);
 +      if (ret)
 +              return ret;
 +
 +      am79c961_init_for_open(dev);
 +
 +      netif_carrier_off(dev);
 +
 +      priv->timer.expires = jiffies;
 +      add_timer(&priv->timer);
 +
 +      netif_start_queue(dev);
 +
 +      return 0;
 +}
 +
 +/*
 + * The inverse routine to am79c961_open().
 + */
 +static int
 +am79c961_close(struct net_device *dev)
 +{
 +      struct dev_priv *priv = netdev_priv(dev);
 +      unsigned long flags;
 +
 +      del_timer_sync(&priv->timer);
 +
 +      netif_stop_queue(dev);
 +      netif_carrier_off(dev);
 +
 +      spin_lock_irqsave(&priv->chip_lock, flags);
 +      write_rreg (dev->base_addr, CSR0, CSR0_STOP);
 +      write_rreg (dev->base_addr, CSR3, CSR3_MASKALL);
 +      spin_unlock_irqrestore(&priv->chip_lock, flags);
 +
 +      free_irq (dev->irq, dev);
 +
 +      return 0;
 +}
 +
 +/*
 + * Set or clear promiscuous/multicast mode filter for this adapter.
 + */
 +static void am79c961_setmulticastlist (struct net_device *dev)
 +{
 +      struct dev_priv *priv = netdev_priv(dev);
 +      unsigned long flags;
 +      u16 multi_hash[4], mode = am79c961_get_rx_mode(dev, multi_hash);
 +      int i, stopped;
 +
 +      spin_lock_irqsave(&priv->chip_lock, flags);
 +
 +      stopped = read_rreg(dev->base_addr, CSR0) & CSR0_STOP;
 +
 +      if (!stopped) {
 +              /*
 +               * Put the chip into suspend mode
 +               */
 +              write_rreg(dev->base_addr, CTRL1, CTRL1_SPND);
 +
 +              /*
 +               * Spin waiting for chip to report suspend mode
 +               */
 +              while ((read_rreg(dev->base_addr, CTRL1) & CTRL1_SPND) == 0) {
 +                      spin_unlock_irqrestore(&priv->chip_lock, flags);
 +                      nop();
 +                      spin_lock_irqsave(&priv->chip_lock, flags);
 +              }
 +      }
 +
 +      /*
 +       * Update the multicast hash table
 +       */
 +      for (i = 0; i < ARRAY_SIZE(multi_hash); i++)
 +              write_rreg(dev->base_addr, i + LADRL, multi_hash[i]);
 +
 +      /*
 +       * Write the mode register
 +       */
 +      write_rreg(dev->base_addr, MODE, mode);
 +
 +      if (!stopped) {
 +              /*
 +               * Put the chip back into running mode
 +               */
 +              write_rreg(dev->base_addr, CTRL1, 0);
 +      }
 +
 +      spin_unlock_irqrestore(&priv->chip_lock, flags);
 +}
 +
 +static void am79c961_timeout(struct net_device *dev)
 +{
 +      printk(KERN_WARNING "%s: transmit timed out, network cable problem?\n",
 +              dev->name);
 +
 +      /*
 +       * ought to do some setup of the tx side here
 +       */
 +
 +      netif_wake_queue(dev);
 +}
 +
 +/*
 + * Transmit a packet
 + */
 +static int
 +am79c961_sendpacket(struct sk_buff *skb, struct net_device *dev)
 +{
 +      struct dev_priv *priv = netdev_priv(dev);
 +      unsigned int hdraddr, bufaddr;
 +      unsigned int head;
 +      unsigned long flags;
 +
 +      head = priv->txhead;
 +      hdraddr = priv->txhdr + (head << 3);
 +      bufaddr = priv->txbuffer[head];
 +      head += 1;
 +      if (head >= TX_BUFFERS)
 +              head = 0;
 +
 +      am_writebuffer (dev, bufaddr, skb->data, skb->len);
 +      am_writeword (dev, hdraddr + 4, -skb->len);
 +      am_writeword (dev, hdraddr + 2, TMD_OWN|TMD_STP|TMD_ENP);
 +      priv->txhead = head;
 +
 +      spin_lock_irqsave(&priv->chip_lock, flags);
 +      write_rreg (dev->base_addr, CSR0, CSR0_TDMD|CSR0_IENA);
 +      spin_unlock_irqrestore(&priv->chip_lock, flags);
 +
 +      /*
 +       * If the next packet is owned by the ethernet device,
 +       * then the tx ring is full and we can't add another
 +       * packet.
 +       */
 +      if (am_readword(dev, priv->txhdr + (priv->txhead << 3) + 2) & TMD_OWN)
 +              netif_stop_queue(dev);
 +
 +      dev_kfree_skb(skb);
 +
 +      return NETDEV_TX_OK;
 +}
 +
 +/*
 + * If we have a good packet(s), get it/them out of the buffers.
 + */
 +static void
 +am79c961_rx(struct net_device *dev, struct dev_priv *priv)
 +{
 +      do {
 +              struct sk_buff *skb;
 +              u_int hdraddr;
 +              u_int pktaddr;
 +              u_int status;
 +              int len;
 +
 +              hdraddr = priv->rxhdr + (priv->rxtail << 3);
 +              pktaddr = priv->rxbuffer[priv->rxtail];
 +
 +              status = am_readword (dev, hdraddr + 2);
 +              if (status & RMD_OWN) /* do we own it? */
 +                      break;
 +
 +              priv->rxtail ++;
 +              if (priv->rxtail >= RX_BUFFERS)
 +                      priv->rxtail = 0;
 +
 +              if ((status & (RMD_ERR|RMD_STP|RMD_ENP)) != (RMD_STP|RMD_ENP)) {
 +                      am_writeword (dev, hdraddr + 2, RMD_OWN);
 +                      dev->stats.rx_errors++;
 +                      if (status & RMD_ERR) {
 +                              if (status & RMD_FRAM)
 +                                      dev->stats.rx_frame_errors++;
 +                              if (status & RMD_CRC)
 +                                      dev->stats.rx_crc_errors++;
 +                      } else if (status & RMD_STP)
 +                              dev->stats.rx_length_errors++;
 +                      continue;
 +              }
 +
 +              len = am_readword(dev, hdraddr + 6);
 +              skb = dev_alloc_skb(len + 2);
 +
 +              if (skb) {
 +                      skb_reserve(skb, 2);
 +
 +                      am_readbuffer(dev, pktaddr, skb_put(skb, len), len);
 +                      am_writeword(dev, hdraddr + 2, RMD_OWN);
 +                      skb->protocol = eth_type_trans(skb, dev);
 +                      netif_rx(skb);
 +                      dev->stats.rx_bytes += len;
 +                      dev->stats.rx_packets++;
 +              } else {
 +                      am_writeword (dev, hdraddr + 2, RMD_OWN);
 +                      printk (KERN_WARNING "%s: memory squeeze, dropping packet.\n", dev->name);
 +                      dev->stats.rx_dropped++;
 +                      break;
 +              }
 +      } while (1);
 +}
 +
 +/*
 + * Update stats for the transmitted packet
 + */
 +static void
 +am79c961_tx(struct net_device *dev, struct dev_priv *priv)
 +{
 +      do {
 +              short len;
 +              u_int hdraddr;
 +              u_int status;
 +
 +              hdraddr = priv->txhdr + (priv->txtail << 3);
 +              status = am_readword (dev, hdraddr + 2);
 +              if (status & TMD_OWN)
 +                      break;
 +
 +              priv->txtail ++;
 +              if (priv->txtail >= TX_BUFFERS)
 +                      priv->txtail = 0;
 +
 +              if (status & TMD_ERR) {
 +                      u_int status2;
 +
 +                      dev->stats.tx_errors++;
 +
 +                      status2 = am_readword (dev, hdraddr + 6);
 +
 +                      /*
 +                       * Clear the error byte
 +                       */
 +                      am_writeword (dev, hdraddr + 6, 0);
 +
 +                      if (status2 & TST_RTRY)
 +                              dev->stats.collisions += 16;
 +                      if (status2 & TST_LCOL)
 +                              dev->stats.tx_window_errors++;
 +                      if (status2 & TST_LCAR)
 +                              dev->stats.tx_carrier_errors++;
 +                      if (status2 & TST_UFLO)
 +                              dev->stats.tx_fifo_errors++;
 +                      continue;
 +              }
 +              dev->stats.tx_packets++;
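 +		/*
 +		 * The BD length field was written as -skb->len in
 +		 * am79c961_sendpacket(), so negating it below yields the
 +		 * transmitted byte count.
 +		 */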
 +              len = am_readword (dev, hdraddr + 4);
 +              dev->stats.tx_bytes += -len;
 +      } while (priv->txtail != priv->txhead);
 +
 +      netif_wake_queue(dev);
 +}
 +
 +static irqreturn_t
 +am79c961_interrupt(int irq, void *dev_id)
 +{
 +      struct net_device *dev = (struct net_device *)dev_id;
 +      struct dev_priv *priv = netdev_priv(dev);
 +      u_int status, n = 100;
 +      int handled = 0;
 +
 +      do {
 +              status = read_rreg(dev->base_addr, CSR0);
 +              write_rreg(dev->base_addr, CSR0, status &
 +                         (CSR0_IENA|CSR0_TINT|CSR0_RINT|
 +                          CSR0_MERR|CSR0_MISS|CSR0_CERR|CSR0_BABL));
 +
 +              if (status & CSR0_RINT) {
 +                      handled = 1;
 +                      am79c961_rx(dev, priv);
 +              }
 +              if (status & CSR0_TINT) {
 +                      handled = 1;
 +                      am79c961_tx(dev, priv);
 +              }
 +              if (status & CSR0_MISS) {
 +                      handled = 1;
 +                      dev->stats.rx_dropped++;
 +              }
 +              if (status & CSR0_CERR) {
 +                      handled = 1;
 +                      mod_timer(&priv->timer, jiffies);
 +              }
 +      } while (--n && status & (CSR0_RINT | CSR0_TINT));
 +
 +      return IRQ_RETVAL(handled);
 +}
 +
 +#ifdef CONFIG_NET_POLL_CONTROLLER
 +static void am79c961_poll_controller(struct net_device *dev)
 +{
 +      unsigned long flags;
 +      local_irq_save(flags);
 +      am79c961_interrupt(dev->irq, dev);
 +      local_irq_restore(flags);
 +}
 +#endif
 +
 +/*
 + * Initialise the chip.  Note that we always expect
 + * to be entered with interrupts enabled.
 + */
 +static int
 +am79c961_hw_init(struct net_device *dev)
 +{
 +      struct dev_priv *priv = netdev_priv(dev);
 +
 +      spin_lock_irq(&priv->chip_lock);
 +      write_rreg (dev->base_addr, CSR0, CSR0_STOP);
 +      write_rreg (dev->base_addr, CSR3, CSR3_MASKALL);
 +      spin_unlock_irq(&priv->chip_lock);
 +
 +      am79c961_ramtest(dev, 0x66);
 +      am79c961_ramtest(dev, 0x99);
 +
 +      return 0;
 +}
 +
 +static void __init am79c961_banner(void)
 +{
 +      static unsigned version_printed;
 +
 +      if (net_debug && version_printed++ == 0)
 +              printk(KERN_INFO "%s", version);
 +}
 +static const struct net_device_ops am79c961_netdev_ops = {
 +      .ndo_open               = am79c961_open,
 +      .ndo_stop               = am79c961_close,
 +      .ndo_start_xmit         = am79c961_sendpacket,
 +      .ndo_set_rx_mode        = am79c961_setmulticastlist,
 +      .ndo_tx_timeout         = am79c961_timeout,
 +      .ndo_validate_addr      = eth_validate_addr,
 +      .ndo_change_mtu         = eth_change_mtu,
 +      .ndo_set_mac_address    = eth_mac_addr,
 +#ifdef CONFIG_NET_POLL_CONTROLLER
 +      .ndo_poll_controller    = am79c961_poll_controller,
 +#endif
 +};
 +
 +static int __devinit am79c961_probe(struct platform_device *pdev)
 +{
 +      struct resource *res;
 +      struct net_device *dev;
 +      struct dev_priv *priv;
 +      int i, ret;
 +
 +      res = platform_get_resource(pdev, IORESOURCE_IO, 0);
 +      if (!res)
 +              return -ENODEV;
 +
 +      dev = alloc_etherdev(sizeof(struct dev_priv));
 +      ret = -ENOMEM;
 +      if (!dev)
 +              goto out;
 +
 +      SET_NETDEV_DEV(dev, &pdev->dev);
 +
 +      priv = netdev_priv(dev);
 +
 +      /*
 +       * Fixed address and IRQ lines here.
 +       * The PNP initialisation should have been
 +       * done by the ether bootp loader.
 +       */
 +      dev->base_addr = res->start;
 +      ret = platform_get_irq(pdev, 0);
 +
 +      if (ret < 0) {
 +              ret = -ENODEV;
 +              goto nodev;
 +      }
 +      dev->irq = ret;
 +
 +      ret = -ENODEV;
 +      if (!request_region(dev->base_addr, 0x18, dev->name))
 +              goto nodev;
 +
 +      /*
 +       * Reset the device.
 +       */
 +      inb(dev->base_addr + NET_RESET);
 +      udelay(5);
 +
 +      /*
 +       * Check the manufacturer part of the
 +       * ether address.
 +       */
 +      if (inb(dev->base_addr) != 0x08 ||
 +          inb(dev->base_addr + 2) != 0x00 ||
 +          inb(dev->base_addr + 4) != 0x2b)
 +              goto release;
 +
 +      for (i = 0; i < 6; i++)
 +              dev->dev_addr[i] = inb(dev->base_addr + i * 2) & 0xff;
 +
 +      am79c961_banner();
 +
 +      spin_lock_init(&priv->chip_lock);
 +      init_timer(&priv->timer);
 +      priv->timer.data = (unsigned long)dev;
 +      priv->timer.function = am79c961_timer;
 +
 +      if (am79c961_hw_init(dev))
 +              goto release;
 +
 +      dev->netdev_ops = &am79c961_netdev_ops;
 +
 +      ret = register_netdev(dev);
 +      if (ret == 0) {
 +              printk(KERN_INFO "%s: ether address %pM\n",
 +                     dev->name, dev->dev_addr);
 +              return 0;
 +      }
 +
 +release:
 +      release_region(dev->base_addr, 0x18);
 +nodev:
 +      free_netdev(dev);
 +out:
 +      return ret;
 +}
 +
 +static struct platform_driver am79c961_driver = {
 +      .probe          = am79c961_probe,
 +      .driver         = {
 +              .name   = "am79c961",
 +      },
 +};
 +
 +static int __init am79c961_init(void)
 +{
 +      return platform_driver_register(&am79c961_driver);
 +}
 +
 +__initcall(am79c961_init);
index f127768,0000000..2f92487
mode 100644,000000..100644
--- /dev/null
@@@ -1,2009 -1,0 +1,2069 @@@
 +/* bnx2x.h: Broadcom Everest network driver.
 + *
 + * Copyright (c) 2007-2011 Broadcom Corporation
 + *
 + * This program is free software; you can redistribute it and/or modify
 + * it under the terms of the GNU General Public License as published by
 + * the Free Software Foundation.
 + *
 + * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 + * Written by: Eliezer Tamir
 + * Based on code from Michael Chan's bnx2 driver
 + */
 +
 +#ifndef BNX2X_H
 +#define BNX2X_H
 +#include <linux/netdevice.h>
 +#include <linux/dma-mapping.h>
 +#include <linux/types.h>
 +
 +/* compilation time flags */
 +
 +/* define this to make the driver freeze on error to allow getting debug info
 + * (you will need to reboot afterwards) */
 +/* #define BNX2X_STOP_ON_ERROR */
 +
 +#define DRV_MODULE_VERSION      "1.70.00-0"
 +#define DRV_MODULE_RELDATE      "2011/06/13"
 +#define BNX2X_BC_VER            0x040200
 +
 +#if defined(CONFIG_DCB)
 +#define BCM_DCBNL
 +#endif
 +#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
 +#define BCM_CNIC 1
 +#include "../cnic_if.h"
 +#endif
 +
 +#ifdef BCM_CNIC
 +#define BNX2X_MIN_MSIX_VEC_CNT 3
 +#define BNX2X_MSIX_VEC_FP_START 2
 +#else
 +#define BNX2X_MIN_MSIX_VEC_CNT 2
 +#define BNX2X_MSIX_VEC_FP_START 1
 +#endif
 +
 +#include <linux/mdio.h>
 +
 +#include "bnx2x_reg.h"
 +#include "bnx2x_fw_defs.h"
 +#include "bnx2x_hsi.h"
 +#include "bnx2x_link.h"
 +#include "bnx2x_sp.h"
 +#include "bnx2x_dcb.h"
 +#include "bnx2x_stats.h"
 +
 +/* error/debug prints */
 +
 +#define DRV_MODULE_NAME               "bnx2x"
 +
 +/* for messages that are currently off */
 +#define BNX2X_MSG_OFF                 0
 +#define BNX2X_MSG_MCP                 0x010000 /* was: NETIF_MSG_HW */
 +#define BNX2X_MSG_STATS                       0x020000 /* was: NETIF_MSG_TIMER */
 +#define BNX2X_MSG_NVM                 0x040000 /* was: NETIF_MSG_HW */
 +#define BNX2X_MSG_DMAE                        0x080000 /* was: NETIF_MSG_HW */
 +#define BNX2X_MSG_SP                  0x100000 /* was: NETIF_MSG_INTR */
 +#define BNX2X_MSG_FP                  0x200000 /* was: NETIF_MSG_INTR */
 +
 +/* regular debug print */
 +#define DP(__mask, fmt, ...)                                  \
 +do {                                                          \
 +      if (bp->msg_enable & (__mask))                          \
 +              pr_notice("[%s:%d(%s)]" fmt,                    \
 +                        __func__, __LINE__,                   \
 +                        bp->dev ? (bp->dev->name) : "?",      \
 +                        ##__VA_ARGS__);                       \
 +} while (0)
 +
 +#define DP_CONT(__mask, fmt, ...)                             \
 +do {                                                          \
 +      if (bp->msg_enable & (__mask))                          \
 +              pr_cont(fmt, ##__VA_ARGS__);                    \
 +} while (0)
 +
 +/* errors debug print */
 +#define BNX2X_DBG_ERR(fmt, ...)                                       \
 +do {                                                          \
 +      if (netif_msg_probe(bp))                                \
 +              pr_err("[%s:%d(%s)]" fmt,                       \
 +                     __func__, __LINE__,                      \
 +                     bp->dev ? (bp->dev->name) : "?",         \
 +                     ##__VA_ARGS__);                          \
 +} while (0)
 +
 +/* for errors (never masked) */
 +#define BNX2X_ERR(fmt, ...)                                   \
 +do {                                                          \
 +      pr_err("[%s:%d(%s)]" fmt,                               \
 +             __func__, __LINE__,                              \
 +             bp->dev ? (bp->dev->name) : "?",                 \
 +             ##__VA_ARGS__);                                  \
 +} while (0)
 +
 +#define BNX2X_ERROR(fmt, ...)                                 \
 +      pr_err("[%s:%d]" fmt, __func__, __LINE__, ##__VA_ARGS__)
 +
 +
 +/* before we have a dev->name use dev_info() */
 +#define BNX2X_DEV_INFO(fmt, ...)                               \
 +do {                                                           \
 +      if (netif_msg_probe(bp))                                 \
 +              dev_info(&bp->pdev->dev, fmt, ##__VA_ARGS__);    \
 +} while (0)
 +
 +#ifdef BNX2X_STOP_ON_ERROR
 +void bnx2x_int_disable(struct bnx2x *bp);
 +#define bnx2x_panic()                         \
 +do {                                          \
 +      bp->panic = 1;                          \
 +      BNX2X_ERR("driver assert\n");           \
 +      bnx2x_int_disable(bp);                  \
 +      bnx2x_panic_dump(bp);                   \
 +} while (0)
 +#else
 +#define bnx2x_panic()                         \
 +do {                                          \
 +      bp->panic = 1;                          \
 +      BNX2X_ERR("driver assert\n");           \
 +      bnx2x_panic_dump(bp);                   \
 +} while (0)
 +#endif
 +
 +#define bnx2x_mc_addr(ha)      ((ha)->addr)
 +#define bnx2x_uc_addr(ha)      ((ha)->addr)
 +
 +#define U64_LO(x)                     (u32)(((u64)(x)) & 0xffffffff)
 +#define U64_HI(x)                     (u32)(((u64)(x)) >> 32)
 +#define HILO_U64(hi, lo)              ((((u64)(hi)) << 32) + (lo))
 +
 +
 +#define REG_ADDR(bp, offset)          ((bp->regview) + (offset))
 +
 +#define REG_RD(bp, offset)            readl(REG_ADDR(bp, offset))
 +#define REG_RD8(bp, offset)           readb(REG_ADDR(bp, offset))
 +#define REG_RD16(bp, offset)          readw(REG_ADDR(bp, offset))
 +
 +#define REG_WR(bp, offset, val)               writel((u32)val, REG_ADDR(bp, offset))
 +#define REG_WR8(bp, offset, val)      writeb((u8)val, REG_ADDR(bp, offset))
 +#define REG_WR16(bp, offset, val)     writew((u16)val, REG_ADDR(bp, offset))
 +
 +#define REG_RD_IND(bp, offset)                bnx2x_reg_rd_ind(bp, offset)
 +#define REG_WR_IND(bp, offset, val)   bnx2x_reg_wr_ind(bp, offset, val)
 +
 +#define REG_RD_DMAE(bp, offset, valp, len32) \
 +      do { \
 +              bnx2x_read_dmae(bp, offset, len32);\
 +              memcpy(valp, bnx2x_sp(bp, wb_data[0]), (len32) * 4); \
 +      } while (0)
 +
 +#define REG_WR_DMAE(bp, offset, valp, len32) \
 +      do { \
 +              memcpy(bnx2x_sp(bp, wb_data[0]), valp, (len32) * 4); \
 +              bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), \
 +                               offset, len32); \
 +      } while (0)
 +
 +#define REG_WR_DMAE_LEN(bp, offset, valp, len32) \
 +      REG_WR_DMAE(bp, offset, valp, len32)
 +
 +#define VIRT_WR_DMAE_LEN(bp, data, addr, len32, le32_swap) \
 +      do { \
 +              memcpy(GUNZIP_BUF(bp), data, (len32) * 4); \
 +              bnx2x_write_big_buf_wb(bp, addr, len32); \
 +      } while (0)
 +
 +#define SHMEM_ADDR(bp, field)         (bp->common.shmem_base + \
 +                                       offsetof(struct shmem_region, field))
 +#define SHMEM_RD(bp, field)           REG_RD(bp, SHMEM_ADDR(bp, field))
 +#define SHMEM_WR(bp, field, val)      REG_WR(bp, SHMEM_ADDR(bp, field), val)
 +
 +#define SHMEM2_ADDR(bp, field)                (bp->common.shmem2_base + \
 +                                       offsetof(struct shmem2_region, field))
 +#define SHMEM2_RD(bp, field)          REG_RD(bp, SHMEM2_ADDR(bp, field))
 +#define SHMEM2_WR(bp, field, val)     REG_WR(bp, SHMEM2_ADDR(bp, field), val)
 +#define MF_CFG_ADDR(bp, field)                (bp->common.mf_cfg_base + \
 +                                       offsetof(struct mf_cfg, field))
 +#define MF2_CFG_ADDR(bp, field)               (bp->common.mf2_cfg_base + \
 +                                       offsetof(struct mf2_cfg, field))
 +
 +#define MF_CFG_RD(bp, field)          REG_RD(bp, MF_CFG_ADDR(bp, field))
 +#define MF_CFG_WR(bp, field, val)     REG_WR(bp,\
 +                                             MF_CFG_ADDR(bp, field), (val))
 +#define MF2_CFG_RD(bp, field)         REG_RD(bp, MF2_CFG_ADDR(bp, field))
 +
 +#define SHMEM2_HAS(bp, field)         ((bp)->common.shmem2_base &&    \
 +                                       (SHMEM2_RD((bp), size) >       \
 +                                       offsetof(struct shmem2_region, field)))
 +
 +#define EMAC_RD(bp, reg)              REG_RD(bp, emac_base + reg)
 +#define EMAC_WR(bp, reg, val)         REG_WR(bp, emac_base + reg, val)
 +
 +/* SP SB indices */
 +
 +/* General SP events - stats query, cfc delete, etc  */
 +#define HC_SP_INDEX_ETH_DEF_CONS              3
 +
 +/* EQ completions */
 +#define HC_SP_INDEX_EQ_CONS                   7
 +
 +/* FCoE L2 connection completions */
 +#define HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS               6
 +#define HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS               4
 +/* iSCSI L2 */
 +#define HC_SP_INDEX_ETH_ISCSI_CQ_CONS         5
 +#define HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS      1
 +
 +/* Special clients parameters */
 +
 +/* SB indices */
 +/* FCoE L2 */
 +#define BNX2X_FCOE_L2_RX_INDEX \
 +      (&bp->def_status_blk->sp_sb.\
 +      index_values[HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS])
 +
 +#define BNX2X_FCOE_L2_TX_INDEX \
 +      (&bp->def_status_blk->sp_sb.\
 +      index_values[HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS])
 +
 +/**
 + *  CIDs and CLIDs:
 + *  The CLIDs below are the CLIDs for func 0; the CLID for any other
 + *  function is calculated by the formula:
 + *
 + *  FUNC_N_CLID_X = N * NUM_SPECIAL_CLIENTS + FUNC_0_CLID_X
 + *
 + */
 +/* iSCSI L2 */
 +#define BNX2X_ISCSI_ETH_CL_ID_IDX     1
 +#define BNX2X_ISCSI_ETH_CID           49
 +
 +/* FCoE L2 */
 +#define BNX2X_FCOE_ETH_CL_ID_IDX      2
 +#define BNX2X_FCOE_ETH_CID            50
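A brief sketch of the CLID formula above, purely illustrative and not part of this header; the helper name and the NUM_SPECIAL_CLIENTS value used in the comment are assumptions.

/* Illustrative only: per-function client id index from the func-0 index. */
static inline u8 example_clid_for_func(u8 func, u8 num_special_clients,
				       u8 func0_clid_idx)
{
	/* FUNC_N_CLID_X = N * NUM_SPECIAL_CLIENTS + FUNC_0_CLID_X */
	return func * num_special_clients + func0_clid_idx;
}

/* e.g. assuming NUM_SPECIAL_CLIENTS == 3, function 2's iSCSI client id
 * index would be 2 * 3 + BNX2X_ISCSI_ETH_CL_ID_IDX == 7.
 */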
 +
 +/** Additional rings budgeting */
 +#ifdef BCM_CNIC
 +#define CNIC_PRESENT                  1
 +#define FCOE_PRESENT                  1
 +#else
 +#define CNIC_PRESENT                  0
 +#define FCOE_PRESENT                  0
 +#endif /* BCM_CNIC */
 +#define NON_ETH_CONTEXT_USE   (FCOE_PRESENT)
 +
 +#define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \
 +      AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR
 +
 +#define SM_RX_ID                      0
 +#define SM_TX_ID                      1
 +
 +/* defines for multiple tx priority indices */
 +#define FIRST_TX_ONLY_COS_INDEX               1
 +#define FIRST_TX_COS_INDEX            0
 +
 +/* defines for decoding the fastpath index and the cos index out of the
 + * transmission queue index
 + */
 +#define MAX_TXQS_PER_COS      FP_SB_MAX_E1x
 +
 +#define TXQ_TO_FP(txq_index)  ((txq_index) % MAX_TXQS_PER_COS)
 +#define TXQ_TO_COS(txq_index) ((txq_index) / MAX_TXQS_PER_COS)
 +
 +/* rules for calculating the cids of tx-only connections */
 +#define CID_TO_FP(cid)                ((cid) % MAX_TXQS_PER_COS)
 +#define CID_COS_TO_TX_ONLY_CID(cid, cos)      (cid + cos * MAX_TXQS_PER_COS)
 +
 +/* fp index inside class of service range */
 +#define FP_COS_TO_TXQ(fp, cos)    ((fp)->index + cos * MAX_TXQS_PER_COS)
 +
 +/*
 + * 0..15 eth cos0
 + * 16..31 eth cos1 if applicable
 + * 32..47 eth cos2 if applicable
 + * fcoe queue follows eth queues (16, 32, 48 depending on cos)
 + */
 +#define MAX_ETH_TXQ_IDX(bp)   (MAX_TXQS_PER_COS * (bp)->max_cos)
 +#define FCOE_TXQ_IDX(bp)      (MAX_ETH_TXQ_IDX(bp))
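A worked example of the decoding macros above may help; it uses the E1x limit FP_SB_MAX_E1x (16), defined later in this header.

/* Sketch, with MAX_TXQS_PER_COS == FP_SB_MAX_E1x == 16:
 *
 *   txq_index 35:  TXQ_TO_FP(35)  == 35 % 16 == 3   (fastpath 3)
 *                  TXQ_TO_COS(35) == 35 / 16 == 2   (class of service 2)
 *   and back:      FP_COS_TO_TXQ(fp, 2) == 3 + 2 * 16 == 35
 */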
 +
 +/* fast path */
 +struct sw_rx_bd {
 +      struct sk_buff  *skb;
 +      DEFINE_DMA_UNMAP_ADDR(mapping);
 +};
 +
 +struct sw_tx_bd {
 +      struct sk_buff  *skb;
 +      u16             first_bd;
 +      u8              flags;
 +/* Set on the first BD descriptor when there is a split BD */
 +#define BNX2X_TSO_SPLIT_BD            (1<<0)
 +};
 +
 +struct sw_rx_page {
 +      struct page     *page;
 +      DEFINE_DMA_UNMAP_ADDR(mapping);
 +};
 +
 +union db_prod {
 +      struct doorbell_set_prod data;
 +      u32             raw;
 +};
 +
++/* dropless fc FW/HW related params */
++#define BRB_SIZE(bp)          (CHIP_IS_E3(bp) ? 1024 : 512)
++#define MAX_AGG_QS(bp)                (CHIP_IS_E1(bp) ? \
++                                      ETH_MAX_AGGREGATION_QUEUES_E1 :\
++                                      ETH_MAX_AGGREGATION_QUEUES_E1H_E2)
++#define FW_DROP_LEVEL(bp)     (3 + MAX_SPQ_PENDING + MAX_AGG_QS(bp))
++#define FW_PREFETCH_CNT               16
++#define DROPLESS_FC_HEADROOM  100
 +
 +/* MC hsi */
 +#define BCM_PAGE_SHIFT                12
 +#define BCM_PAGE_SIZE         (1 << BCM_PAGE_SHIFT)
 +#define BCM_PAGE_MASK         (~(BCM_PAGE_SIZE - 1))
 +#define BCM_PAGE_ALIGN(addr)  (((addr) + BCM_PAGE_SIZE - 1) & BCM_PAGE_MASK)
 +
 +#define PAGES_PER_SGE_SHIFT   0
 +#define PAGES_PER_SGE         (1 << PAGES_PER_SGE_SHIFT)
 +#define SGE_PAGE_SIZE         PAGE_SIZE
 +#define SGE_PAGE_SHIFT                PAGE_SHIFT
 +#define SGE_PAGE_ALIGN(addr)  PAGE_ALIGN((typeof(PAGE_SIZE))(addr))
 +
 +/* SGE ring related macros */
 +#define NUM_RX_SGE_PAGES      2
 +#define RX_SGE_CNT            (BCM_PAGE_SIZE / sizeof(struct eth_rx_sge))
- #define MAX_RX_SGE_CNT                (RX_SGE_CNT - 2)
++#define NEXT_PAGE_SGE_DESC_CNT        2
++#define MAX_RX_SGE_CNT                (RX_SGE_CNT - NEXT_PAGE_SGE_DESC_CNT)
 +/* RX_SGE_CNT is promised to be a power of 2 */
 +#define RX_SGE_MASK           (RX_SGE_CNT - 1)
 +#define NUM_RX_SGE            (RX_SGE_CNT * NUM_RX_SGE_PAGES)
 +#define MAX_RX_SGE            (NUM_RX_SGE - 1)
 +#define NEXT_SGE_IDX(x)               ((((x) & RX_SGE_MASK) == \
-                                 (MAX_RX_SGE_CNT - 1)) ? (x) + 3 : (x) + 1)
++                                (MAX_RX_SGE_CNT - 1)) ? \
++                                      (x) + 1 + NEXT_PAGE_SGE_DESC_CNT : \
++                                      (x) + 1)
 +#define RX_SGE(x)             ((x) & MAX_RX_SGE)
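The ternary in NEXT_SGE_IDX skips the "next page" link descriptors at the end of each page. A minimal sketch, assuming a 16-byte struct eth_rx_sge so that RX_SGE_CNT == 256 and MAX_RX_SGE_CNT == 254 (the structure size is an assumption, not taken from this header):

/* Sketch: advancing past the last usable descriptor on a page jumps over
 * the NEXT_PAGE_SGE_DESC_CNT link descriptors.
 */
static inline void example_next_sge_idx(void)
{
	u16 idx = 253;			/* last usable SGE on the page      */
	idx = NEXT_SGE_IDX(idx);	/* 253 -> 256: skips slots 254, 255 */
	idx = NEXT_SGE_IDX(idx);	/* 256 -> 257: plain increment      */
	(void)idx;
}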
 +
++/*
++ * The number of required SGEs is the sum of two terms:
++ * 1. The number of possible open aggregations (the next packet for
++ *    each of these aggregations will probably consume an SGE immediately)
++ * 2. The remaining BRB blocks divided by 2 (a block will consume a new SGE
++ *    only after placement on the BD for a new TPA aggregation)
++ *
++ * Takes into account NEXT_PAGE_SGE_DESC_CNT "next" elements on each page
++ */
++#define NUM_SGE_REQ           (MAX_AGG_QS(bp) + \
++                                      (BRB_SIZE(bp) - MAX_AGG_QS(bp)) / 2)
++#define NUM_SGE_PG_REQ                ((NUM_SGE_REQ + MAX_RX_SGE_CNT - 1) / \
++                                              MAX_RX_SGE_CNT)
++#define SGE_TH_LO(bp)         (NUM_SGE_REQ + \
++                               NUM_SGE_PG_REQ * NEXT_PAGE_SGE_DESC_CNT)
++#define SGE_TH_HI(bp)         (SGE_TH_LO(bp) + DROPLESS_FC_HEADROOM)
++
 +/* Manipulate a bit vector defined as an array of u64 */
 +
 +/* Number of bits in one sge_mask array element */
 +#define BIT_VEC64_ELEM_SZ             64
 +#define BIT_VEC64_ELEM_SHIFT          6
 +#define BIT_VEC64_ELEM_MASK           ((u64)BIT_VEC64_ELEM_SZ - 1)
 +
 +
 +#define __BIT_VEC64_SET_BIT(el, bit) \
 +      do { \
 +              el = ((el) | ((u64)0x1 << (bit))); \
 +      } while (0)
 +
 +#define __BIT_VEC64_CLEAR_BIT(el, bit) \
 +      do { \
 +              el = ((el) & (~((u64)0x1 << (bit)))); \
 +      } while (0)
 +
 +
 +#define BIT_VEC64_SET_BIT(vec64, idx) \
 +      __BIT_VEC64_SET_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \
 +                         (idx) & BIT_VEC64_ELEM_MASK)
 +
 +#define BIT_VEC64_CLEAR_BIT(vec64, idx) \
 +      __BIT_VEC64_CLEAR_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \
 +                           (idx) & BIT_VEC64_ELEM_MASK)
 +
 +#define BIT_VEC64_TEST_BIT(vec64, idx) \
 +      (((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT] >> \
 +      ((idx) & BIT_VEC64_ELEM_MASK)) & 0x1)
 +
 +/* Creates a bitmask of all ones in less significant bits.
 +   idx - index of the most significant bit in the created mask */
 +#define BIT_VEC64_ONES_MASK(idx) \
 +              (((u64)0x1 << (((idx) & BIT_VEC64_ELEM_MASK) + 1)) - 1)
 +#define BIT_VEC64_ELEM_ONE_MASK       ((u64)(~0))
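A short usage sketch of the bit-vector helpers above; purely illustrative and not part of this header.

/* Sketch: bit 70 lives in element 70 >> 6 == 1, at position 70 & 63 == 6. */
static inline void example_bit_vec64(void)
{
	u64 vec[2] = { 0, 0 };

	BIT_VEC64_SET_BIT(vec, 70);		/* vec[1] |= 1ULL << 6      */
	if (BIT_VEC64_TEST_BIT(vec, 70))	/* reads that same bit      */
		BIT_VEC64_CLEAR_BIT(vec, 70);	/* vec[1] &= ~(1ULL << 6)   */
}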
 +
 +/*******************************************************/
 +
 +
 +
 +/* Number of u64 elements in SGE mask array */
 +#define RX_SGE_MASK_LEN                       ((NUM_RX_SGE_PAGES * RX_SGE_CNT) / \
 +                                       BIT_VEC64_ELEM_SZ)
 +#define RX_SGE_MASK_LEN_MASK          (RX_SGE_MASK_LEN - 1)
 +#define NEXT_SGE_MASK_ELEM(el)                (((el) + 1) & RX_SGE_MASK_LEN_MASK)
 +
 +union host_hc_status_block {
 +      /* pointer to fp status block e1x */
 +      struct host_hc_status_block_e1x *e1x_sb;
 +      /* pointer to fp status block e2 */
 +      struct host_hc_status_block_e2  *e2_sb;
 +};
 +
 +struct bnx2x_agg_info {
 +      /*
 +       * The first aggregation buffer is an skb; the following ones are pages.
 +       * We will preallocate the skbs for each aggregation when
 +       * we open the interface and will replace the BD at the consumer
 +       * with this one when we receive the TPA_START CQE in order to
 +       * keep the Rx BD ring consistent.
 +       */
 +      struct sw_rx_bd         first_buf;
 +      u8                      tpa_state;
 +#define BNX2X_TPA_START                       1
 +#define BNX2X_TPA_STOP                        2
 +#define BNX2X_TPA_ERROR                       3
 +      u8                      placement_offset;
 +      u16                     parsing_flags;
 +      u16                     vlan_tag;
 +      u16                     len_on_bd;
 +};
 +
 +#define Q_STATS_OFFSET32(stat_name) \
 +                      (offsetof(struct bnx2x_eth_q_stats, stat_name) / 4)
 +
 +struct bnx2x_fp_txdata {
 +
 +      struct sw_tx_bd         *tx_buf_ring;
 +
 +      union eth_tx_bd_types   *tx_desc_ring;
 +      dma_addr_t              tx_desc_mapping;
 +
 +      u32                     cid;
 +
 +      union db_prod           tx_db;
 +
 +      u16                     tx_pkt_prod;
 +      u16                     tx_pkt_cons;
 +      u16                     tx_bd_prod;
 +      u16                     tx_bd_cons;
 +
 +      unsigned long           tx_pkt;
 +
 +      __le16                  *tx_cons_sb;
 +
 +      int                     txq_index;
 +};
 +
 +struct bnx2x_fastpath {
 +      struct bnx2x            *bp; /* parent */
 +
 +#define BNX2X_NAPI_WEIGHT       128
 +      struct napi_struct      napi;
 +      union host_hc_status_block      status_blk;
 +      /* chip-independent shortcuts into sb structure */
 +      __le16                  *sb_index_values;
 +      __le16                  *sb_running_index;
 +      /* chip-independent shortcut into rx_prods_offset memory */
 +      u32                     ustorm_rx_prods_offset;
 +
 +      u32                     rx_buf_size;
 +
 +      dma_addr_t              status_blk_mapping;
 +
 +      u8                      max_cos; /* actual number of active tx coses */
 +      struct bnx2x_fp_txdata  txdata[BNX2X_MULTI_TX_COS];
 +
 +      struct sw_rx_bd         *rx_buf_ring;   /* BDs mappings ring */
 +      struct sw_rx_page       *rx_page_ring;  /* SGE pages mappings ring */
 +
 +      struct eth_rx_bd        *rx_desc_ring;
 +      dma_addr_t              rx_desc_mapping;
 +
 +      union eth_rx_cqe        *rx_comp_ring;
 +      dma_addr_t              rx_comp_mapping;
 +
 +      /* SGE ring */
 +      struct eth_rx_sge       *rx_sge_ring;
 +      dma_addr_t              rx_sge_mapping;
 +
 +      u64                     sge_mask[RX_SGE_MASK_LEN];
 +
 +      u32                     cid;
 +
 +      __le16                  fp_hc_idx;
 +
 +      u8                      index;          /* number in fp array */
 +      u8                      cl_id;          /* eth client id */
 +      u8                      cl_qzone_id;
 +      u8                      fw_sb_id;       /* status block number in FW */
 +      u8                      igu_sb_id;      /* status block number in HW */
 +
 +      u16                     rx_bd_prod;
 +      u16                     rx_bd_cons;
 +      u16                     rx_comp_prod;
 +      u16                     rx_comp_cons;
 +      u16                     rx_sge_prod;
 +      /* The last maximal completed SGE */
 +      u16                     last_max_sge;
 +      __le16                  *rx_cons_sb;
 +      unsigned long           rx_pkt,
 +                              rx_calls;
 +
 +      /* TPA related */
 +      struct bnx2x_agg_info   tpa_info[ETH_MAX_AGGREGATION_QUEUES_E1H_E2];
 +      u8                      disable_tpa;
 +#ifdef BNX2X_STOP_ON_ERROR
 +      u64                     tpa_queue_used;
 +#endif
 +
 +      struct tstorm_per_queue_stats old_tclient;
 +      struct ustorm_per_queue_stats old_uclient;
 +      struct xstorm_per_queue_stats old_xclient;
 +      struct bnx2x_eth_q_stats eth_q_stats;
 +
 +      /* The size is calculated using the following:
 +           sizeof name field from netdev structure +
 +           4 ('-Xx-' string) +
 +           4 (for the digits and to make it DWORD aligned) */
 +#define FP_NAME_SIZE          (sizeof(((struct net_device *)0)->name) + 8)
 +      char                    name[FP_NAME_SIZE];
 +
 +      /* MACs object */
 +      struct bnx2x_vlan_mac_obj mac_obj;
 +
 +      /* Queue State object */
 +      struct bnx2x_queue_sp_obj q_obj;
 +
 +};
 +
 +#define bnx2x_fp(bp, nr, var)         (bp->fp[nr].var)
 +
 +/* Use 2500 as a mini-jumbo MTU for FCoE */
 +#define BNX2X_FCOE_MINI_JUMBO_MTU     2500
 +
 +/* FCoE L2 `fastpath' entry is right after the eth entries */
 +#define FCOE_IDX                      BNX2X_NUM_ETH_QUEUES(bp)
 +#define bnx2x_fcoe_fp(bp)             (&bp->fp[FCOE_IDX])
 +#define bnx2x_fcoe(bp, var)           (bnx2x_fcoe_fp(bp)->var)
 +#define bnx2x_fcoe_tx(bp, var)                (bnx2x_fcoe_fp(bp)-> \
 +                                              txdata[FIRST_TX_COS_INDEX].var)
 +
 +
 +#define IS_ETH_FP(fp)                 (fp->index < \
 +                                       BNX2X_NUM_ETH_QUEUES(fp->bp))
 +#ifdef BCM_CNIC
 +#define IS_FCOE_FP(fp)                        (fp->index == FCOE_IDX)
 +#define IS_FCOE_IDX(idx)              ((idx) == FCOE_IDX)
 +#else
 +#define IS_FCOE_FP(fp)                false
 +#define IS_FCOE_IDX(idx)      false
 +#endif
 +
 +
 +/* MC hsi */
 +#define MAX_FETCH_BD          13      /* HW max BDs per packet */
 +#define RX_COPY_THRESH                92
 +
 +#define NUM_TX_RINGS          16
 +#define TX_DESC_CNT           (BCM_PAGE_SIZE / sizeof(union eth_tx_bd_types))
- #define MAX_TX_DESC_CNT               (TX_DESC_CNT - 1)
++#define NEXT_PAGE_TX_DESC_CNT 1
++#define MAX_TX_DESC_CNT               (TX_DESC_CNT - NEXT_PAGE_TX_DESC_CNT)
 +#define NUM_TX_BD             (TX_DESC_CNT * NUM_TX_RINGS)
 +#define MAX_TX_BD             (NUM_TX_BD - 1)
 +#define MAX_TX_AVAIL          (MAX_TX_DESC_CNT * NUM_TX_RINGS - 2)
 +#define NEXT_TX_IDX(x)                ((((x) & MAX_TX_DESC_CNT) == \
-                                 (MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)
++                                (MAX_TX_DESC_CNT - 1)) ? \
++                                      (x) + 1 + NEXT_PAGE_TX_DESC_CNT : \
++                                      (x) + 1)
 +#define TX_BD(x)              ((x) & MAX_TX_BD)
 +#define TX_BD_POFF(x)         ((x) & MAX_TX_DESC_CNT)
 +
 +/* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */
 +#define NUM_RX_RINGS          8
 +#define RX_DESC_CNT           (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd))
- #define MAX_RX_DESC_CNT               (RX_DESC_CNT - 2)
++#define NEXT_PAGE_RX_DESC_CNT 2
++#define MAX_RX_DESC_CNT               (RX_DESC_CNT - NEXT_PAGE_RX_DESC_CNT)
 +#define RX_DESC_MASK          (RX_DESC_CNT - 1)
 +#define NUM_RX_BD             (RX_DESC_CNT * NUM_RX_RINGS)
 +#define MAX_RX_BD             (NUM_RX_BD - 1)
 +#define MAX_RX_AVAIL          (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2)
- #define MIN_RX_AVAIL          128
++
++/* dropless fc calculations for BDs
++ *
++ * The number of BDs should match the number of buffers in the BRB:
++ * Low threshold takes into account NEXT_PAGE_RX_DESC_CNT
++ * "next" elements on each page
++ */
++#define NUM_BD_REQ            BRB_SIZE(bp)
++#define NUM_BD_PG_REQ         ((NUM_BD_REQ + MAX_RX_DESC_CNT - 1) / \
++                                            MAX_RX_DESC_CNT)
++#define BD_TH_LO(bp)          (NUM_BD_REQ + \
++                               NUM_BD_PG_REQ * NEXT_PAGE_RX_DESC_CNT + \
++                               FW_DROP_LEVEL(bp))
++#define BD_TH_HI(bp)          (BD_TH_LO(bp) + DROPLESS_FC_HEADROOM)
++
++#define MIN_RX_AVAIL          ((bp)->dropless_fc ? BD_TH_HI(bp) + 128 : 128)
 +
 +#define MIN_RX_SIZE_TPA_HW    (CHIP_IS_E1(bp) ? \
 +                                      ETH_MIN_RX_CQES_WITH_TPA_E1 : \
 +                                      ETH_MIN_RX_CQES_WITH_TPA_E1H_E2)
 +#define MIN_RX_SIZE_NONTPA_HW   ETH_MIN_RX_CQES_WITHOUT_TPA
 +#define MIN_RX_SIZE_TPA               (max_t(u32, MIN_RX_SIZE_TPA_HW, MIN_RX_AVAIL))
 +#define MIN_RX_SIZE_NONTPA    (max_t(u32, MIN_RX_SIZE_NONTPA_HW,\
 +                                                              MIN_RX_AVAIL))
 +
 +#define NEXT_RX_IDX(x)                ((((x) & RX_DESC_MASK) == \
-                                 (MAX_RX_DESC_CNT - 1)) ? (x) + 3 : (x) + 1)
++                                (MAX_RX_DESC_CNT - 1)) ? \
++                                      (x) + 1 + NEXT_PAGE_RX_DESC_CNT : \
++                                      (x) + 1)
 +#define RX_BD(x)              ((x) & MAX_RX_BD)
 +
 +/*
 + * As long as a CQE is X times bigger than a BD entry we have to allocate X
 + * times more pages for the CQ ring in order to keep it balanced with the
 + * BD ring
 + */
 +#define CQE_BD_REL    (sizeof(union eth_rx_cqe) / sizeof(struct eth_rx_bd))
 +#define NUM_RCQ_RINGS         (NUM_RX_RINGS * CQE_BD_REL)
 +#define RCQ_DESC_CNT          (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe))
- #define MAX_RCQ_DESC_CNT      (RCQ_DESC_CNT - 1)
++#define NEXT_PAGE_RCQ_DESC_CNT        1
++#define MAX_RCQ_DESC_CNT      (RCQ_DESC_CNT - NEXT_PAGE_RCQ_DESC_CNT)
 +#define NUM_RCQ_BD            (RCQ_DESC_CNT * NUM_RCQ_RINGS)
 +#define MAX_RCQ_BD            (NUM_RCQ_BD - 1)
 +#define MAX_RCQ_AVAIL         (MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2)
 +#define NEXT_RCQ_IDX(x)               ((((x) & MAX_RCQ_DESC_CNT) == \
-                                 (MAX_RCQ_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)
++                                (MAX_RCQ_DESC_CNT - 1)) ? \
++                                      (x) + 1 + NEXT_PAGE_RCQ_DESC_CNT : \
++                                      (x) + 1)
 +#define RCQ_BD(x)             ((x) & MAX_RCQ_BD)
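A hedged arithmetic sketch of the CQE/BD balancing above; the structure sizes (32-byte union eth_rx_cqe, 8-byte struct eth_rx_bd) are assumptions about the HSI rather than values taken from this header.

/* Sketch, assuming sizeof(union eth_rx_cqe) == 32 and
 * sizeof(struct eth_rx_bd) == 8:
 *
 *   CQE_BD_REL    == 32 / 8           == 4
 *   NUM_RCQ_RINGS == NUM_RX_RINGS * 4 == 32 pages
 *   RCQ_DESC_CNT  == 4096 / 32        == 128 CQEs per page
 *
 * so NUM_RCQ_BD == 128 * 32 == 4096 matches NUM_RX_BD == 512 * 8 == 4096,
 * keeping one completion entry available per RX BD.
 */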
 +
++/* dropless fc calculations for RCQs
++ *
++ * The number of RCQs should match the number of buffers in the BRB:
++ * Low threshold takes into account NEXT_PAGE_RCQ_DESC_CNT
++ * "next" elements on each page
++ */
++#define NUM_RCQ_REQ           BRB_SIZE(bp)
++#define NUM_RCQ_PG_REQ                ((NUM_RCQ_REQ + MAX_RCQ_DESC_CNT - 1) / \
++                                            MAX_RCQ_DESC_CNT)
++#define RCQ_TH_LO(bp)         (NUM_RCQ_REQ + \
++                               NUM_RCQ_PG_REQ * NEXT_PAGE_RCQ_DESC_CNT + \
++                               FW_DROP_LEVEL(bp))
++#define RCQ_TH_HI(bp)         (RCQ_TH_LO(bp) + DROPLESS_FC_HEADROOM)
++
 +
 +/* This is needed for determining of last_max */
 +#define SUB_S16(a, b)         (s16)((s16)(a) - (s16)(b))
 +#define SUB_S32(a, b)         (s32)((s32)(a) - (s32)(b))
 +
 +
 +#define BNX2X_SWCID_SHIFT     17
 +#define BNX2X_SWCID_MASK      ((0x1 << BNX2X_SWCID_SHIFT) - 1)
 +
 +/* used on a CID received from the HW */
 +#define SW_CID(x)                     (le32_to_cpu(x) & BNX2X_SWCID_MASK)
 +#define CQE_CMD(x)                    (le32_to_cpu(x) >> \
 +                                      COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT)
 +
 +#define BD_UNMAP_ADDR(bd)             HILO_U64(le32_to_cpu((bd)->addr_hi), \
 +                                               le32_to_cpu((bd)->addr_lo))
 +#define BD_UNMAP_LEN(bd)              (le16_to_cpu((bd)->nbytes))
 +
 +#define BNX2X_DB_MIN_SHIFT            3       /* 8 bytes */
 +#define BNX2X_DB_SHIFT                        7       /* 128 bytes*/
 +#if (BNX2X_DB_SHIFT < BNX2X_DB_MIN_SHIFT)
 +#error "Min DB doorbell stride is 8"
 +#endif
 +#define DPM_TRIGER_TYPE                       0x40
 +#define DOORBELL(bp, cid, val) \
 +      do { \
 +              writel((u32)(val), bp->doorbells + (bp->db_size * (cid)) + \
 +                     DPM_TRIGER_TYPE); \
 +      } while (0)
 +
 +
 +/* TX CSUM helpers */
 +#define SKB_CS_OFF(skb)               (offsetof(struct tcphdr, check) - \
 +                               skb->csum_offset)
 +#define SKB_CS(skb)           (*(u16 *)(skb_transport_header(skb) + \
 +                                        skb->csum_offset))
 +
 +#define pbd_tcp_flags(skb)    (ntohl(tcp_flag_word(tcp_hdr(skb)))>>16 & 0xff)
 +
 +#define XMIT_PLAIN                    0
 +#define XMIT_CSUM_V4                  0x1
 +#define XMIT_CSUM_V6                  0x2
 +#define XMIT_CSUM_TCP                 0x4
 +#define XMIT_GSO_V4                   0x8
 +#define XMIT_GSO_V6                   0x10
 +
 +#define XMIT_CSUM                     (XMIT_CSUM_V4 | XMIT_CSUM_V6)
 +#define XMIT_GSO                      (XMIT_GSO_V4 | XMIT_GSO_V6)
 +
 +
 +/* stuff added to make the code fit 80Col */
 +#define CQE_TYPE(cqe_fp_flags)         ((cqe_fp_flags) & ETH_FAST_PATH_RX_CQE_TYPE)
 +#define CQE_TYPE_START(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_START_AGG)
 +#define CQE_TYPE_STOP(cqe_type)  ((cqe_type) == RX_ETH_CQE_TYPE_ETH_STOP_AGG)
 +#define CQE_TYPE_SLOW(cqe_type)  ((cqe_type) == RX_ETH_CQE_TYPE_ETH_RAMROD)
 +#define CQE_TYPE_FAST(cqe_type)  ((cqe_type) == RX_ETH_CQE_TYPE_ETH_FASTPATH)
 +
 +#define ETH_RX_ERROR_FALGS            ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG
 +
 +#define BNX2X_IP_CSUM_ERR(cqe) \
 +                      (!((cqe)->fast_path_cqe.status_flags & \
 +                         ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG) && \
 +                       ((cqe)->fast_path_cqe.type_error_flags & \
 +                        ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG))
 +
 +#define BNX2X_L4_CSUM_ERR(cqe) \
 +                      (!((cqe)->fast_path_cqe.status_flags & \
 +                         ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) && \
 +                       ((cqe)->fast_path_cqe.type_error_flags & \
 +                        ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
 +
 +#define BNX2X_RX_CSUM_OK(cqe) \
 +                      (!(BNX2X_L4_CSUM_ERR(cqe) || BNX2X_IP_CSUM_ERR(cqe)))
 +
 +#define BNX2X_PRS_FLAG_OVERETH_IPV4(flags) \
 +                              (((le16_to_cpu(flags) & \
 +                                 PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >> \
 +                                PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT) \
 +                               == PRS_FLAG_OVERETH_IPV4)
 +#define BNX2X_RX_SUM_FIX(cqe) \
 +      BNX2X_PRS_FLAG_OVERETH_IPV4(cqe->fast_path_cqe.pars_flags.flags)
 +
 +
 +#define FP_USB_FUNC_OFF       \
 +                      offsetof(struct cstorm_status_block_u, func)
 +#define FP_CSB_FUNC_OFF       \
 +                      offsetof(struct cstorm_status_block_c, func)
 +
- #define HC_INDEX_TOE_RX_CQ_CONS               0 /* Formerly Ustorm TOE CQ index */
-                                         /* (HC_INDEX_U_TOE_RX_CQ_CONS)  */
- #define HC_INDEX_ETH_RX_CQ_CONS               1 /* Formerly Ustorm ETH CQ index */
-                                         /* (HC_INDEX_U_ETH_RX_CQ_CONS)  */
- #define HC_INDEX_ETH_RX_BD_CONS               2 /* Formerly Ustorm ETH BD index */
-                                         /* (HC_INDEX_U_ETH_RX_BD_CONS)  */
- #define HC_INDEX_TOE_TX_CQ_CONS               4 /* Formerly Cstorm TOE CQ index   */
-                                         /* (HC_INDEX_C_TOE_TX_CQ_CONS)    */
- #define HC_INDEX_ETH_TX_CQ_CONS_COS0  5 /* Formerly Cstorm ETH CQ index   */
-                                         /* (HC_INDEX_C_ETH_TX_CQ_CONS)    */
- #define HC_INDEX_ETH_TX_CQ_CONS_COS1  6 /* Formerly Cstorm ETH CQ index   */
-                                         /* (HC_INDEX_C_ETH_TX_CQ_CONS)    */
- #define HC_INDEX_ETH_TX_CQ_CONS_COS2  7 /* Formerly Cstorm ETH CQ index   */
-                                         /* (HC_INDEX_C_ETH_TX_CQ_CONS)    */
++#define HC_INDEX_ETH_RX_CQ_CONS               1
 +
- #define HC_INDEX_ETH_FIRST_TX_CQ_CONS HC_INDEX_ETH_TX_CQ_CONS_COS0
++#define HC_INDEX_OOO_TX_CQ_CONS               4
 +
++#define HC_INDEX_ETH_TX_CQ_CONS_COS0  5
++
++#define HC_INDEX_ETH_TX_CQ_CONS_COS1  6
++
++#define HC_INDEX_ETH_TX_CQ_CONS_COS2  7
++
++#define HC_INDEX_ETH_FIRST_TX_CQ_CONS HC_INDEX_ETH_TX_CQ_CONS_COS0
 +
 +#define BNX2X_RX_SB_INDEX \
 +      (&fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS])
 +
 +#define BNX2X_TX_SB_INDEX_BASE BNX2X_TX_SB_INDEX_COS0
 +
 +#define BNX2X_TX_SB_INDEX_COS0 \
 +      (&fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0])
 +
 +/* end of fast path */
 +
 +/* common */
 +
 +struct bnx2x_common {
 +
 +      u32                     chip_id;
 +/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
 +#define CHIP_ID(bp)                   (bp->common.chip_id & 0xfffffff0)
 +
 +#define CHIP_NUM(bp)                  (bp->common.chip_id >> 16)
 +#define CHIP_NUM_57710                        0x164e
 +#define CHIP_NUM_57711                        0x164f
 +#define CHIP_NUM_57711E                       0x1650
 +#define CHIP_NUM_57712                        0x1662
 +#define CHIP_NUM_57712_MF             0x1663
 +#define CHIP_NUM_57713                        0x1651
 +#define CHIP_NUM_57713E                       0x1652
 +#define CHIP_NUM_57800                        0x168a
 +#define CHIP_NUM_57800_MF             0x16a5
 +#define CHIP_NUM_57810                        0x168e
 +#define CHIP_NUM_57810_MF             0x16ae
 +#define CHIP_NUM_57840                        0x168d
 +#define CHIP_NUM_57840_MF             0x16ab
 +#define CHIP_IS_E1(bp)                        (CHIP_NUM(bp) == CHIP_NUM_57710)
 +#define CHIP_IS_57711(bp)             (CHIP_NUM(bp) == CHIP_NUM_57711)
 +#define CHIP_IS_57711E(bp)            (CHIP_NUM(bp) == CHIP_NUM_57711E)
 +#define CHIP_IS_57712(bp)             (CHIP_NUM(bp) == CHIP_NUM_57712)
 +#define CHIP_IS_57712_MF(bp)          (CHIP_NUM(bp) == CHIP_NUM_57712_MF)
 +#define CHIP_IS_57800(bp)             (CHIP_NUM(bp) == CHIP_NUM_57800)
 +#define CHIP_IS_57800_MF(bp)          (CHIP_NUM(bp) == CHIP_NUM_57800_MF)
 +#define CHIP_IS_57810(bp)             (CHIP_NUM(bp) == CHIP_NUM_57810)
 +#define CHIP_IS_57810_MF(bp)          (CHIP_NUM(bp) == CHIP_NUM_57810_MF)
 +#define CHIP_IS_57840(bp)             (CHIP_NUM(bp) == CHIP_NUM_57840)
 +#define CHIP_IS_57840_MF(bp)          (CHIP_NUM(bp) == CHIP_NUM_57840_MF)
 +#define CHIP_IS_E1H(bp)                       (CHIP_IS_57711(bp) || \
 +                                       CHIP_IS_57711E(bp))
 +#define CHIP_IS_E2(bp)                        (CHIP_IS_57712(bp) || \
 +                                       CHIP_IS_57712_MF(bp))
 +#define CHIP_IS_E3(bp)                        (CHIP_IS_57800(bp) || \
 +                                       CHIP_IS_57800_MF(bp) || \
 +                                       CHIP_IS_57810(bp) || \
 +                                       CHIP_IS_57810_MF(bp) || \
 +                                       CHIP_IS_57840(bp) || \
 +                                       CHIP_IS_57840_MF(bp))
 +#define CHIP_IS_E1x(bp)                       (CHIP_IS_E1((bp)) || CHIP_IS_E1H((bp)))
 +#define USES_WARPCORE(bp)             (CHIP_IS_E3(bp))
 +#define IS_E1H_OFFSET                 (!CHIP_IS_E1(bp))
 +
 +#define CHIP_REV_SHIFT                        12
 +#define CHIP_REV_MASK                 (0xF << CHIP_REV_SHIFT)
 +#define CHIP_REV_VAL(bp)              (bp->common.chip_id & CHIP_REV_MASK)
 +#define CHIP_REV_Ax                   (0x0 << CHIP_REV_SHIFT)
 +#define CHIP_REV_Bx                   (0x1 << CHIP_REV_SHIFT)
 +/* assume maximum 5 revisions */
 +#define CHIP_REV_IS_SLOW(bp)          (CHIP_REV_VAL(bp) > 0x00005000)
 +/* Emul versions are A=>0xe, B=>0xc, C=>0xa, D=>8, E=>6 */
 +#define CHIP_REV_IS_EMUL(bp)          ((CHIP_REV_IS_SLOW(bp)) && \
 +                                       !(CHIP_REV_VAL(bp) & 0x00001000))
 +/* FPGA versions are A=>0xf, B=>0xd, C=>0xb, D=>9, E=>7 */
 +#define CHIP_REV_IS_FPGA(bp)          ((CHIP_REV_IS_SLOW(bp)) && \
 +                                       (CHIP_REV_VAL(bp) & 0x00001000))
 +
 +#define CHIP_TIME(bp)                 ((CHIP_REV_IS_EMUL(bp)) ? 2000 : \
 +                                      ((CHIP_REV_IS_FPGA(bp)) ? 200 : 1))
 +
 +#define CHIP_METAL(bp)                        (bp->common.chip_id & 0x00000ff0)
 +#define CHIP_BOND_ID(bp)              (bp->common.chip_id & 0x0000000f)
 +#define CHIP_REV_SIM(bp)              (((CHIP_REV_MASK - CHIP_REV_VAL(bp)) >>\
 +                                         (CHIP_REV_SHIFT + 1)) \
 +                                              << CHIP_REV_SHIFT)
 +#define CHIP_REV(bp)                  (CHIP_REV_IS_SLOW(bp) ? \
 +                                              CHIP_REV_SIM(bp) :\
 +                                              CHIP_REV_VAL(bp))
 +#define CHIP_IS_E3B0(bp)              (CHIP_IS_E3(bp) && \
 +                                       (CHIP_REV(bp) == CHIP_REV_Bx))
 +#define CHIP_IS_E3A0(bp)              (CHIP_IS_E3(bp) && \
 +                                       (CHIP_REV(bp) == CHIP_REV_Ax))
 +
 +      int                     flash_size;
 +#define BNX2X_NVRAM_1MB_SIZE                  0x20000 /* 1M bit in bytes */
 +#define BNX2X_NVRAM_TIMEOUT_COUNT             30000
 +#define BNX2X_NVRAM_PAGE_SIZE                 256
 +
 +      u32                     shmem_base;
 +      u32                     shmem2_base;
 +      u32                     mf_cfg_base;
 +      u32                     mf2_cfg_base;
 +
 +      u32                     hw_config;
 +
 +      u32                     bc_ver;
 +
 +      u8                      int_block;
 +#define INT_BLOCK_HC                  0
 +#define INT_BLOCK_IGU                 1
 +#define INT_BLOCK_MODE_NORMAL         0
 +#define INT_BLOCK_MODE_BW_COMP                2
 +#define CHIP_INT_MODE_IS_NBC(bp)              \
 +                      (!CHIP_IS_E1x(bp) &&    \
 +                      !((bp)->common.int_block & INT_BLOCK_MODE_BW_COMP))
 +#define CHIP_INT_MODE_IS_BC(bp) (!CHIP_INT_MODE_IS_NBC(bp))
 +
 +      u8                      chip_port_mode;
 +#define CHIP_4_PORT_MODE                      0x0
 +#define CHIP_2_PORT_MODE                      0x1
 +#define CHIP_PORT_MODE_NONE                   0x2
 +#define CHIP_MODE(bp)                 (bp->common.chip_port_mode)
 +#define CHIP_MODE_IS_4_PORT(bp) (CHIP_MODE(bp) == CHIP_4_PORT_MODE)
 +};
 +
 +/* IGU MSIX STATISTICS on 57712: 64 for VFs; 4 for PFs; 4 for Attentions */
 +#define BNX2X_IGU_STAS_MSG_VF_CNT 64
 +#define BNX2X_IGU_STAS_MSG_PF_CNT 4
 +
 +/* end of common */
 +
 +/* port */
 +
 +struct bnx2x_port {
 +      u32                     pmf;
 +
 +      u32                     link_config[LINK_CONFIG_SIZE];
 +
 +      u32                     supported[LINK_CONFIG_SIZE];
 +/* link settings - missing defines */
 +#define SUPPORTED_2500baseX_Full      (1 << 15)
 +
 +      u32                     advertising[LINK_CONFIG_SIZE];
 +/* link settings - missing defines */
 +#define ADVERTISED_2500baseX_Full     (1 << 15)
 +
 +      u32                     phy_addr;
 +
 +      /* used to synchronize phy accesses */
 +      struct mutex            phy_mutex;
 +      int                     need_hw_lock;
 +
 +      u32                     port_stx;
 +
 +      struct nig_stats        old_nig_stats;
 +};
 +
 +/* end of port */
 +
 +#define STATS_OFFSET32(stat_name) \
 +                      (offsetof(struct bnx2x_eth_stats, stat_name) / 4)
 +
 +/* slow path */
 +
 +/* slow path work-queue */
 +extern struct workqueue_struct *bnx2x_wq;
 +
 +#define BNX2X_MAX_NUM_OF_VFS  64
 +#define BNX2X_VF_ID_INVALID   0xFF
 +
 +/*
 + * The total number of L2 queues, MSIX vectors and HW contexts (CIDs) is
 + * controlled by the number of fast-path status blocks supported by the
 + * device (HW/FW). Each fast-path status block (FP-SB), aka non-default
 + * status block, represents an independent interrupt context that can
 + * serve a regular L2 networking queue. However, special L2 queues such
 + * as the FCoE queue do not require an FP-SB, and other components like
 + * the CNIC may consume FP-SBs, reducing the number of possible L2 queues.
 + *
 + * If the maximum number of FP-SBs available is X then:
 + * a. If CNIC is supported it consumes 1 FP-SB, thus the max number of
 + *    regular L2 queues is Y = X - 1
 + * b. In MF mode the actual number of L2 queues is Y = (X - 1) / MF_factor
 + * c. If the FCoE L2 queue is supported, the actual number of L2 queues
 + *    is Y + 1
 + * d. The number of irqs (MSIX vectors) is either Y + 1 (one extra for
 + *    slow-path interrupts) or Y + 2 if CNIC is supported (one additional
 + *    FP interrupt context for the CNIC).
 + * e. The number of HW contexts (CID count) is always X, or X + 1 if the
 + *    FCoE L2 queue is supported. The cid for the FCoE L2 queue is always X.
 + */
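A worked instance of the budgeting rules above; the configuration (E1x with all 16 FP-SBs in use, CNIC built in, single-function mode) is assumed for illustration.

/* Sketch: X == 16 fast-path status blocks, CNIC supported, no MF:
 *
 *   a. CNIC consumes one FP-SB             ->  Y == 16 - 1 == 15 L2 queues
 *   c. the FCoE queue needs no FP-SB       ->  15 + 1 == 16 L2 queues
 *   d. MSI-X vectors: 15 + 1 slow-path + 1 CNIC == 17
 *   e. CIDs: 16 + 1 for FCoE == 17, with FCoE using cid 16
 */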
 +
 +/* fast-path interrupt contexts E1x */
 +#define FP_SB_MAX_E1x         16
 +/* fast-path interrupt contexts E2 */
 +#define FP_SB_MAX_E2          HC_SB_MAX_SB_E2
 +
 +union cdu_context {
 +      struct eth_context eth;
 +      char pad[1024];
 +};
 +
 +/* CDU host DB constants */
 +#define CDU_ILT_PAGE_SZ_HW    3
 +#define CDU_ILT_PAGE_SZ               (8192 << CDU_ILT_PAGE_SZ_HW) /* 64K */
 +#define ILT_PAGE_CIDS         (CDU_ILT_PAGE_SZ / sizeof(union cdu_context))
 +
 +#ifdef BCM_CNIC
 +#define CNIC_ISCSI_CID_MAX    256
 +#define CNIC_FCOE_CID_MAX     2048
 +#define CNIC_CID_MAX          (CNIC_ISCSI_CID_MAX + CNIC_FCOE_CID_MAX)
 +#define CNIC_ILT_LINES                DIV_ROUND_UP(CNIC_CID_MAX, ILT_PAGE_CIDS)
 +#endif
 +
 +#define QM_ILT_PAGE_SZ_HW     0
 +#define QM_ILT_PAGE_SZ                (4096 << QM_ILT_PAGE_SZ_HW) /* 4K */
 +#define QM_CID_ROUND          1024
 +
 +#ifdef BCM_CNIC
 +/* TM (timers) host DB constants */
 +#define TM_ILT_PAGE_SZ_HW     0
 +#define TM_ILT_PAGE_SZ                (4096 << TM_ILT_PAGE_SZ_HW) /* 4K */
 +/* #define TM_CONN_NUM                (CNIC_STARTING_CID+CNIC_ISCSI_CXT_MAX) */
 +#define TM_CONN_NUM           1024
 +#define TM_ILT_SZ             (8 * TM_CONN_NUM)
 +#define TM_ILT_LINES          DIV_ROUND_UP(TM_ILT_SZ, TM_ILT_PAGE_SZ)
 +
 +/* SRC (Searcher) host DB constants */
 +#define SRC_ILT_PAGE_SZ_HW    0
 +#define SRC_ILT_PAGE_SZ               (4096 << SRC_ILT_PAGE_SZ_HW) /* 4K */
 +#define SRC_HASH_BITS         10
 +#define SRC_CONN_NUM          (1 << SRC_HASH_BITS) /* 1024 */
 +#define SRC_ILT_SZ            (sizeof(struct src_ent) * SRC_CONN_NUM)
 +#define SRC_T2_SZ             SRC_ILT_SZ
 +#define SRC_ILT_LINES         DIV_ROUND_UP(SRC_ILT_SZ, SRC_ILT_PAGE_SZ)
 +
 +#endif
 +
 +#define MAX_DMAE_C            8
 +
 +/* DMA memory not used in fastpath */
 +struct bnx2x_slowpath {
 +      union {
 +              struct mac_configuration_cmd            e1x;
 +              struct eth_classify_rules_ramrod_data   e2;
 +      } mac_rdata;
 +
 +
 +      union {
 +              struct tstorm_eth_mac_filter_config     e1x;
 +              struct eth_filter_rules_ramrod_data     e2;
 +      } rx_mode_rdata;
 +
 +      union {
 +              struct mac_configuration_cmd            e1;
 +              struct eth_multicast_rules_ramrod_data  e2;
 +      } mcast_rdata;
 +
 +      struct eth_rss_update_ramrod_data       rss_rdata;
 +
 +      /* Queue State related ramrods are always sent under rtnl_lock */
 +      union {
 +              struct client_init_ramrod_data  init_data;
 +              struct client_update_ramrod_data update_data;
 +      } q_rdata;
 +
 +      union {
 +              struct function_start_data      func_start;
 +              /* pfc configuration for DCBX ramrod */
 +              struct flow_control_configuration pfc_config;
 +      } func_rdata;
 +
 +      /* used by dmae command executer */
 +      struct dmae_command             dmae[MAX_DMAE_C];
 +
 +      u32                             stats_comp;
 +      union mac_stats                 mac_stats;
 +      struct nig_stats                nig_stats;
 +      struct host_port_stats          port_stats;
 +      struct host_func_stats          func_stats;
 +      struct host_func_stats          func_stats_base;
 +
 +      u32                             wb_comp;
 +      u32                             wb_data[4];
 +};
 +
 +#define bnx2x_sp(bp, var)             (&bp->slowpath->var)
 +#define bnx2x_sp_mapping(bp, var) \
 +              (bp->slowpath_mapping + offsetof(struct bnx2x_slowpath, var))
 +
 +
 +/* attn group wiring */
 +#define MAX_DYNAMIC_ATTN_GRPS         8
 +
 +struct attn_route {
 +      u32 sig[5];
 +};
 +
 +struct iro {
 +      u32 base;
 +      u16 m1;
 +      u16 m2;
 +      u16 m3;
 +      u16 size;
 +};
 +
 +struct hw_context {
 +      union cdu_context *vcxt;
 +      dma_addr_t cxt_mapping;
 +      size_t size;
 +};
 +
 +/* forward */
 +struct bnx2x_ilt;
 +
 +
 +enum bnx2x_recovery_state {
 +      BNX2X_RECOVERY_DONE,
 +      BNX2X_RECOVERY_INIT,
 +      BNX2X_RECOVERY_WAIT,
 +      BNX2X_RECOVERY_FAILED
 +};
 +
 +/*
 + * Event queue (EQ or event ring) MC hsi
 + * NUM_EQ_PAGES and EQ_DESC_CNT_PAGE must be power of 2
 + */
 +#define NUM_EQ_PAGES          1
 +#define EQ_DESC_CNT_PAGE      (BCM_PAGE_SIZE / sizeof(union event_ring_elem))
 +#define EQ_DESC_MAX_PAGE      (EQ_DESC_CNT_PAGE - 1)
 +#define NUM_EQ_DESC           (EQ_DESC_CNT_PAGE * NUM_EQ_PAGES)
 +#define EQ_DESC_MASK          (NUM_EQ_DESC - 1)
 +#define MAX_EQ_AVAIL          (EQ_DESC_MAX_PAGE * NUM_EQ_PAGES - 2)
 +
 +/* depends on EQ_DESC_CNT_PAGE being a power of 2 */
 +#define NEXT_EQ_IDX(x)                ((((x) & EQ_DESC_MAX_PAGE) == \
 +                                (EQ_DESC_MAX_PAGE - 1)) ? (x) + 2 : (x) + 1)
 +
 +/* depends on the above and on NUM_EQ_PAGES being a power of 2 */
 +#define EQ_DESC(x)            ((x) & EQ_DESC_MASK)
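For intuition, a hedged worked example (the 32-byte size of union event_ring_elem is an assumption, not something this header states):

	/* With BCM_PAGE_SIZE = 4096 and a 32-byte event_ring_elem,
	 * EQ_DESC_CNT_PAGE = 128 and EQ_DESC_MAX_PAGE = 127.  The last slot of
	 * each page is presumably reserved as a next-page pointer, so
	 * NEXT_EQ_IDX() jumps by 2 whenever (x & 127) == 126, and EQ_DESC()
	 * masks the running index back onto the ring.
	 */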
 +
 +#define BNX2X_EQ_INDEX \
 +      (&bp->def_status_blk->sp_sb.\
 +      index_values[HC_SP_INDEX_EQ_CONS])
 +
 +/* This data will be used to create a link report message. We keep
 + * the data used for the last link report in order to prevent
 + * reporting the same link parameters twice.
 + */
 +struct bnx2x_link_report_data {
 +      u16 line_speed;                 /* Effective line speed */
 +      unsigned long link_report_flags;/* BNX2X_LINK_REPORT_XXX flags */
 +};
 +
 +enum {
 +      BNX2X_LINK_REPORT_FD,           /* Full DUPLEX */
 +      BNX2X_LINK_REPORT_LINK_DOWN,
 +      BNX2X_LINK_REPORT_RX_FC_ON,
 +      BNX2X_LINK_REPORT_TX_FC_ON,
 +};
 +
 +enum {
 +      BNX2X_PORT_QUERY_IDX,
 +      BNX2X_PF_QUERY_IDX,
 +      BNX2X_FIRST_QUEUE_QUERY_IDX,
 +};
 +
 +struct bnx2x_fw_stats_req {
 +      struct stats_query_header hdr;
 +      struct stats_query_entry query[STATS_QUERY_CMD_COUNT];
 +};
 +
 +struct bnx2x_fw_stats_data {
 +      struct stats_counter    storm_counters;
 +      struct per_port_stats   port;
 +      struct per_pf_stats     pf;
 +      struct per_queue_stats  queue_stats[1];
 +};
 +
 +/* Public slow path states */
 +enum {
 +      BNX2X_SP_RTNL_SETUP_TC,
 +      BNX2X_SP_RTNL_TX_TIMEOUT,
 +};
 +
 +
 +struct bnx2x {
 +      /* Fields used in the tx and intr/napi performance paths
 +       * are grouped together in the beginning of the structure
 +       */
 +      struct bnx2x_fastpath   *fp;
 +      void __iomem            *regview;
 +      void __iomem            *doorbells;
 +      u16                     db_size;
 +
 +      u8                      pf_num; /* absolute PF number */
 +      u8                      pfid;   /* per-path PF number */
 +      int                     base_fw_ndsb; /**/
 +#define BP_PATH(bp)                   (CHIP_IS_E1x(bp) ? 0 : (bp->pf_num & 1))
 +#define BP_PORT(bp)                   (bp->pfid & 1)
 +#define BP_FUNC(bp)                   (bp->pfid)
 +#define BP_ABS_FUNC(bp)                       (bp->pf_num)
- #define BP_E1HVN(bp)                  (bp->pfid >> 1)
- #define BP_VN(bp)                     (BP_E1HVN(bp)) /*remove when approved*/
- #define BP_L_ID(bp)                   (BP_E1HVN(bp) << 2)
- #define BP_FW_MB_IDX(bp)              (BP_PORT(bp) +\
-         BP_VN(bp) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2  : 1))
++#define BP_VN(bp)                     ((bp)->pfid >> 1)
++#define BP_MAX_VN_NUM(bp)             (CHIP_MODE_IS_4_PORT(bp) ? 2 : 4)
++#define BP_L_ID(bp)                   (BP_VN(bp) << 2)
++#define BP_FW_MB_IDX_VN(bp, vn)               (BP_PORT(bp) +\
++        (vn) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2  : 1))
++#define BP_FW_MB_IDX(bp)              BP_FW_MB_IDX_VN(bp, BP_VN(bp))
 +
 +      struct net_device       *dev;
 +      struct pci_dev          *pdev;
 +
 +      const struct iro        *iro_arr;
 +#define IRO (bp->iro_arr)
 +
 +      enum bnx2x_recovery_state recovery_state;
 +      int                     is_leader;
 +      struct msix_entry       *msix_table;
 +
 +      int                     tx_ring_size;
 +
 +/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
 +#define ETH_OVREHEAD          (ETH_HLEN + 8 + 8)
 +#define ETH_MIN_PACKET_SIZE           60
 +#define ETH_MAX_PACKET_SIZE           1500
 +#define ETH_MAX_JUMBO_PACKET_SIZE     9600
 +
 +      /* Max supported alignment is 256 (8 shift) */
 +#define BNX2X_RX_ALIGN_SHIFT          ((L1_CACHE_SHIFT < 8) ? \
 +                                       L1_CACHE_SHIFT : 8)
 +      /* FW uses 2 cache lines alignment for start packet and size */
 +#define BNX2X_FW_RX_ALIGN             (2 << BNX2X_RX_ALIGN_SHIFT)
 +#define BNX2X_PXP_DRAM_ALIGN          (BNX2X_RX_ALIGN_SHIFT - 5)
 +
 +      struct host_sp_status_block *def_status_blk;
 +#define DEF_SB_IGU_ID                 16
 +#define DEF_SB_ID                     HC_SP_SB_ID
 +      __le16                  def_idx;
 +      __le16                  def_att_idx;
 +      u32                     attn_state;
 +      struct attn_route       attn_group[MAX_DYNAMIC_ATTN_GRPS];
 +
 +      /* slow path ring */
 +      struct eth_spe          *spq;
 +      dma_addr_t              spq_mapping;
 +      u16                     spq_prod_idx;
 +      struct eth_spe          *spq_prod_bd;
 +      struct eth_spe          *spq_last_bd;
 +      __le16                  *dsb_sp_prod;
 +      atomic_t                cq_spq_left; /* ETH_XXX ramrods credit */
 +      /* used to synchronize spq accesses */
 +      spinlock_t              spq_lock;
 +
 +      /* event queue */
 +      union event_ring_elem   *eq_ring;
 +      dma_addr_t              eq_mapping;
 +      u16                     eq_prod;
 +      u16                     eq_cons;
 +      __le16                  *eq_cons_sb;
 +      atomic_t                eq_spq_left; /* COMMON_XXX ramrods credit */
 +
 +
 +
 +      /* Counter for marking that there is a STAT_QUERY ramrod pending */
 +      u16                     stats_pending;
 +      /*  Counter for completed statistics ramrods */
 +      u16                     stats_comp;
 +
 +      /* End of fields used in the performance code paths */
 +
 +      int                     panic;
 +      int                     msg_enable;
 +
 +      u32                     flags;
 +#define PCIX_FLAG                     (1 << 0)
 +#define PCI_32BIT_FLAG                        (1 << 1)
 +#define ONE_PORT_FLAG                 (1 << 2)
 +#define NO_WOL_FLAG                   (1 << 3)
 +#define USING_DAC_FLAG                        (1 << 4)
 +#define USING_MSIX_FLAG                       (1 << 5)
 +#define USING_MSI_FLAG                        (1 << 6)
 +#define DISABLE_MSI_FLAG              (1 << 7)
 +#define TPA_ENABLE_FLAG                       (1 << 8)
 +#define NO_MCP_FLAG                   (1 << 9)
 +
 +#define BP_NOMCP(bp)                  (bp->flags & NO_MCP_FLAG)
 +#define MF_FUNC_DIS                   (1 << 11)
 +#define OWN_CNIC_IRQ                  (1 << 12)
 +#define NO_ISCSI_OOO_FLAG             (1 << 13)
 +#define NO_ISCSI_FLAG                 (1 << 14)
 +#define NO_FCOE_FLAG                  (1 << 15)
 +
 +#define NO_ISCSI(bp)          ((bp)->flags & NO_ISCSI_FLAG)
 +#define NO_ISCSI_OOO(bp)      ((bp)->flags & NO_ISCSI_OOO_FLAG)
 +#define NO_FCOE(bp)           ((bp)->flags & NO_FCOE_FLAG)
 +
 +      int                     pm_cap;
 +      int                     mrrs;
 +
 +      struct delayed_work     sp_task;
 +      struct delayed_work     sp_rtnl_task;
 +
 +      struct delayed_work     period_task;
 +      struct timer_list       timer;
 +      int                     current_interval;
 +
 +      u16                     fw_seq;
 +      u16                     fw_drv_pulse_wr_seq;
 +      u32                     func_stx;
 +
 +      struct link_params      link_params;
 +      struct link_vars        link_vars;
 +      u32                     link_cnt;
 +      struct bnx2x_link_report_data last_reported_link;
 +
 +      struct mdio_if_info     mdio;
 +
 +      struct bnx2x_common     common;
 +      struct bnx2x_port       port;
 +
 +      struct cmng_struct_per_port cmng;
 +      u32                     vn_weight_sum;
 +      u32                     mf_config[E1HVN_MAX];
 +      u32                     mf2_config[E2_FUNC_MAX];
 +      u32                     path_has_ovlan; /* E3 */
 +      u16                     mf_ov;
 +      u8                      mf_mode;
 +#define IS_MF(bp)             (bp->mf_mode != 0)
 +#define IS_MF_SI(bp)          (bp->mf_mode == MULTI_FUNCTION_SI)
 +#define IS_MF_SD(bp)          (bp->mf_mode == MULTI_FUNCTION_SD)
 +
 +      u8                      wol;
 +
 +      int                     rx_ring_size;
 +
 +      u16                     tx_quick_cons_trip_int;
 +      u16                     tx_quick_cons_trip;
 +      u16                     tx_ticks_int;
 +      u16                     tx_ticks;
 +
 +      u16                     rx_quick_cons_trip_int;
 +      u16                     rx_quick_cons_trip;
 +      u16                     rx_ticks_int;
 +      u16                     rx_ticks;
 +/* Maximal coalescing timeout in us */
 +#define BNX2X_MAX_COALESCE_TOUT               (0xf0*12)
 +
 +      u32                     lin_cnt;
 +
 +      u16                     state;
 +#define BNX2X_STATE_CLOSED            0
 +#define BNX2X_STATE_OPENING_WAIT4_LOAD        0x1000
 +#define BNX2X_STATE_OPENING_WAIT4_PORT        0x2000
 +#define BNX2X_STATE_OPEN              0x3000
 +#define BNX2X_STATE_CLOSING_WAIT4_HALT        0x4000
 +#define BNX2X_STATE_CLOSING_WAIT4_DELETE 0x5000
 +
 +#define BNX2X_STATE_DIAG              0xe000
 +#define BNX2X_STATE_ERROR             0xf000
 +
 +      int                     multi_mode;
 +#define BNX2X_MAX_PRIORITY            8
 +#define BNX2X_MAX_ENTRIES_PER_PRI     16
 +#define BNX2X_MAX_COS                 3
 +#define BNX2X_MAX_TX_COS              2
 +      int                     num_queues;
 +      int                     disable_tpa;
 +
 +      u32                     rx_mode;
 +#define BNX2X_RX_MODE_NONE            0
 +#define BNX2X_RX_MODE_NORMAL          1
 +#define BNX2X_RX_MODE_ALLMULTI                2
 +#define BNX2X_RX_MODE_PROMISC         3
 +#define BNX2X_MAX_MULTICAST           64
 +
 +      u8                      igu_dsb_id;
 +      u8                      igu_base_sb;
 +      u8                      igu_sb_cnt;
 +      dma_addr_t              def_status_blk_mapping;
 +
 +      struct bnx2x_slowpath   *slowpath;
 +      dma_addr_t              slowpath_mapping;
 +
 +      /* Total number of FW statistics requests */
 +      u8                      fw_stats_num;
 +
 +      /*
 +       * This is a memory buffer that will contain both statistics
 +       * ramrod request and data.
 +       */
 +      void                    *fw_stats;
 +      dma_addr_t              fw_stats_mapping;
 +
 +      /*
 +       * FW statistics request shortcut (points at the
 +       * beginning of fw_stats buffer).
 +       */
 +      struct bnx2x_fw_stats_req       *fw_stats_req;
 +      dma_addr_t                      fw_stats_req_mapping;
 +      int                             fw_stats_req_sz;
 +
 +      /*
 +       * FW statistics data shortcut (points at the beginning of
 +       * fw_stats buffer + fw_stats_req_sz).
 +       */
 +      struct bnx2x_fw_stats_data      *fw_stats_data;
 +      dma_addr_t                      fw_stats_data_mapping;
 +      int                             fw_stats_data_sz;
 +
 +      struct hw_context       context;
 +
 +      struct bnx2x_ilt        *ilt;
 +#define BP_ILT(bp)            ((bp)->ilt)
 +#define ILT_MAX_LINES         256
 +/*
 + * Maximum supported number of RSS queues: number of IGU SBs minus one that goes
 + * to CNIC.
 + */
 +#define BNX2X_MAX_RSS_COUNT(bp)       ((bp)->igu_sb_cnt - CNIC_PRESENT)
 +
 +/*
 + * Maximum CID count that might be required by the bnx2x:
 + * Max Tss * Max_Tx_Multi_Cos + CNIC L2 Clients (FCoE and iSCSI related)
 + */
 +#define BNX2X_L2_CID_COUNT(bp)        (MAX_TXQS_PER_COS * BNX2X_MULTI_TX_COS +\
 +                                      NON_ETH_CONTEXT_USE + CNIC_PRESENT)
 +#define L2_ILT_LINES(bp)      (DIV_ROUND_UP(BNX2X_L2_CID_COUNT(bp),\
 +                                      ILT_PAGE_CIDS))
 +#define BNX2X_DB_SIZE(bp)     (BNX2X_L2_CID_COUNT(bp) * (1 << BNX2X_DB_SHIFT))
 +
 +      int                     qm_cid_count;
 +
 +      int                     dropless_fc;
 +
 +#ifdef BCM_CNIC
 +      u32                     cnic_flags;
 +#define BNX2X_CNIC_FLAG_MAC_SET               1
 +      void                    *t2;
 +      dma_addr_t              t2_mapping;
 +      struct cnic_ops __rcu   *cnic_ops;
 +      void                    *cnic_data;
 +      u32                     cnic_tag;
 +      struct cnic_eth_dev     cnic_eth_dev;
 +      union host_hc_status_block cnic_sb;
 +      dma_addr_t              cnic_sb_mapping;
 +      struct eth_spe          *cnic_kwq;
 +      struct eth_spe          *cnic_kwq_prod;
 +      struct eth_spe          *cnic_kwq_cons;
 +      struct eth_spe          *cnic_kwq_last;
 +      u16                     cnic_kwq_pending;
 +      u16                     cnic_spq_pending;
 +      u8                      fip_mac[ETH_ALEN];
 +      struct mutex            cnic_mutex;
 +      struct bnx2x_vlan_mac_obj iscsi_l2_mac_obj;
 +
 +      /* Start index of the "special" (CNIC related) L2 clients */
 +      u8                              cnic_base_cl_id;
 +#endif
 +
 +      int                     dmae_ready;
 +      /* used to synchronize dmae accesses */
 +      spinlock_t              dmae_lock;
 +
 +      /* used to protect the FW mail box */
 +      struct mutex            fw_mb_mutex;
 +
 +      /* used to synchronize stats collecting */
 +      int                     stats_state;
 +
 +      /* used to synchronize statistics handling between concurrent threads */
 +      spinlock_t              stats_lock;
 +
 +      /* used by dmae command loader */
 +      struct dmae_command     stats_dmae;
 +      int                     executer_idx;
 +
 +      u16                     stats_counter;
 +      struct bnx2x_eth_stats  eth_stats;
 +
 +      struct z_stream_s       *strm;
 +      void                    *gunzip_buf;
 +      dma_addr_t              gunzip_mapping;
 +      int                     gunzip_outlen;
 +#define FW_BUF_SIZE                   0x8000
 +#define GUNZIP_BUF(bp)                        (bp->gunzip_buf)
 +#define GUNZIP_PHYS(bp)                       (bp->gunzip_mapping)
 +#define GUNZIP_OUTLEN(bp)             (bp->gunzip_outlen)
 +
 +      struct raw_op           *init_ops;
 +      /* Init blocks offsets inside init_ops */
 +      u16                     *init_ops_offsets;
 +      /* Data blob - has 32 bit granularity */
 +      u32                     *init_data;
 +      u32                     init_mode_flags;
 +#define INIT_MODE_FLAGS(bp)   (bp->init_mode_flags)
 +      /* Zipped PRAM blobs - raw data */
 +      const u8                *tsem_int_table_data;
 +      const u8                *tsem_pram_data;
 +      const u8                *usem_int_table_data;
 +      const u8                *usem_pram_data;
 +      const u8                *xsem_int_table_data;
 +      const u8                *xsem_pram_data;
 +      const u8                *csem_int_table_data;
 +      const u8                *csem_pram_data;
 +#define INIT_OPS(bp)                  (bp->init_ops)
 +#define INIT_OPS_OFFSETS(bp)          (bp->init_ops_offsets)
 +#define INIT_DATA(bp)                 (bp->init_data)
 +#define INIT_TSEM_INT_TABLE_DATA(bp)  (bp->tsem_int_table_data)
 +#define INIT_TSEM_PRAM_DATA(bp)               (bp->tsem_pram_data)
 +#define INIT_USEM_INT_TABLE_DATA(bp)  (bp->usem_int_table_data)
 +#define INIT_USEM_PRAM_DATA(bp)               (bp->usem_pram_data)
 +#define INIT_XSEM_INT_TABLE_DATA(bp)  (bp->xsem_int_table_data)
 +#define INIT_XSEM_PRAM_DATA(bp)               (bp->xsem_pram_data)
 +#define INIT_CSEM_INT_TABLE_DATA(bp)  (bp->csem_int_table_data)
 +#define INIT_CSEM_PRAM_DATA(bp)               (bp->csem_pram_data)
 +
 +#define PHY_FW_VER_LEN                        20
 +      char                    fw_ver[32];
 +      const struct firmware   *firmware;
 +
 +      /* DCB support on/off */
 +      u16 dcb_state;
 +#define BNX2X_DCB_STATE_OFF                   0
 +#define BNX2X_DCB_STATE_ON                    1
 +
 +      /* DCBX engine mode */
 +      int dcbx_enabled;
 +#define BNX2X_DCBX_ENABLED_OFF                        0
 +#define BNX2X_DCBX_ENABLED_ON_NEG_OFF         1
 +#define BNX2X_DCBX_ENABLED_ON_NEG_ON          2
 +#define BNX2X_DCBX_ENABLED_INVALID            (-1)
 +
 +      bool dcbx_mode_uset;
 +
 +      struct bnx2x_config_dcbx_params         dcbx_config_params;
 +      struct bnx2x_dcbx_port_params           dcbx_port_params;
 +      int                                     dcb_version;
 +
 +      /* CAM credit pools */
 +      struct bnx2x_credit_pool_obj            macs_pool;
 +
 +      /* RX_MODE object */
 +      struct bnx2x_rx_mode_obj                rx_mode_obj;
 +
 +      /* MCAST object */
 +      struct bnx2x_mcast_obj                  mcast_obj;
 +
 +      /* RSS configuration object */
 +      struct bnx2x_rss_config_obj             rss_conf_obj;
 +
 +      /* Function State controlling object */
 +      struct bnx2x_func_sp_obj                func_obj;
 +
 +      unsigned long                           sp_state;
 +
 +      /* operation indication for the sp_rtnl task */
 +      unsigned long                           sp_rtnl_state;
 +
 +      /* DCBX Negotiation results */
 +      struct dcbx_features                    dcbx_local_feat;
 +      u32                                     dcbx_error;
 +
 +#ifdef BCM_DCBNL
 +      struct dcbx_features                    dcbx_remote_feat;
 +      u32                                     dcbx_remote_flags;
 +#endif
 +      u32                                     pending_max;
 +
 +      /* multiple tx classes of service */
 +      u8                                      max_cos;
 +
 +      /* priority to cos mapping */
 +      u8                                      prio_to_cos[8];
 +};
 +
 +/* The Tx queue count may be less than or equal to the Rx queue count */
 +extern int num_queues;
 +#define BNX2X_NUM_QUEUES(bp)  (bp->num_queues)
 +#define BNX2X_NUM_ETH_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE)
 +#define BNX2X_NUM_RX_QUEUES(bp)       BNX2X_NUM_QUEUES(bp)
 +
 +#define is_multi(bp)          (BNX2X_NUM_QUEUES(bp) > 1)
 +
 +#define BNX2X_MAX_QUEUES(bp)  BNX2X_MAX_RSS_COUNT(bp)
 +/* #define is_eth_multi(bp)   (BNX2X_NUM_ETH_QUEUES(bp) > 1) */
 +
 +#define RSS_IPV4_CAP_MASK                                             \
 +      TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY
 +
 +#define RSS_IPV4_TCP_CAP_MASK                                         \
 +      TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY
 +
 +#define RSS_IPV6_CAP_MASK                                             \
 +      TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY
 +
 +#define RSS_IPV6_TCP_CAP_MASK                                         \
 +      TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY
 +
 +/* func init flags */
 +#define FUNC_FLG_RSS          0x0001
 +#define FUNC_FLG_STATS                0x0002
 +/* removed  FUNC_FLG_UNMATCHED        0x0004 */
 +#define FUNC_FLG_TPA          0x0008
 +#define FUNC_FLG_SPQ          0x0010
 +#define FUNC_FLG_LEADING      0x0020  /* PF only */
 +
 +
 +struct bnx2x_func_init_params {
 +      /* dma */
 +      dma_addr_t      fw_stat_map;    /* valid iff FUNC_FLG_STATS */
 +      dma_addr_t      spq_map;        /* valid iff FUNC_FLG_SPQ */
 +
 +      u16             func_flgs;
 +      u16             func_id;        /* abs fid */
 +      u16             pf_id;
 +      u16             spq_prod;       /* valid iff FUNC_FLG_SPQ */
 +};
 +
 +#define for_each_eth_queue(bp, var) \
 +      for ((var) = 0; (var) < BNX2X_NUM_ETH_QUEUES(bp); (var)++)
 +
 +#define for_each_nondefault_eth_queue(bp, var) \
 +      for ((var) = 1; (var) < BNX2X_NUM_ETH_QUEUES(bp); (var)++)
 +
 +#define for_each_queue(bp, var) \
 +      for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
 +              if (skip_queue(bp, var))        \
 +                      continue;               \
 +              else
 +
 +/* Skip forwarding FP */
 +#define for_each_rx_queue(bp, var) \
 +      for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
 +              if (skip_rx_queue(bp, var))     \
 +                      continue;               \
 +              else
 +
 +/* Skip OOO FP */
 +#define for_each_tx_queue(bp, var) \
 +      for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
 +              if (skip_tx_queue(bp, var))     \
 +                      continue;               \
 +              else
 +
 +#define for_each_nondefault_queue(bp, var) \
 +      for ((var) = 1; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
 +              if (skip_queue(bp, var))        \
 +                      continue;               \
 +              else
 +
 +#define for_each_cos_in_tx_queue(fp, var) \
 +      for ((var) = 0; (var) < (fp)->max_cos; (var)++)
 +
 +/* skip rx queue
 + * if FCOE l2 support is disabled and this is the fcoe L2 queue
 + */
 +#define skip_rx_queue(bp, idx)        (NO_FCOE(bp) && IS_FCOE_IDX(idx))
 +
 +/* skip tx queue
 + * if FCOE l2 support is disabled and this is the fcoe L2 queue
 + */
 +#define skip_tx_queue(bp, idx)        (NO_FCOE(bp) && IS_FCOE_IDX(idx))
 +
 +#define skip_queue(bp, idx)   (NO_FCOE(bp) && IS_FCOE_IDX(idx))
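A hedged usage sketch for the iterators; bnx2x_bz_fp() is the fastpath-zeroing helper defined later in bnx2x_cmn.c and is used here only as an arbitrary loop body:

	/* Visit every ETH queue (FCoE and other special queues excluded) */
	int i;

	for_each_eth_queue(bp, i)
		bnx2x_bz_fp(bp, i);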
 +
 +
 +
 +
 +/**
 + * bnx2x_set_mac_one - configure a single MAC address
 + *
 + * @bp:                       driver handle
 + * @mac:              MAC to configure
 + * @obj:              MAC object handle
 + * @set:              if 'true' add a new MAC, otherwise - delete
 + * @mac_type:         the type of the MAC to configure (e.g. ETH, UC list)
 + * @ramrod_flags:     RAMROD_XXX flags (e.g. RAMROD_CONT, RAMROD_COMP_WAIT)
 + *
 + * Configures one MAC according to provided parameters or continues the
 + * execution of previously scheduled commands if RAMROD_CONT is set in
 + * ramrod_flags.
 + *
 + * Returns zero if the operation has completed successfully, a positive value
 + * if the operation has been successfully scheduled and a negative value if
 + * the requested operation has failed.
 + */
 +int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
 +                    struct bnx2x_vlan_mac_obj *obj, bool set,
 +                    int mac_type, unsigned long *ramrod_flags);
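A hedged caller sketch (example_set_mac is hypothetical; RAMROD_COMP_WAIT is one of the RAMROD_XXX bits mentioned above and is assumed to be defined in the SP headers):

	/* Hypothetical caller: add one MAC and block until the ramrod completes */
	static int example_set_mac(struct bnx2x *bp, u8 *mac,
				   struct bnx2x_vlan_mac_obj *obj, int mac_type)
	{
		unsigned long ramrod_flags = 0;

		__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
		return bnx2x_set_mac_one(bp, mac, obj, true, mac_type,
					 &ramrod_flags);
	}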
 +/**
 + * bnx2x_del_all_macs - delete all MACs configured for the specific MAC object
 + *
 + * @bp:                       driver handle
 + * @mac_obj:          MAC object handle
 + * @mac_type:         type of the MACs to clear (BNX2X_XXX_MAC)
 + * @wait_for_comp:    if 'true' block until completion
 + *
 + * Deletes all MACs of the specific type (e.g. ETH, UC list).
 + *
 + * Returns zero if the operation has completed successfully, a positive value
 + * if the operation has been successfully scheduled and a negative value if
 + * the requested operation has failed.
 + */
 +int bnx2x_del_all_macs(struct bnx2x *bp,
 +                     struct bnx2x_vlan_mac_obj *mac_obj,
 +                     int mac_type, bool wait_for_comp);
 +
 +/* Init Function API  */
 +void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p);
 +int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port);
 +int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
 +int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode);
 +int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
 +void bnx2x_read_mf_cfg(struct bnx2x *bp);
 +
 +
 +/* dmae */
 +void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32);
 +void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
 +                    u32 len32);
 +void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx);
 +u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type);
 +u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode);
 +u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
 +                    bool with_comp, u8 comp_type);
 +
 +
 +void bnx2x_calc_fc_adv(struct bnx2x *bp);
 +int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
 +                u32 data_hi, u32 data_lo, int cmd_type);
 +void bnx2x_update_coalesce(struct bnx2x *bp);
 +int bnx2x_get_cur_phy_idx(struct bnx2x *bp);
 +
 +static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 +                         int wait)
 +{
 +      u32 val;
 +
 +      do {
 +              val = REG_RD(bp, reg);
 +              if (val == expected)
 +                      break;
 +              ms -= wait;
 +              msleep(wait);
 +
 +      } while (ms > 0);
 +
 +      return val;
 +}
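A small hedged usage example of the polling helper (MISC_REG_EXAMPLE is a placeholder, not a real register name):

	/* Wait up to 200 ms, sampling every 10 ms, for the register to read 1 */
	u32 val = reg_poll(bp, MISC_REG_EXAMPLE, 1, 200, 10);

	if (val != 1)
		BNX2X_ERR("register did not reach the expected value\n");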
 +
 +#define BNX2X_ILT_ZALLOC(x, y, size) \
 +      do { \
 +              x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
 +              if (x) \
 +                      memset(x, 0, size); \
 +      } while (0)
 +
 +#define BNX2X_ILT_FREE(x, y, size) \
 +      do { \
 +              if (x) { \
 +                      dma_free_coherent(&bp->pdev->dev, size, x, y); \
 +                      x = NULL; \
 +                      y = 0; \
 +              } \
 +      } while (0)
 +
 +#define ILOG2(x)      (ilog2((x)))
 +
 +#define ILT_NUM_PAGE_ENTRIES  (3072)
 +/* In the 57710/11 we use the whole table since we have 8 functions.
 + * In the 57712 we have only 4 functions, but use the same size per function,
 + * so only half of the table is in use.
 + */
 +#define ILT_PER_FUNC          (ILT_NUM_PAGE_ENTRIES/8)
 +
 +#define FUNC_ILT_BASE(func)   (func * ILT_PER_FUNC)
 +/*
 + * The physical address is shifted right by 12 bits and a 1=valid bit is
 + * added as the 53rd bit. Since this is a wide register, it is split into
 + * two 32 bit writes.
 + */
 +#define ONCHIP_ADDR1(x)               ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
 +#define ONCHIP_ADDR2(x)               ((u32)((1 << 20) | ((u64)x >> 44)))
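A rough sketch of the resulting two-write pattern (reg_lo and reg_hi stand in for the real ILT register offsets, which are programmed elsewhere in the driver):

	/* Illustrative only: program one ILT line from a page's DMA address */
	static void example_ilt_line_wr(struct bnx2x *bp, u32 reg_lo, u32 reg_hi,
					dma_addr_t page_mapping)
	{
		REG_WR(bp, reg_lo, ONCHIP_ADDR1(page_mapping));	/* low 32 bits */
		REG_WR(bp, reg_hi, ONCHIP_ADDR2(page_mapping));	/* valid + high bits */
	}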
 +
 +/* load/unload mode */
 +#define LOAD_NORMAL                   0
 +#define LOAD_OPEN                     1
 +#define LOAD_DIAG                     2
 +#define UNLOAD_NORMAL                 0
 +#define UNLOAD_CLOSE                  1
 +#define UNLOAD_RECOVERY                       2
 +
 +
 +/* DMAE command defines */
 +#define DMAE_TIMEOUT                  -1
 +#define DMAE_PCI_ERROR                        -2      /* E2 and onward */
 +#define DMAE_NOT_RDY                  -3
 +#define DMAE_PCI_ERR_FLAG             0x80000000
 +
 +#define DMAE_SRC_PCI                  0
 +#define DMAE_SRC_GRC                  1
 +
 +#define DMAE_DST_NONE                 0
 +#define DMAE_DST_PCI                  1
 +#define DMAE_DST_GRC                  2
 +
 +#define DMAE_COMP_PCI                 0
 +#define DMAE_COMP_GRC                 1
 +
 +/* E2 and onward - PCI error handling in the completion */
 +
 +#define DMAE_COMP_REGULAR             0
 +#define DMAE_COM_SET_ERR              1
 +
 +#define DMAE_CMD_SRC_PCI              (DMAE_SRC_PCI << \
 +                                              DMAE_COMMAND_SRC_SHIFT)
 +#define DMAE_CMD_SRC_GRC              (DMAE_SRC_GRC << \
 +                                              DMAE_COMMAND_SRC_SHIFT)
 +
 +#define DMAE_CMD_DST_PCI              (DMAE_DST_PCI << \
 +                                              DMAE_COMMAND_DST_SHIFT)
 +#define DMAE_CMD_DST_GRC              (DMAE_DST_GRC << \
 +                                              DMAE_COMMAND_DST_SHIFT)
 +
 +#define DMAE_CMD_C_DST_PCI            (DMAE_COMP_PCI << \
 +                                              DMAE_COMMAND_C_DST_SHIFT)
 +#define DMAE_CMD_C_DST_GRC            (DMAE_COMP_GRC << \
 +                                              DMAE_COMMAND_C_DST_SHIFT)
 +
 +#define DMAE_CMD_C_ENABLE             DMAE_COMMAND_C_TYPE_ENABLE
 +
 +#define DMAE_CMD_ENDIANITY_NO_SWAP    (0 << DMAE_COMMAND_ENDIANITY_SHIFT)
 +#define DMAE_CMD_ENDIANITY_B_SWAP     (1 << DMAE_COMMAND_ENDIANITY_SHIFT)
 +#define DMAE_CMD_ENDIANITY_DW_SWAP    (2 << DMAE_COMMAND_ENDIANITY_SHIFT)
 +#define DMAE_CMD_ENDIANITY_B_DW_SWAP  (3 << DMAE_COMMAND_ENDIANITY_SHIFT)
 +
 +#define DMAE_CMD_PORT_0                       0
 +#define DMAE_CMD_PORT_1                       DMAE_COMMAND_PORT
 +
 +#define DMAE_CMD_SRC_RESET            DMAE_COMMAND_SRC_RESET
 +#define DMAE_CMD_DST_RESET            DMAE_COMMAND_DST_RESET
 +#define DMAE_CMD_E1HVN_SHIFT          DMAE_COMMAND_E1HVN_SHIFT
 +
 +#define DMAE_SRC_PF                   0
 +#define DMAE_SRC_VF                   1
 +
 +#define DMAE_DST_PF                   0
 +#define DMAE_DST_VF                   1
 +
 +#define DMAE_C_SRC                    0
 +#define DMAE_C_DST                    1
 +
 +#define DMAE_LEN32_RD_MAX             0x80
 +#define DMAE_LEN32_WR_MAX(bp)         (CHIP_IS_E1(bp) ? 0x400 : 0x2000)
 +
 +#define DMAE_COMP_VAL                 0x60d0d0ae /* E2 and on - upper bit
 +                                                      indicates error */
 +
 +#define MAX_DMAE_C_PER_PORT           8
 +#define INIT_DMAE_C(bp)                       (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
-                                        BP_E1HVN(bp))
++                                       BP_VN(bp))
 +#define PMF_DMAE_C(bp)                        (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
 +                                       E1HVN_MAX)
 +
 +/* PCIE link and speed */
 +#define PCICFG_LINK_WIDTH             0x1f00000
 +#define PCICFG_LINK_WIDTH_SHIFT               20
 +#define PCICFG_LINK_SPEED             0xf0000
 +#define PCICFG_LINK_SPEED_SHIFT               16
 +
 +
 +#define BNX2X_NUM_TESTS                       7
 +
 +#define BNX2X_PHY_LOOPBACK            0
 +#define BNX2X_MAC_LOOPBACK            1
 +#define BNX2X_PHY_LOOPBACK_FAILED     1
 +#define BNX2X_MAC_LOOPBACK_FAILED     2
 +#define BNX2X_LOOPBACK_FAILED         (BNX2X_MAC_LOOPBACK_FAILED | \
 +                                       BNX2X_PHY_LOOPBACK_FAILED)
 +
 +
 +#define STROM_ASSERT_ARRAY_SIZE               50
 +
 +
 +/* must be used on a CID before placing it on a HW ring */
 +#define HW_CID(bp, x)                 ((BP_PORT(bp) << 23) | \
-                                        (BP_E1HVN(bp) << BNX2X_SWCID_SHIFT) | \
++                                       (BP_VN(bp) << BNX2X_SWCID_SHIFT) | \
 +                                       (x))
 +
 +#define SP_DESC_CNT           (BCM_PAGE_SIZE / sizeof(struct eth_spe))
 +#define MAX_SP_DESC_CNT                       (SP_DESC_CNT - 1)
 +
 +
 +#define BNX2X_BTR                     4
 +#define MAX_SPQ_PENDING                       8
 +
 +/* CMNG constants, as derived from system spec calculations */
 +/* default MIN rate in case VNIC min rate is configured to zero - 100Mbps */
 +#define DEF_MIN_RATE                                  100
 +/* resolution of the rate shaping timer - 400 usec */
 +#define RS_PERIODIC_TIMEOUT_USEC                      400
 +/* number of bytes in single QM arbitration cycle -
 + * coefficient for calculating the fairness timer */
 +#define QM_ARB_BYTES                                  160000
 +/* resolution of Min algorithm 1:100 */
 +#define MIN_RES                                               100
 +/* how many bytes above threshold for the minimal credit of Min algorithm*/
 +#define MIN_ABOVE_THRESH                              32768
 +/* Fairness algorithm integration time coefficient -
 + * for calculating the actual Tfair */
 +#define T_FAIR_COEF   ((MIN_ABOVE_THRESH +  QM_ARB_BYTES) * 8 * MIN_RES)
 +/* Memory of fairness algorithm - 2 cycles */
 +#define FAIR_MEM                                      2
 +
 +
 +#define ATTN_NIG_FOR_FUNC             (1L << 8)
 +#define ATTN_SW_TIMER_4_FUNC          (1L << 9)
 +#define GPIO_2_FUNC                   (1L << 10)
 +#define GPIO_3_FUNC                   (1L << 11)
 +#define GPIO_4_FUNC                   (1L << 12)
 +#define ATTN_GENERAL_ATTN_1           (1L << 13)
 +#define ATTN_GENERAL_ATTN_2           (1L << 14)
 +#define ATTN_GENERAL_ATTN_3           (1L << 15)
 +#define ATTN_GENERAL_ATTN_4           (1L << 13)
 +#define ATTN_GENERAL_ATTN_5           (1L << 14)
 +#define ATTN_GENERAL_ATTN_6           (1L << 15)
 +
 +#define ATTN_HARD_WIRED_MASK          0xff00
 +#define ATTENTION_ID                  4
 +
 +
 +/* stuff added to make the code fit 80Col */
 +
 +#define BNX2X_PMF_LINK_ASSERT \
 +      GENERAL_ATTEN_OFFSET(LINK_SYNC_ATTENTION_BIT_FUNC_0 + BP_FUNC(bp))
 +
 +#define BNX2X_MC_ASSERT_BITS \
 +      (GENERAL_ATTEN_OFFSET(TSTORM_FATAL_ASSERT_ATTENTION_BIT) | \
 +       GENERAL_ATTEN_OFFSET(USTORM_FATAL_ASSERT_ATTENTION_BIT) | \
 +       GENERAL_ATTEN_OFFSET(CSTORM_FATAL_ASSERT_ATTENTION_BIT) | \
 +       GENERAL_ATTEN_OFFSET(XSTORM_FATAL_ASSERT_ATTENTION_BIT))
 +
 +#define BNX2X_MCP_ASSERT \
 +      GENERAL_ATTEN_OFFSET(MCP_FATAL_ASSERT_ATTENTION_BIT)
 +
 +#define BNX2X_GRC_TIMEOUT     GENERAL_ATTEN_OFFSET(LATCHED_ATTN_TIMEOUT_GRC)
 +#define BNX2X_GRC_RSV         (GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCR) | \
 +                               GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCT) | \
 +                               GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCN) | \
 +                               GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCU) | \
 +                               GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCP) | \
 +                               GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RSVD_GRC))
 +
 +#define HW_INTERRUT_ASSERT_SET_0 \
 +                              (AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT | \
 +                               AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT | \
 +                               AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT | \
 +                               AEU_INPUTS_ATTN_BITS_PBCLIENT_HW_INTERRUPT)
 +#define HW_PRTY_ASSERT_SET_0  (AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR | \
 +                               AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR | \
 +                               AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR | \
 +                               AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR |\
 +                               AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR |\
 +                               AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR |\
 +                               AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR)
 +#define HW_INTERRUT_ASSERT_SET_1 \
 +                              (AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT | \
 +                               AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT | \
 +                               AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT | \
 +                               AEU_INPUTS_ATTN_BITS_XCM_HW_INTERRUPT | \
 +                               AEU_INPUTS_ATTN_BITS_XSEMI_HW_INTERRUPT | \
 +                               AEU_INPUTS_ATTN_BITS_USDM_HW_INTERRUPT | \
 +                               AEU_INPUTS_ATTN_BITS_UCM_HW_INTERRUPT | \
 +                               AEU_INPUTS_ATTN_BITS_USEMI_HW_INTERRUPT | \
 +                               AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT | \
 +                               AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT | \
 +                               AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT)
 +#define HW_PRTY_ASSERT_SET_1  (AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR |\
 +                               AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR | \
 +                               AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR |\
 +                               AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR | \
 +                               AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR |\
 +                               AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR | \
 +                               AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR |\
 +                               AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR |\
 +                           AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR |\
 +                               AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR | \
 +                               AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR | \
 +                               AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR |\
 +                               AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR | \
 +                               AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR | \
 +                               AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR |\
 +                               AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR)
 +#define HW_INTERRUT_ASSERT_SET_2 \
 +                              (AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT | \
 +                               AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT | \
 +                               AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT | \
 +                      AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT |\
 +                               AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT)
 +#define HW_PRTY_ASSERT_SET_2  (AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR | \
 +                               AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR | \
 +                      AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR |\
 +                               AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR | \
 +                               AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR | \
 +                               AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR |\
 +                               AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR | \
 +                               AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR)
 +
 +#define HW_PRTY_ASSERT_SET_3 (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
 +              AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
 +              AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \
 +              AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
 +
 +#define HW_PRTY_ASSERT_SET_4 (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | \
 +                            AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)
 +
 +#define RSS_FLAGS(bp) \
 +              (TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY | \
 +               TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY | \
 +               TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY | \
 +               TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY | \
 +               (bp->multi_mode << \
 +                TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT))
 +#define MULTI_MASK                    0x7f
 +
 +
 +#define DEF_USB_FUNC_OFF      offsetof(struct cstorm_def_status_block_u, func)
 +#define DEF_CSB_FUNC_OFF      offsetof(struct cstorm_def_status_block_c, func)
 +#define DEF_XSB_FUNC_OFF      offsetof(struct xstorm_def_status_block, func)
 +#define DEF_TSB_FUNC_OFF      offsetof(struct tstorm_def_status_block, func)
 +
 +#define DEF_USB_IGU_INDEX_OFF \
 +                      offsetof(struct cstorm_def_status_block_u, igu_index)
 +#define DEF_CSB_IGU_INDEX_OFF \
 +                      offsetof(struct cstorm_def_status_block_c, igu_index)
 +#define DEF_XSB_IGU_INDEX_OFF \
 +                      offsetof(struct xstorm_def_status_block, igu_index)
 +#define DEF_TSB_IGU_INDEX_OFF \
 +                      offsetof(struct tstorm_def_status_block, igu_index)
 +
 +#define DEF_USB_SEGMENT_OFF \
 +                      offsetof(struct cstorm_def_status_block_u, segment)
 +#define DEF_CSB_SEGMENT_OFF \
 +                      offsetof(struct cstorm_def_status_block_c, segment)
 +#define DEF_XSB_SEGMENT_OFF \
 +                      offsetof(struct xstorm_def_status_block, segment)
 +#define DEF_TSB_SEGMENT_OFF \
 +                      offsetof(struct tstorm_def_status_block, segment)
 +
 +#define BNX2X_SP_DSB_INDEX \
 +              (&bp->def_status_blk->sp_sb.\
 +                                      index_values[HC_SP_INDEX_ETH_DEF_CONS])
 +
 +#define SET_FLAG(value, mask, flag) \
 +      do {\
 +              (value) &= ~(mask);\
 +              (value) |= ((flag) << (mask##_SHIFT));\
 +      } while (0)
 +
 +#define GET_FLAG(value, mask) \
 +      (((value) & (mask)) >> (mask##_SHIFT))
 +
 +#define GET_FIELD(value, fname) \
 +      (((value) & (fname##_MASK)) >> (fname##_SHIFT))
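A self-contained hedged example of the FOO/FOO_SHIFT naming convention that SET_FLAG and GET_FLAG rely on (EXAMPLE_FIELD is invented for illustration only):

	#define EXAMPLE_FIELD		0x0000f000
	#define EXAMPLE_FIELD_SHIFT	12

	/* SET_FLAG(val, EXAMPLE_FIELD, 0x5) clears bits 15:12 and stores 0x5
	 * there, giving val == 0x5000; GET_FLAG(val, EXAMPLE_FIELD) then
	 * reads 0x5 back.
	 */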
 +
 +#define CAM_IS_INVALID(x) \
 +      (GET_FLAG(x.flags, \
 +      MAC_CONFIGURATION_ENTRY_ACTION_TYPE) == \
 +      (T_ETH_MAC_COMMAND_INVALIDATE))
 +
 +/* Number of u32 elements in MC hash array */
 +#define MC_HASH_SIZE                  8
 +#define MC_HASH_OFFSET(bp, i)         (BAR_TSTRORM_INTMEM + \
 +      TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(BP_FUNC(bp)) + i*4)
 +
 +
 +#ifndef PXP2_REG_PXP2_INT_STS
 +#define PXP2_REG_PXP2_INT_STS         PXP2_REG_PXP2_INT_STS_0
 +#endif
 +
 +#ifndef ETH_MAX_RX_CLIENTS_E2
 +#define ETH_MAX_RX_CLIENTS_E2         ETH_MAX_RX_CLIENTS_E1H
 +#endif
 +
 +#define BNX2X_VPD_LEN                 128
 +#define VENDOR_ID_LEN                 4
 +
 +/* Congestion management fairness mode */
 +#define CMNG_FNS_NONE         0
 +#define CMNG_FNS_MINMAX               1
 +
 +#define HC_SEG_ACCESS_DEF             0   /*Driver decision 0-3*/
 +#define HC_SEG_ACCESS_ATTN            4
 +#define HC_SEG_ACCESS_NORM            0   /*Driver decision 0-1*/
 +
 +static const u32 dmae_reg_go_c[] = {
 +      DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
 +      DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
 +      DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
 +      DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
 +};
 +
 +void bnx2x_set_ethtool_ops(struct net_device *netdev);
 +void bnx2x_notify_link_changed(struct bnx2x *bp);
 +#endif /* bnx2x.h */
index 5c3eb17,0000000..e575e89
mode 100644,000000..100644
--- /dev/null
@@@ -1,3597 -1,0 +1,3598 @@@
 +/* bnx2x_cmn.c: Broadcom Everest network driver.
 + *
 + * Copyright (c) 2007-2011 Broadcom Corporation
 + *
 + * This program is free software; you can redistribute it and/or modify
 + * it under the terms of the GNU General Public License as published by
 + * the Free Software Foundation.
 + *
 + * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 + * Written by: Eliezer Tamir
 + * Based on code from Michael Chan's bnx2 driver
 + * UDP CSUM errata workaround by Arik Gendelman
 + * Slowpath and fastpath rework by Vladislav Zolotarov
 + * Statistics and Link management by Yitchak Gertner
 + *
 + */
 +
 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 +
 +#include <linux/etherdevice.h>
 +#include <linux/if_vlan.h>
 +#include <linux/interrupt.h>
 +#include <linux/ip.h>
 +#include <net/ipv6.h>
 +#include <net/ip6_checksum.h>
 +#include <linux/firmware.h>
 +#include <linux/prefetch.h>
 +#include "bnx2x_cmn.h"
 +#include "bnx2x_init.h"
 +#include "bnx2x_sp.h"
 +
 +
 +
 +/**
 + * bnx2x_bz_fp - zero content of the fastpath structure.
 + *
 + * @bp:               driver handle
 + * @index:    fastpath index to be zeroed
 + *
 + * Makes sure the contents of bp->fp[index].napi are kept
 + * intact.
 + */
 +static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
 +{
 +      struct bnx2x_fastpath *fp = &bp->fp[index];
 +      struct napi_struct orig_napi = fp->napi;
 +      /* bzero bnx2x_fastpath contents */
 +      memset(fp, 0, sizeof(*fp));
 +
 +      /* Restore the NAPI object as it has been already initialized */
 +      fp->napi = orig_napi;
 +
 +      fp->bp = bp;
 +      fp->index = index;
 +      if (IS_ETH_FP(fp))
 +              fp->max_cos = bp->max_cos;
 +      else
 +              /* Special queues support only one CoS */
 +              fp->max_cos = 1;
 +
 +      /*
 +       * set the tpa flag for each queue. The tpa flag determines the queue
 +       * minimal size so it must be set prior to queue memory allocation
 +       */
 +      fp->disable_tpa = ((bp->flags & TPA_ENABLE_FLAG) == 0);
 +
 +#ifdef BCM_CNIC
 +      /* We don't want TPA on an FCoE L2 ring */
 +      if (IS_FCOE_FP(fp))
 +              fp->disable_tpa = 1;
 +#endif
 +}
 +
 +/**
 + * bnx2x_move_fp - move content of the fastpath structure.
 + *
 + * @bp:               driver handle
 + * @from:     source FP index
 + * @to:               destination FP index
 + *
 + * Makes sure the contents of bp->fp[to].napi are kept
 + * intact.
 + */
 +static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
 +{
 +      struct bnx2x_fastpath *from_fp = &bp->fp[from];
 +      struct bnx2x_fastpath *to_fp = &bp->fp[to];
 +      struct napi_struct orig_napi = to_fp->napi;
 +      /* Move bnx2x_fastpath contents */
 +      memcpy(to_fp, from_fp, sizeof(*to_fp));
 +      to_fp->index = to;
 +
 +      /* Restore the NAPI object as it has been already initialized */
 +      to_fp->napi = orig_napi;
 +}
 +
 +int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
 +
 +/* free skb in the packet ring at pos idx
 + * return idx of last bd freed
 + */
 +static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
 +                           u16 idx)
 +{
 +      struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
 +      struct eth_tx_start_bd *tx_start_bd;
 +      struct eth_tx_bd *tx_data_bd;
 +      struct sk_buff *skb = tx_buf->skb;
 +      u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
 +      int nbd;
 +
 +      /* prefetch skb end pointer to speed up dev_kfree_skb() */
 +      prefetch(&skb->end);
 +
 +      DP(BNX2X_MSG_FP, "fp[%d]: pkt_idx %d  buff @(%p)->skb %p\n",
 +         txdata->txq_index, idx, tx_buf, skb);
 +
 +      /* unmap first bd */
 +      DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
 +      tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
 +      dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
 +                       BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
 +
 +
 +      nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
 +#ifdef BNX2X_STOP_ON_ERROR
 +      if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
 +              BNX2X_ERR("BAD nbd!\n");
 +              bnx2x_panic();
 +      }
 +#endif
 +      new_cons = nbd + tx_buf->first_bd;
 +
 +      /* Get the next bd */
 +      bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 +
 +      /* Skip a parse bd... */
 +      --nbd;
 +      bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 +
 +      /* ...and the TSO split header bd since they have no mapping */
 +      if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
 +              --nbd;
 +              bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 +      }
 +
 +      /* now free frags */
 +      while (nbd > 0) {
 +
 +              DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
 +              tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
 +              dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
 +                             BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
 +              if (--nbd)
 +                      bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 +      }
 +
 +      /* release skb */
 +      WARN_ON(!skb);
 +      dev_kfree_skb_any(skb);
 +      tx_buf->first_bd = 0;
 +      tx_buf->skb = NULL;
 +
 +      return new_cons;
 +}
 +
 +int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
 +{
 +      struct netdev_queue *txq;
 +      u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
 +
 +#ifdef BNX2X_STOP_ON_ERROR
 +      if (unlikely(bp->panic))
 +              return -1;
 +#endif
 +
 +      txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
 +      hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
 +      sw_cons = txdata->tx_pkt_cons;
 +
 +      while (sw_cons != hw_cons) {
 +              u16 pkt_cons;
 +
 +              pkt_cons = TX_BD(sw_cons);
 +
 +              DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u  sw_cons %u "
 +                                    " pkt_cons %u\n",
 +                 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
 +
 +              bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons);
 +              sw_cons++;
 +      }
 +
 +      txdata->tx_pkt_cons = sw_cons;
 +      txdata->tx_bd_cons = bd_cons;
 +
 +      /* Need to make the tx_bd_cons update visible to start_xmit()
 +       * before checking for netif_tx_queue_stopped().  Without the
 +       * memory barrier, there is a small possibility that
 +       * start_xmit() will miss it and cause the queue to be stopped
 +       * forever.
 +       * On the other hand we need an rmb() here to ensure the proper
 +       * ordering of bit testing in the following
 +       * netif_tx_queue_stopped(txq) call.
 +       */
 +      smp_mb();
 +
 +      if (unlikely(netif_tx_queue_stopped(txq))) {
 +              /* Taking tx_lock() is needed to prevent re-enabling the queue
 +               * while it's empty. This could happen if rx_action() gets
 +               * suspended in bnx2x_tx_int() after the condition before
 +               * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
 +               *
 +               * stops the queue->sees fresh tx_bd_cons->releases the queue->
 +               * sends some packets consuming the whole queue again->
 +               * stops the queue
 +               */
 +
 +              __netif_tx_lock(txq, smp_processor_id());
 +
 +              if ((netif_tx_queue_stopped(txq)) &&
 +                  (bp->state == BNX2X_STATE_OPEN) &&
 +                  (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3))
 +                      netif_tx_wake_queue(txq);
 +
 +              __netif_tx_unlock(txq);
 +      }
 +      return 0;
 +}
 +
 +static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
 +                                           u16 idx)
 +{
 +      u16 last_max = fp->last_max_sge;
 +
 +      if (SUB_S16(idx, last_max) > 0)
 +              fp->last_max_sge = idx;
 +}
 +
 +static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
 +                                struct eth_fast_path_rx_cqe *fp_cqe)
 +{
 +      struct bnx2x *bp = fp->bp;
 +      u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
 +                                   le16_to_cpu(fp_cqe->len_on_bd)) >>
 +                    SGE_PAGE_SHIFT;
 +      u16 last_max, last_elem, first_elem;
 +      u16 delta = 0;
 +      u16 i;
 +
 +      if (!sge_len)
 +              return;
 +
 +      /* First mark all used pages */
 +      for (i = 0; i < sge_len; i++)
 +              BIT_VEC64_CLEAR_BIT(fp->sge_mask,
 +                      RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
 +
 +      DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
 +         sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
 +
 +      /* Here we assume that the last SGE index is the biggest */
 +      prefetch((void *)(fp->sge_mask));
 +      bnx2x_update_last_max_sge(fp,
 +              le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
 +
 +      last_max = RX_SGE(fp->last_max_sge);
 +      last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
 +      first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
 +
 +      /* If ring is not full */
 +      if (last_elem + 1 != first_elem)
 +              last_elem++;
 +
 +      /* Now update the prod */
 +      for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
 +              if (likely(fp->sge_mask[i]))
 +                      break;
 +
 +              fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
 +              delta += BIT_VEC64_ELEM_SZ;
 +      }
 +
 +      if (delta > 0) {
 +              fp->rx_sge_prod += delta;
 +              /* clear page-end entries */
 +              bnx2x_clear_sge_mask_next_elems(fp);
 +      }
 +
 +      DP(NETIF_MSG_RX_STATUS,
 +         "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
 +         fp->last_max_sge, fp->rx_sge_prod);
 +}
 +
 +static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
 +                          struct sk_buff *skb, u16 cons, u16 prod,
 +                          struct eth_fast_path_rx_cqe *cqe)
 +{
 +      struct bnx2x *bp = fp->bp;
 +      struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
 +      struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
 +      struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
 +      dma_addr_t mapping;
 +      struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
 +      struct sw_rx_bd *first_buf = &tpa_info->first_buf;
 +
 +      /* print error if current state != stop */
 +      if (tpa_info->tpa_state != BNX2X_TPA_STOP)
 +              BNX2X_ERR("start of bin not in stop [%d]\n", queue);
 +
 +      /* Try to map an empty skb from the aggregation info  */
 +      mapping = dma_map_single(&bp->pdev->dev,
 +                               first_buf->skb->data,
 +                               fp->rx_buf_size, DMA_FROM_DEVICE);
 +      /*
 +       *  ...if it fails - move the skb from the consumer to the producer
 +       *  and set the current aggregation state as ERROR to drop it
 +       *  when TPA_STOP arrives.
 +       */
 +
 +      if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
 +              /* Move the BD from the consumer to the producer */
 +              bnx2x_reuse_rx_skb(fp, cons, prod);
 +              tpa_info->tpa_state = BNX2X_TPA_ERROR;
 +              return;
 +      }
 +
 +      /* move empty skb from pool to prod */
 +      prod_rx_buf->skb = first_buf->skb;
 +      dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
 +      /* point prod_bd to new skb */
 +      prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 +      prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
 +
 +      /* move partial skb from cons to pool (don't unmap yet) */
 +      *first_buf = *cons_rx_buf;
 +
 +      /* mark bin state as START */
 +      tpa_info->parsing_flags =
 +              le16_to_cpu(cqe->pars_flags.flags);
 +      tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
 +      tpa_info->tpa_state = BNX2X_TPA_START;
 +      tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
 +      tpa_info->placement_offset = cqe->placement_offset;
 +
 +#ifdef BNX2X_STOP_ON_ERROR
 +      fp->tpa_queue_used |= (1 << queue);
 +#ifdef _ASM_GENERIC_INT_L64_H
 +      DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
 +#else
 +      DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
 +#endif
 +         fp->tpa_queue_used);
 +#endif
 +}
 +
 +/* Timestamp option length allowed for TPA aggregation:
 + *
 + *            nop nop kind length echo val
 + */
 +#define TPA_TSTAMP_OPT_LEN    12
 +/**
 + * bnx2x_set_lro_mss - calculate the approximate value of the MSS
 + *
 + * @bp:                       driver handle
 + * @parsing_flags:    parsing flags from the START CQE
 + * @len_on_bd:                total length of the first packet for the
 + *                    aggregation.
 + *
 + * Returns the approximate value of the MSS for this aggregation,
 + * calculated using its first packet.
 + */
 +static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
 +                                  u16 len_on_bd)
 +{
 +      /*
 +       * A TPA aggregation won't have either IP options or TCP options
 +       * other than timestamp or IPv6 extension headers.
 +       */
 +      u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
 +
 +      if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
 +          PRS_FLAG_OVERETH_IPV6)
 +              hdrs_len += sizeof(struct ipv6hdr);
 +      else /* IPv4 */
 +              hdrs_len += sizeof(struct iphdr);
 +
 +
 +      /* Check if there was a TCP timestamp; if there is, it will
 +       * always be 12 bytes long: nop nop kind length echo val.
 +       *
 +       * Otherwise FW would close the aggregation.
 +       */
 +      if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
 +              hdrs_len += TPA_TSTAMP_OPT_LEN;
 +
 +      return len_on_bd - hdrs_len;
 +}
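For intuition, a worked example under common header sizes (a sanity check, not something stated in the driver):

	/* IPv4 plus TCP timestamps: hdrs_len = 14 (ETH_HLEN) + 20 (iphdr) +
	 * 20 (tcphdr) + 12 (TPA_TSTAMP_OPT_LEN) = 66, so len_on_bd = 1514
	 * yields an approximate MSS of 1448.
	 */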
 +
 +static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 +                             u16 queue, struct sk_buff *skb,
 +                             struct eth_end_agg_rx_cqe *cqe,
 +                             u16 cqe_idx)
 +{
 +      struct sw_rx_page *rx_pg, old_rx_pg;
 +      u32 i, frag_len, frag_size, pages;
 +      int err;
 +      int j;
 +      struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
 +      u16 len_on_bd = tpa_info->len_on_bd;
 +
 +      frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
 +      pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
 +
 +      /* This is needed in order to enable forwarding support */
 +      if (frag_size)
 +              skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
 +                                      tpa_info->parsing_flags, len_on_bd);
 +
 +#ifdef BNX2X_STOP_ON_ERROR
 +      if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
 +              BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
 +                        pages, cqe_idx);
 +              BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
 +              bnx2x_panic();
 +              return -EINVAL;
 +      }
 +#endif
 +
 +      /* Run through the SGL and compose the fragmented skb */
 +      for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
 +              u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
 +
 +              /* FW gives the indices of the SGE as if the ring is an array
 +                 (meaning that "next" element will consume 2 indices) */
 +              frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
 +              rx_pg = &fp->rx_page_ring[sge_idx];
 +              old_rx_pg = *rx_pg;
 +
 +              /* If we fail to allocate a substitute page, we simply stop
 +                 where we are and drop the whole packet */
 +              err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
 +              if (unlikely(err)) {
 +                      fp->eth_q_stats.rx_skb_alloc_failed++;
 +                      return err;
 +              }
 +
 +              /* Unmap the page as we are going to pass it to the stack */
 +              dma_unmap_page(&bp->pdev->dev,
 +                             dma_unmap_addr(&old_rx_pg, mapping),
 +                             SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
 +
 +              /* Add one frag and update the appropriate fields in the skb */
 +              skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
 +
 +              skb->data_len += frag_len;
 +              skb->truesize += frag_len;
 +              skb->len += frag_len;
 +
 +              frag_size -= frag_len;
 +      }
 +
 +      return 0;
 +}
 +
 +static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 +                         u16 queue, struct eth_end_agg_rx_cqe *cqe,
 +                         u16 cqe_idx)
 +{
 +      struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
 +      struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
 +      u8 pad = tpa_info->placement_offset;
 +      u16 len = tpa_info->len_on_bd;
 +      struct sk_buff *skb = rx_buf->skb;
 +      /* alloc new skb */
 +      struct sk_buff *new_skb;
 +      u8 old_tpa_state = tpa_info->tpa_state;
 +
 +      tpa_info->tpa_state = BNX2X_TPA_STOP;
 +
 +      /* If there was an error during the handling of the TPA_START,
 +       * drop this aggregation.
 +       */
 +      if (old_tpa_state == BNX2X_TPA_ERROR)
 +              goto drop;
 +
 +      /* Try to allocate the new skb */
 +      new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
 +
 +      /* Unmap skb in the pool anyway, as we are going to change
 +         pool entry status to BNX2X_TPA_STOP even if new skb allocation
 +         fails. */
 +      dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
 +                       fp->rx_buf_size, DMA_FROM_DEVICE);
 +
 +      if (likely(new_skb)) {
 +              prefetch(skb);
 +              prefetch(((char *)(skb)) + L1_CACHE_BYTES);
 +
 +#ifdef BNX2X_STOP_ON_ERROR
 +              if (pad + len > fp->rx_buf_size) {
 +                      BNX2X_ERR("skb_put is about to fail...  "
 +                                "pad %d  len %d  rx_buf_size %d\n",
 +                                pad, len, fp->rx_buf_size);
 +                      bnx2x_panic();
 +                      return;
 +              }
 +#endif
 +
 +              skb_reserve(skb, pad);
 +              skb_put(skb, len);
 +
 +              skb->protocol = eth_type_trans(skb, bp->dev);
 +              skb->ip_summed = CHECKSUM_UNNECESSARY;
 +
 +              if (!bnx2x_fill_frag_skb(bp, fp, queue, skb, cqe, cqe_idx)) {
 +                      if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
 +                              __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
 +                      napi_gro_receive(&fp->napi, skb);
 +              } else {
 +                      DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
 +                         " - dropping packet!\n");
 +                      dev_kfree_skb_any(skb);
 +              }
 +
 +              /* put new skb in bin */
 +              rx_buf->skb = new_skb;
 +
 +              return;
 +      }
 +
 +drop:
 +      /* drop the packet and keep the buffer in the bin */
 +      DP(NETIF_MSG_RX_STATUS,
 +         "Failed to allocate or map a new skb - dropping packet!\n");
 +      fp->eth_q_stats.rx_skb_alloc_failed++;
 +}
 +
 +/* Set Toeplitz hash value in the skb using the value from the
 + * CQE (calculated by HW).
 + */
 +static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
 +                                      struct sk_buff *skb)
 +{
 +      /* Set Toeplitz hash from CQE */
 +      if ((bp->dev->features & NETIF_F_RXHASH) &&
 +          (cqe->fast_path_cqe.status_flags &
 +           ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
 +              skb->rxhash =
 +              le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
 +}
 +
 +int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 +{
 +      struct bnx2x *bp = fp->bp;
 +      u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
 +      u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
 +      int rx_pkt = 0;
 +
 +#ifdef BNX2X_STOP_ON_ERROR
 +      if (unlikely(bp->panic))
 +              return 0;
 +#endif
 +
 +      /* The CQ "next" element is the same size as a regular element,
 +       * which is why it is ok to simply skip over it here.
 +       */
 +      hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
 +      if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
 +              hw_comp_cons++;
 +
 +      bd_cons = fp->rx_bd_cons;
 +      bd_prod = fp->rx_bd_prod;
 +      bd_prod_fw = bd_prod;
 +      sw_comp_cons = fp->rx_comp_cons;
 +      sw_comp_prod = fp->rx_comp_prod;
 +
 +      /* Memory barrier necessary as speculative reads of the rx
 +       * buffer can be ahead of the index in the status block
 +       */
 +      rmb();
 +
 +      DP(NETIF_MSG_RX_STATUS,
 +         "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
 +         fp->index, hw_comp_cons, sw_comp_cons);
 +
 +      while (sw_comp_cons != hw_comp_cons) {
 +              struct sw_rx_bd *rx_buf = NULL;
 +              struct sk_buff *skb;
 +              union eth_rx_cqe *cqe;
 +              struct eth_fast_path_rx_cqe *cqe_fp;
 +              u8 cqe_fp_flags;
 +              enum eth_rx_cqe_type cqe_fp_type;
 +              u16 len, pad;
 +
 +#ifdef BNX2X_STOP_ON_ERROR
 +              if (unlikely(bp->panic))
 +                      return 0;
 +#endif
 +
 +              comp_ring_cons = RCQ_BD(sw_comp_cons);
 +              bd_prod = RX_BD(bd_prod);
 +              bd_cons = RX_BD(bd_cons);
 +
 +              /* Prefetch the page containing the BD descriptor
 +                 at the producer's index. It will be needed when a new
 +                 skb is allocated */
 +              prefetch((void *)(PAGE_ALIGN((unsigned long)
 +                                           (&fp->rx_desc_ring[bd_prod])) -
 +                                PAGE_SIZE + 1));
 +
 +              cqe = &fp->rx_comp_ring[comp_ring_cons];
 +              cqe_fp = &cqe->fast_path_cqe;
 +              cqe_fp_flags = cqe_fp->type_error_flags;
 +              cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
 +
 +              DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
 +                 "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
 +                 cqe_fp_flags, cqe_fp->status_flags,
 +                 le32_to_cpu(cqe_fp->rss_hash_result),
 +                 le16_to_cpu(cqe_fp->vlan_tag), le16_to_cpu(cqe_fp->pkt_len));
 +
 +              /* is this a slowpath msg? */
 +              if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
 +                      bnx2x_sp_event(fp, cqe);
 +                      goto next_cqe;
 +
 +              /* this is an rx packet */
 +              } else {
 +                      rx_buf = &fp->rx_buf_ring[bd_cons];
 +                      skb = rx_buf->skb;
 +                      prefetch(skb);
 +
 +                      if (!CQE_TYPE_FAST(cqe_fp_type)) {
 +#ifdef BNX2X_STOP_ON_ERROR
 +                              /* sanity check */
 +                              if (fp->disable_tpa &&
 +                                  (CQE_TYPE_START(cqe_fp_type) ||
 +                                   CQE_TYPE_STOP(cqe_fp_type)))
 +                                      BNX2X_ERR("START/STOP packet while "
 +                                                "disable_tpa type %x\n",
 +                                                CQE_TYPE(cqe_fp_type));
 +#endif
 +
 +                              if (CQE_TYPE_START(cqe_fp_type)) {
 +                                      u16 queue = cqe_fp->queue_index;
 +                                      DP(NETIF_MSG_RX_STATUS,
 +                                         "calling tpa_start on queue %d\n",
 +                                         queue);
 +
 +                                      bnx2x_tpa_start(fp, queue, skb,
 +                                                      bd_cons, bd_prod,
 +                                                      cqe_fp);
 +
 +                                      /* Set Toeplitz hash for LRO skb */
 +                                      bnx2x_set_skb_rxhash(bp, cqe, skb);
 +
 +                                      goto next_rx;
 +
 +                              } else {
 +                                      u16 queue =
 +                                              cqe->end_agg_cqe.queue_index;
 +                                      DP(NETIF_MSG_RX_STATUS,
 +                                         "calling tpa_stop on queue %d\n",
 +                                         queue);
 +
 +                                      bnx2x_tpa_stop(bp, fp, queue,
 +                                                     &cqe->end_agg_cqe,
 +                                                     comp_ring_cons);
 +#ifdef BNX2X_STOP_ON_ERROR
 +                                      if (bp->panic)
 +                                              return 0;
 +#endif
 +
 +                                      bnx2x_update_sge_prod(fp, cqe_fp);
 +                                      goto next_cqe;
 +                              }
 +                      }
 +                      /* non TPA */
 +                      len = le16_to_cpu(cqe_fp->pkt_len);
 +                      pad = cqe_fp->placement_offset;
 +                      dma_sync_single_for_cpu(&bp->pdev->dev,
 +                                      dma_unmap_addr(rx_buf, mapping),
 +                                                     pad + RX_COPY_THRESH,
 +                                                     DMA_FROM_DEVICE);
 +                      prefetch(((char *)(skb)) + L1_CACHE_BYTES);
 +
 +                      /* is this an error packet? */
 +                      if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
 +                              DP(NETIF_MSG_RX_ERR,
 +                                 "ERROR  flags %x  rx packet %u\n",
 +                                 cqe_fp_flags, sw_comp_cons);
 +                              fp->eth_q_stats.rx_err_discard_pkt++;
 +                              goto reuse_rx;
 +                      }
 +
 +                      /* Since we don't have a jumbo ring,
 +                       * copy small packets if the MTU is above 1500.
 +                       */
 +                      if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
 +                          (len <= RX_COPY_THRESH)) {
 +                              struct sk_buff *new_skb;
 +
 +                              new_skb = netdev_alloc_skb(bp->dev, len + pad);
 +                              if (new_skb == NULL) {
 +                                      DP(NETIF_MSG_RX_ERR,
 +                                         "ERROR  packet dropped "
 +                                         "because of alloc failure\n");
 +                                      fp->eth_q_stats.rx_skb_alloc_failed++;
 +                                      goto reuse_rx;
 +                              }
 +
 +                              /* aligned copy */
 +                              skb_copy_from_linear_data_offset(skb, pad,
 +                                                  new_skb->data + pad, len);
 +                              skb_reserve(new_skb, pad);
 +                              skb_put(new_skb, len);
 +
 +                              bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
 +
 +                              skb = new_skb;
 +
 +                      } else
 +                      if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
 +                              dma_unmap_single(&bp->pdev->dev,
 +                                      dma_unmap_addr(rx_buf, mapping),
 +                                               fp->rx_buf_size,
 +                                               DMA_FROM_DEVICE);
 +                              skb_reserve(skb, pad);
 +                              skb_put(skb, len);
 +
 +                      } else {
 +                              DP(NETIF_MSG_RX_ERR,
 +                                 "ERROR  packet dropped because "
 +                                 "of alloc failure\n");
 +                              fp->eth_q_stats.rx_skb_alloc_failed++;
 +reuse_rx:
 +                              bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
 +                              goto next_rx;
 +                      }
 +
 +                      skb->protocol = eth_type_trans(skb, bp->dev);
 +
 +                      /* Set Toeplitz hash for a non-LRO skb */
 +                      bnx2x_set_skb_rxhash(bp, cqe, skb);
 +
 +                      skb_checksum_none_assert(skb);
 +
 +                      if (bp->dev->features & NETIF_F_RXCSUM) {
 +
 +                              if (likely(BNX2X_RX_CSUM_OK(cqe)))
 +                                      skb->ip_summed = CHECKSUM_UNNECESSARY;
 +                              else
 +                                      fp->eth_q_stats.hw_csum_err++;
 +                      }
 +              }
 +
 +              skb_record_rx_queue(skb, fp->index);
 +
 +              if (le16_to_cpu(cqe_fp->pars_flags.flags) &
 +                  PARSING_FLAGS_VLAN)
 +                      __vlan_hwaccel_put_tag(skb,
 +                                             le16_to_cpu(cqe_fp->vlan_tag));
 +              napi_gro_receive(&fp->napi, skb);
 +
 +next_rx:
 +              rx_buf->skb = NULL;
 +
 +              bd_cons = NEXT_RX_IDX(bd_cons);
 +              bd_prod = NEXT_RX_IDX(bd_prod);
 +              bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
 +              rx_pkt++;
 +next_cqe:
 +              sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
 +              sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
 +
 +              if (rx_pkt == budget)
 +                      break;
 +      } /* while */
 +
 +      fp->rx_bd_cons = bd_cons;
 +      fp->rx_bd_prod = bd_prod_fw;
 +      fp->rx_comp_cons = sw_comp_cons;
 +      fp->rx_comp_prod = sw_comp_prod;
 +
 +      /* Update producers */
 +      bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
 +                           fp->rx_sge_prod);
 +
 +      fp->rx_pkt += rx_pkt;
 +      fp->rx_calls++;
 +
 +      return rx_pkt;
 +}
 +
 +static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
 +{
 +      struct bnx2x_fastpath *fp = fp_cookie;
 +      struct bnx2x *bp = fp->bp;
 +      u8 cos;
 +
 +      DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
 +                       "[fp %d fw_sd %d igusb %d]\n",
 +         fp->index, fp->fw_sb_id, fp->igu_sb_id);
 +      bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
 +
 +#ifdef BNX2X_STOP_ON_ERROR
 +      if (unlikely(bp->panic))
 +              return IRQ_HANDLED;
 +#endif
 +
 +      /* Handle Rx and Tx according to MSI-X vector */
 +      prefetch(fp->rx_cons_sb);
 +
 +      for_each_cos_in_tx_queue(fp, cos)
 +              prefetch(fp->txdata[cos].tx_cons_sb);
 +
 +      prefetch(&fp->sb_running_index[SM_RX_ID]);
 +      napi_schedule(&bnx2x_fp(bp, fp->index, napi));
 +
 +      return IRQ_HANDLED;
 +}
 +
 +/* HW Lock for shared dual port PHYs */
 +void bnx2x_acquire_phy_lock(struct bnx2x *bp)
 +{
 +      mutex_lock(&bp->port.phy_mutex);
 +
 +      if (bp->port.need_hw_lock)
 +              bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
 +}
 +
 +void bnx2x_release_phy_lock(struct bnx2x *bp)
 +{
 +      if (bp->port.need_hw_lock)
 +              bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
 +
 +      mutex_unlock(&bp->port.phy_mutex);
 +}
 +
 +/* calculates MF speed according to current linespeed and MF configuration */
 +u16 bnx2x_get_mf_speed(struct bnx2x *bp)
 +{
 +      u16 line_speed = bp->link_vars.line_speed;
 +      if (IS_MF(bp)) {
 +              u16 maxCfg = bnx2x_extract_max_cfg(bp,
 +                                                 bp->mf_config[BP_VN(bp)]);
 +
 +              /* Calculate the current MAX line speed limit for the MF
 +               * devices
 +               */
 +              if (IS_MF_SI(bp))
 +                      line_speed = (line_speed * maxCfg) / 100;
 +              else { /* SD mode */
 +                      u16 vn_max_rate = maxCfg * 100;
 +
 +                      if (vn_max_rate < line_speed)
 +                              line_speed = vn_max_rate;
 +              }
 +      }
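 +      /* e.g. with a 10000 Mbps link and maxCfg == 25 both branches yield
 +       * 2500 Mbps: SI scales 10000 * 25 / 100, while SD caps the speed
 +       * at vn_max_rate = 25 * 100.
 +       */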
 +
 +      return line_speed;
 +}
 +
 +/**
 + * bnx2x_fill_report_data - fill the link report data to be reported
 + *
 + * @bp:               driver handle
 + * @data:     link state to update
 + *
 + * It uses non-atomic bit operations because it is called under the mutex.
 + */
 +static inline void bnx2x_fill_report_data(struct bnx2x *bp,
 +                                        struct bnx2x_link_report_data *data)
 +{
 +      u16 line_speed = bnx2x_get_mf_speed(bp);
 +
 +      memset(data, 0, sizeof(*data));
 +
 +      /* Fill the report data: effective line speed */
 +      data->line_speed = line_speed;
 +
 +      /* Link is down */
 +      if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
 +              __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
 +                        &data->link_report_flags);
 +
 +      /* Full DUPLEX */
 +      if (bp->link_vars.duplex == DUPLEX_FULL)
 +              __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
 +
 +      /* Rx Flow Control is ON */
 +      if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
 +              __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
 +
 +      /* Tx Flow Control is ON */
 +      if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
 +              __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
 +}
 +
 +/**
 + * bnx2x_link_report - report link status to OS.
 + *
 + * @bp:               driver handle
 + *
 + * Calls __bnx2x_link_report() under the same locking scheme as the
 + * link/PHY state managing code to ensure consistent link reporting.
 + */
 +void bnx2x_link_report(struct bnx2x *bp)
 +{
 +      bnx2x_acquire_phy_lock(bp);
 +      __bnx2x_link_report(bp);
 +      bnx2x_release_phy_lock(bp);
 +}
 +
 +/**
 + * __bnx2x_link_report - report link status to OS.
 + *
 + * @bp:               driver handle
 + *
 + * Non-atomic implementation.
 + * Should be called under the phy_lock.
 + */
 +void __bnx2x_link_report(struct bnx2x *bp)
 +{
 +      struct bnx2x_link_report_data cur_data;
 +
 +      /* reread mf_cfg */
 +      if (!CHIP_IS_E1(bp))
 +              bnx2x_read_mf_cfg(bp);
 +
 +      /* Read the current link report info */
 +      bnx2x_fill_report_data(bp, &cur_data);
 +
 +      /* Don't report link down or exactly the same link status twice */
 +      if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
 +          (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
 +                    &bp->last_reported_link.link_report_flags) &&
 +           test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
 +                    &cur_data.link_report_flags)))
 +              return;
 +
 +      bp->link_cnt++;
 +
 +      /* We are going to report new link parameters now -
 +       * remember the current data for next time.
 +       */
 +      memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
 +
 +      if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
 +                   &cur_data.link_report_flags)) {
 +              netif_carrier_off(bp->dev);
 +              netdev_err(bp->dev, "NIC Link is Down\n");
 +              return;
 +      } else {
 +              const char *duplex;
 +              const char *flow;
 +
 +              netif_carrier_on(bp->dev);
 +
 +              if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
 +                                     &cur_data.link_report_flags))
 +                      duplex = "full";
 +              else
 +                      duplex = "half";
 +
 +              /* Handle the FC flags at the end so that they are the only
 +               * flags that can still be set. This way we can easily check
 +               * whether any flow control is enabled at all.
 +               */
 +              if (cur_data.link_report_flags) {
 +                      if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
 +                                   &cur_data.link_report_flags)) {
 +                              if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
 +                                   &cur_data.link_report_flags))
 +                                      flow = "ON - receive & transmit";
 +                              else
 +                                      flow = "ON - receive";
 +                      } else {
 +                              flow = "ON - transmit";
 +                      }
 +              } else {
 +                      flow = "none";
 +              }
 +              netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
 +                          cur_data.line_speed, duplex, flow);
 +      }
 +}
 +
 +void bnx2x_init_rx_rings(struct bnx2x *bp)
 +{
 +      int func = BP_FUNC(bp);
-       int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
-                                             ETH_MAX_AGGREGATION_QUEUES_E1H_E2;
 +      u16 ring_prod;
 +      int i, j;
 +
 +      /* Allocate TPA resources */
 +      for_each_rx_queue(bp, j) {
 +              struct bnx2x_fastpath *fp = &bp->fp[j];
 +
 +              DP(NETIF_MSG_IFUP,
 +                 "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
 +
 +              if (!fp->disable_tpa) {
 +                      /* Fill the per-aggregation pool */
-                       for (i = 0; i < max_agg_queues; i++) {
++                      for (i = 0; i < MAX_AGG_QS(bp); i++) {
 +                              struct bnx2x_agg_info *tpa_info =
 +                                      &fp->tpa_info[i];
 +                              struct sw_rx_bd *first_buf =
 +                                      &tpa_info->first_buf;
 +
 +                              first_buf->skb = netdev_alloc_skb(bp->dev,
 +                                                     fp->rx_buf_size);
 +                              if (!first_buf->skb) {
 +                                      BNX2X_ERR("Failed to allocate TPA "
 +                                                "skb pool for queue[%d] - "
 +                                                "disabling TPA on this "
 +                                                "queue!\n", j);
 +                                      bnx2x_free_tpa_pool(bp, fp, i);
 +                                      fp->disable_tpa = 1;
 +                                      break;
 +                              }
 +                              dma_unmap_addr_set(first_buf, mapping, 0);
 +                              tpa_info->tpa_state = BNX2X_TPA_STOP;
 +                      }
 +
 +                      /* "next page" elements initialization */
 +                      bnx2x_set_next_page_sgl(fp);
 +
 +                      /* set SGEs bit mask */
 +                      bnx2x_init_sge_ring_bit_mask(fp);
 +
 +                      /* Allocate SGEs and initialize the ring elements */
 +                      for (i = 0, ring_prod = 0;
 +                           i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
 +
 +                              if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
 +                                      BNX2X_ERR("was only able to allocate "
 +                                                "%d rx sges\n", i);
 +                                      BNX2X_ERR("disabling TPA for "
 +                                                "queue[%d]\n", j);
 +                                      /* Cleanup already allocated elements */
 +                                      bnx2x_free_rx_sge_range(bp, fp,
 +                                                              ring_prod);
 +                                      bnx2x_free_tpa_pool(bp, fp,
-                                                           max_agg_queues);
++                                                          MAX_AGG_QS(bp));
 +                                      fp->disable_tpa = 1;
 +                                      ring_prod = 0;
 +                                      break;
 +                              }
 +                              ring_prod = NEXT_SGE_IDX(ring_prod);
 +                      }
 +
 +                      fp->rx_sge_prod = ring_prod;
 +              }
 +      }
 +
 +      for_each_rx_queue(bp, j) {
 +              struct bnx2x_fastpath *fp = &bp->fp[j];
 +
 +              fp->rx_bd_cons = 0;
 +
 +              /* Activate BD ring */
 +              /* Warning!
 +               * This will generate an interrupt (to the TSTORM);
 +               * it must only be done after the chip is initialized.
 +               */
 +              bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
 +                                   fp->rx_sge_prod);
 +
 +              if (j != 0)
 +                      continue;
 +
 +              if (CHIP_IS_E1(bp)) {
 +                      REG_WR(bp, BAR_USTRORM_INTMEM +
 +                             USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
 +                             U64_LO(fp->rx_comp_mapping));
 +                      REG_WR(bp, BAR_USTRORM_INTMEM +
 +                             USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
 +                             U64_HI(fp->rx_comp_mapping));
 +              }
 +      }
 +}
 +
 +static void bnx2x_free_tx_skbs(struct bnx2x *bp)
 +{
 +      int i;
 +      u8 cos;
 +
 +      for_each_tx_queue(bp, i) {
 +              struct bnx2x_fastpath *fp = &bp->fp[i];
 +              for_each_cos_in_tx_queue(fp, cos) {
 +                      struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
 +
 +                      u16 bd_cons = txdata->tx_bd_cons;
 +                      u16 sw_prod = txdata->tx_pkt_prod;
 +                      u16 sw_cons = txdata->tx_pkt_cons;
 +
 +                      while (sw_cons != sw_prod) {
 +                              bd_cons = bnx2x_free_tx_pkt(bp, txdata,
 +                                                          TX_BD(sw_cons));
 +                              sw_cons++;
 +                      }
 +              }
 +      }
 +}
 +
 +static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
 +{
 +      struct bnx2x *bp = fp->bp;
 +      int i;
 +
 +      /* ring wasn't allocated */
 +      if (fp->rx_buf_ring == NULL)
 +              return;
 +
 +      for (i = 0; i < NUM_RX_BD; i++) {
 +              struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
 +              struct sk_buff *skb = rx_buf->skb;
 +
 +              if (skb == NULL)
 +                      continue;
 +              dma_unmap_single(&bp->pdev->dev,
 +                               dma_unmap_addr(rx_buf, mapping),
 +                               fp->rx_buf_size, DMA_FROM_DEVICE);
 +
 +              rx_buf->skb = NULL;
 +              dev_kfree_skb(skb);
 +      }
 +}
 +
 +static void bnx2x_free_rx_skbs(struct bnx2x *bp)
 +{
 +      int j;
 +
 +      for_each_rx_queue(bp, j) {
 +              struct bnx2x_fastpath *fp = &bp->fp[j];
 +
 +              bnx2x_free_rx_bds(fp);
 +
 +              if (!fp->disable_tpa)
-                       bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
-                                           ETH_MAX_AGGREGATION_QUEUES_E1 :
-                                           ETH_MAX_AGGREGATION_QUEUES_E1H_E2);
++                      bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
 +      }
 +}
 +
 +void bnx2x_free_skbs(struct bnx2x *bp)
 +{
 +      bnx2x_free_tx_skbs(bp);
 +      bnx2x_free_rx_skbs(bp);
 +}
 +
 +void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
 +{
 +      /* load old values */
 +      u32 mf_cfg = bp->mf_config[BP_VN(bp)];
 +
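 +      /* The update is a read-modify-write: only the FUNC_MF_CFG_MAX_BW
 +       * field of mf_cfg is replaced with the requested value before it
 +       * is handed to the FW via DRV_MSG_CODE_SET_MF_BW.
 +       */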
 +      if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
 +              /* leave all but MAX value */
 +              mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
 +
 +              /* set new MAX value */
 +              mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
 +                              & FUNC_MF_CFG_MAX_BW_MASK;
 +
 +              bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
 +      }
 +}
 +
 +/**
 + * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
 + *
 + * @bp:               driver handle
 + * @nvecs:    number of vectors to be released
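 + *
 + * The vector layout assumed here mirrors bnx2x_enable_msix(): entry 0 is
 + * the slowpath vector, it is followed by an optional CNIC vector when
 + * BCM_CNIC is defined, and the remaining entries belong to the ETH queues.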
 + */
 +static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
 +{
 +      int i, offset = 0;
 +
 +      if (nvecs == offset)
 +              return;
 +      free_irq(bp->msix_table[offset].vector, bp->dev);
 +      DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
 +         bp->msix_table[offset].vector);
 +      offset++;
 +#ifdef BCM_CNIC
 +      if (nvecs == offset)
 +              return;
 +      offset++;
 +#endif
 +
 +      for_each_eth_queue(bp, i) {
 +              if (nvecs == offset)
 +                      return;
 +              DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d "
 +                 "irq\n", i, bp->msix_table[offset].vector);
 +
 +              free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
 +      }
 +}
 +
 +void bnx2x_free_irq(struct bnx2x *bp)
 +{
 +      if (bp->flags & USING_MSIX_FLAG)
 +              bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
 +                                   CNIC_PRESENT + 1);
 +      else if (bp->flags & USING_MSI_FLAG)
 +              free_irq(bp->pdev->irq, bp->dev);
 +      else
 +              free_irq(bp->pdev->irq, bp->dev);
 +}
 +
 +int bnx2x_enable_msix(struct bnx2x *bp)
 +{
 +      int msix_vec = 0, i, rc, req_cnt;
 +
 +      bp->msix_table[msix_vec].entry = msix_vec;
 +      DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
 +         bp->msix_table[0].entry);
 +      msix_vec++;
 +
 +#ifdef BCM_CNIC
 +      bp->msix_table[msix_vec].entry = msix_vec;
 +      DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
 +         bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
 +      msix_vec++;
 +#endif
 +      /* We need separate vectors for ETH queues only (not FCoE) */
 +      for_each_eth_queue(bp, i) {
 +              bp->msix_table[msix_vec].entry = msix_vec;
 +              DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
 +                 "(fastpath #%u)\n", msix_vec, msix_vec, i);
 +              msix_vec++;
 +      }
 +
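 +      /* One vector per ETH queue plus one for the slowpath and, when
 +       * CNIC is compiled in, one more for it; e.g. 8 ETH queues with
 +       * CNIC give req_cnt = 8 + 1 + 1 = 10.
 +       */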
 +      req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1;
 +
 +      rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
 +
 +      /*
 +       * reconfigure number of tx/rx queues according to available
 +       * MSI-X vectors
 +       */
 +      if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
 +              /* how many fewer vectors will we have? */
 +              int diff = req_cnt - rc;
 +
 +              DP(NETIF_MSG_IFUP,
 +                 "Trying to use less MSI-X vectors: %d\n", rc);
 +
 +              rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
 +
 +              if (rc) {
 +                      DP(NETIF_MSG_IFUP,
 +                         "MSI-X is not attainable  rc %d\n", rc);
 +                      return rc;
 +              }
 +              /*
 +               * decrease number of queues by number of unallocated entries
 +               */
 +              bp->num_queues -= diff;
 +
 +              DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
 +                                bp->num_queues);
 +      } else if (rc) {
 +              /* fall back to INTx if there is not enough memory */
 +              if (rc == -ENOMEM)
 +                      bp->flags |= DISABLE_MSI_FLAG;
 +              DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
 +              return rc;
 +      }
 +
 +      bp->flags |= USING_MSIX_FLAG;
 +
 +      return 0;
 +}
 +
 +static int bnx2x_req_msix_irqs(struct bnx2x *bp)
 +{
 +      int i, rc, offset = 0;
 +
 +      rc = request_irq(bp->msix_table[offset++].vector,
 +                       bnx2x_msix_sp_int, 0,
 +                       bp->dev->name, bp->dev);
 +      if (rc) {
 +              BNX2X_ERR("request sp irq failed\n");
 +              return -EBUSY;
 +      }
 +
 +#ifdef BCM_CNIC
 +      offset++;
 +#endif
 +      for_each_eth_queue(bp, i) {
 +              struct bnx2x_fastpath *fp = &bp->fp[i];
 +              snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
 +                       bp->dev->name, i);
 +
 +              rc = request_irq(bp->msix_table[offset].vector,
 +                               bnx2x_msix_fp_int, 0, fp->name, fp);
 +              if (rc) {
 +                      BNX2X_ERR("request fp #%d irq (%d) failed  rc %d\n", i,
 +                            bp->msix_table[offset].vector, rc);
 +                      bnx2x_free_msix_irqs(bp, offset);
 +                      return -EBUSY;
 +              }
 +
 +              offset++;
 +      }
 +
 +      i = BNX2X_NUM_ETH_QUEUES(bp);
 +      offset = 1 + CNIC_PRESENT;
 +      netdev_info(bp->dev, "using MSI-X  IRQs: sp %d  fp[%d] %d"
 +             " ... fp[%d] %d\n",
 +             bp->msix_table[0].vector,
 +             0, bp->msix_table[offset].vector,
 +             i - 1, bp->msix_table[offset + i - 1].vector);
 +
 +      return 0;
 +}
 +
 +int bnx2x_enable_msi(struct bnx2x *bp)
 +{
 +      int rc;
 +
 +      rc = pci_enable_msi(bp->pdev);
 +      if (rc) {
 +              DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
 +              return -1;
 +      }
 +      bp->flags |= USING_MSI_FLAG;
 +
 +      return 0;
 +}
 +
 +static int bnx2x_req_irq(struct bnx2x *bp)
 +{
 +      unsigned long flags;
 +      int rc;
 +
 +      if (bp->flags & USING_MSI_FLAG)
 +              flags = 0;
 +      else
 +              flags = IRQF_SHARED;
 +
 +      rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
 +                       bp->dev->name, bp->dev);
 +      return rc;
 +}
 +
 +static inline int bnx2x_setup_irqs(struct bnx2x *bp)
 +{
 +      int rc = 0;
 +      if (bp->flags & USING_MSIX_FLAG) {
 +              rc = bnx2x_req_msix_irqs(bp);
 +              if (rc)
 +                      return rc;
 +      } else {
 +              bnx2x_ack_int(bp);
 +              rc = bnx2x_req_irq(bp);
 +              if (rc) {
 +                      BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
 +                      return rc;
 +              }
 +              if (bp->flags & USING_MSI_FLAG) {
 +                      bp->dev->irq = bp->pdev->irq;
 +                      netdev_info(bp->dev, "using MSI  IRQ %d\n",
 +                             bp->pdev->irq);
 +              }
 +      }
 +
 +      return 0;
 +}
 +
 +static inline void bnx2x_napi_enable(struct bnx2x *bp)
 +{
 +      int i;
 +
 +      for_each_rx_queue(bp, i)
 +              napi_enable(&bnx2x_fp(bp, i, napi));
 +}
 +
 +static inline void bnx2x_napi_disable(struct bnx2x *bp)
 +{
 +      int i;
 +
 +      for_each_rx_queue(bp, i)
 +              napi_disable(&bnx2x_fp(bp, i, napi));
 +}