Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
David S. Miller [Wed, 9 Feb 2011 01:19:01 +0000 (17:19 -0800)]
Conflicts:
drivers/net/e1000e/netdev.c

14 files changed:
drivers/net/bnx2x/bnx2x_main.c
drivers/net/e1000e/netdev.c
drivers/net/wireless/ath/ath9k/ath9k.h
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/iwlwifi/iwl-6000.c
drivers/net/wireless/iwlwifi/iwl-agn.c
drivers/net/wireless/wl1251/main.c
net/batman-adv/unicast.c
net/core/dev.c
net/mac80211/cfg.c
net/mac80211/ieee80211_i.h
net/mac80211/status.c
net/mac80211/tx.c

@@@ -586,7 -586,7 +586,7 @@@ static int bnx2x_issue_dmae_with_comp(s
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
  
        /* lock the dmae channel */
 -      mutex_lock(&bp->dmae_mutex);
 +      spin_lock_bh(&bp->dmae_lock);
  
        /* reset completion */
        *wb_comp = 0;
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
  
  unlock:
 -      mutex_unlock(&bp->dmae_mutex);
 +      spin_unlock_bh(&bp->dmae_lock);
        return rc;
  }
  
@@@ -1397,7 -1397,7 +1397,7 @@@ void bnx2x_sp_event(struct bnx2x_fastpa
        }
  
        smp_mb__before_atomic_inc();
 -      atomic_inc(&bp->spq_left);
 +      atomic_inc(&bp->cq_spq_left);
        /* push the change in fp->state and towards the memory */
        smp_wmb();
  
@@@ -2473,14 -2473,8 +2473,14 @@@ static void bnx2x_pf_rx_cl_prep(struct 
        rxq_init->sge_map = fp->rx_sge_mapping;
        rxq_init->rcq_map = fp->rx_comp_mapping;
        rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
 -      rxq_init->mtu = bp->dev->mtu;
 -      rxq_init->buf_sz = bp->rx_buf_size;
 +
 +      /* Always use mini-jumbo MTU for FCoE L2 ring */
 +      if (IS_FCOE_FP(fp))
 +              rxq_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
 +      else
 +              rxq_init->mtu = bp->dev->mtu;
 +
 +      rxq_init->buf_sz = fp->rx_buf_size;
        rxq_init->cl_qzone_id = fp->cl_qzone_id;
        rxq_init->cl_id = fp->cl_id;
        rxq_init->spcl_id = fp->cl_id;
@@@ -2732,18 -2726,11 +2732,18 @@@ int bnx2x_sp_post(struct bnx2x *bp, in
  
        spin_lock_bh(&bp->spq_lock);
  
 -      if (!atomic_read(&bp->spq_left)) {
 -              BNX2X_ERR("BUG! SPQ ring full!\n");
 -              spin_unlock_bh(&bp->spq_lock);
 -              bnx2x_panic();
 -              return -EBUSY;
 +      if (common) {
 +              if (!atomic_read(&bp->eq_spq_left)) {
 +                      BNX2X_ERR("BUG! EQ ring full!\n");
 +                      spin_unlock_bh(&bp->spq_lock);
 +                      bnx2x_panic();
 +                      return -EBUSY;
 +              }
 +      } else if (!atomic_read(&bp->cq_spq_left)) {
 +                      BNX2X_ERR("BUG! SPQ ring full!\n");
 +                      spin_unlock_bh(&bp->spq_lock);
 +                      bnx2x_panic();
 +                      return -EBUSY;
        }
  
        spe = bnx2x_sp_get_next(bp);
        spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
  
   /* stats ramrod has its own slot on the spq */
 -      if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY)
 +      if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY) {
                /* It's ok if the actual decrement is issued towards the memory
                 * somewhere between the spin_lock and spin_unlock. Thus no
                 * more explicit memory barrier is needed.
                 */
 -              atomic_dec(&bp->spq_left);
 +              if (common)
 +                      atomic_dec(&bp->eq_spq_left);
 +              else
 +                      atomic_dec(&bp->cq_spq_left);
 +      }
 +
  
        DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
           "SPQE[%x] (%x:%x)  command %d  hw_cid %x  data (%x:%x) "
 -         "type(0x%x) left %x\n",
 +         "type(0x%x) left (ETH, COMMON) (%x,%x)\n",
           bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
           (u32)(U64_LO(bp->spq_mapping) +
           (void *)bp->spq_prod_bd - (void *)bp->spq), command,
 -         HW_CID(bp, cid), data_hi, data_lo, type, atomic_read(&bp->spq_left));
 +         HW_CID(bp, cid), data_hi, data_lo, type,
 +         atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
  
        bnx2x_sp_prod_update(bp);
        spin_unlock_bh(&bp->spq_lock);
@@@ -3705,8 -3686,8 +3705,8 @@@ static void bnx2x_eq_int(struct bnx2x *
        sw_cons = bp->eq_cons;
        sw_prod = bp->eq_prod;
  
 -      DP(BNX2X_MSG_SP, "EQ:  hw_cons %u  sw_cons %u bp->spq_left %u\n",
 -                      hw_cons, sw_cons, atomic_read(&bp->spq_left));
 +      DP(BNX2X_MSG_SP, "EQ:  hw_cons %u  sw_cons %u bp->cq_spq_left %u\n",
 +                      hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
  
        for (; sw_cons != hw_cons;
              sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
                case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
                case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
                        DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
 -                      bp->set_mac_pending = 0;
 +                      if (elem->message.data.set_mac_event.echo)
 +                              bp->set_mac_pending = 0;
                        break;
  
                case (EVENT_RING_OPCODE_SET_MAC |
                      BNX2X_STATE_CLOSING_WAIT4_HALT):
                        DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
 -                      bp->set_mac_pending = 0;
 +                      if (elem->message.data.set_mac_event.echo)
 +                              bp->set_mac_pending = 0;
                        break;
                default:
                        /* unknown event log error and continue */
@@@ -3791,7 -3770,7 +3791,7 @@@ next_spqe
        } /* for */
  
        smp_mb__before_atomic_inc();
 -      atomic_add(spqe_cnt, &bp->spq_left);
 +      atomic_add(spqe_cnt, &bp->eq_spq_left);
  
        bp->eq_cons = sw_cons;
        bp->eq_prod = sw_prod;
@@@ -4224,7 -4203,7 +4224,7 @@@ void bnx2x_update_coalesce(struct bnx2
  static void bnx2x_init_sp_ring(struct bnx2x *bp)
  {
        spin_lock_init(&bp->spq_lock);
 -      atomic_set(&bp->spq_left, MAX_SPQ_PENDING);
 +      atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
  
        bp->spq_prod_idx = 0;
        bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
@@@ -4249,9 -4228,6 +4249,9 @@@ static void bnx2x_init_eq_ring(struct b
        bp->eq_cons = 0;
        bp->eq_prod = NUM_EQ_DESC;
        bp->eq_cons_sb = BNX2X_EQ_INDEX;
 +      /* we want a warning message before it gets rough... */
 +      atomic_set(&bp->eq_spq_left,
 +              min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
  }
  
  static void bnx2x_init_ind_table(struct bnx2x *bp)
@@@ -4300,9 -4276,12 +4300,12 @@@ void bnx2x_set_storm_rx_mode(struct bnx
                def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
                                BNX2X_ACCEPT_MULTICAST;
  #ifdef BCM_CNIC
-               cl_id = bnx2x_fcoe(bp, cl_id);
-               bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST |
-                                         BNX2X_ACCEPT_MULTICAST);
+               if (!NO_FCOE(bp)) {
+                       cl_id = bnx2x_fcoe(bp, cl_id);
+                       bnx2x_rxq_set_mac_filters(bp, cl_id,
+                                                 BNX2X_ACCEPT_UNICAST |
+                                                 BNX2X_ACCEPT_MULTICAST);
+               }
  #endif
                break;
  
                def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
                                BNX2X_ACCEPT_ALL_MULTICAST;
  #ifdef BCM_CNIC
-               cl_id = bnx2x_fcoe(bp, cl_id);
-               bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST |
-                                         BNX2X_ACCEPT_MULTICAST);
+               /*
+                *  Prevent duplication of multicast packets by configuring FCoE
+                *  L2 Client to receive only matched unicast frames.
+                */
+               if (!NO_FCOE(bp)) {
+                       cl_id = bnx2x_fcoe(bp, cl_id);
+                       bnx2x_rxq_set_mac_filters(bp, cl_id,
+                                                 BNX2X_ACCEPT_UNICAST);
+               }
  #endif
                break;
  
        case BNX2X_RX_MODE_PROMISC:
                def_q_filters |= BNX2X_PROMISCUOUS_MODE;
  #ifdef BCM_CNIC
-               cl_id = bnx2x_fcoe(bp, cl_id);
-               bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST |
-                                         BNX2X_ACCEPT_MULTICAST);
+               /*
+                *  Prevent packet duplication by configuring DROP_ALL for FCoE
+                *  L2 Client.
+                */
+               if (!NO_FCOE(bp)) {
+                       cl_id = bnx2x_fcoe(bp, cl_id);
+                       bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
+               }
  #endif
                /* pass management unicast packets as well */
                llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
@@@ -5850,7 -5840,7 +5864,7 @@@ int bnx2x_init_hw(struct bnx2x *bp, u3
           BP_ABS_FUNC(bp), load_code);
  
        bp->dmae_ready = 0;
 -      mutex_init(&bp->dmae_mutex);
 +      spin_lock_init(&bp->dmae_lock);
        rc = bnx2x_gunzip_init(bp);
        if (rc)
                return rc;
@@@ -6185,14 -6175,12 +6199,14 @@@ static void bnx2x_set_mac_addr_gen(stru
        int ramrod_flags = WAIT_RAMROD_COMMON;
  
        bp->set_mac_pending = 1;
 -      smp_wmb();
  
        config->hdr.length = 1;
        config->hdr.offset = cam_offset;
        config->hdr.client_id = 0xff;
 -      config->hdr.reserved1 = 0;
 +      /* Mark the single MAC configuration ramrod (as opposed to a
 +       * UC/MC list configuration).
 +       */
 +      config->hdr.echo = 1;
  
        /* primary MAC */
        config->config_table[0].msb_mac_addr =
           config->config_table[0].middle_mac_addr,
           config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);
  
 +      mb();
 +
        bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
                      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
                      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
@@@ -6290,15 -6276,20 +6304,15 @@@ static u8 bnx2x_e1h_cam_offset(struct b
        if (CHIP_IS_E1H(bp))
                return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
        else if (CHIP_MODE_IS_4_PORT(bp))
 -              return BP_FUNC(bp) * 32  + rel_offset;
 +              return E2_FUNC_MAX * rel_offset + BP_FUNC(bp);
        else
 -              return BP_VN(bp) * 32  + rel_offset;
 +              return E2_FUNC_MAX * rel_offset + BP_VN(bp);
  }
  
  /**
   *  LLH CAM line allocations: currently only iSCSI and ETH macs are
   *  relevant. In addition, current implementation is tuned for a
   *  single ETH MAC.
 - *
 - *  When multiple unicast ETH MACs PF configuration in switch
 - *  independent mode is required (NetQ, multiple netdev MACs,
 - *  etc.), consider better utilisation of 16 per function MAC
 - *  entries in the LLH memory.
   */
  enum {
        LLH_CAM_ISCSI_ETH_LINE = 0,
@@@ -6373,37 -6364,14 +6387,37 @@@ void bnx2x_set_eth_mac(struct bnx2x *bp
                bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
        }
  }
 -static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
 +
 +static inline u8 bnx2x_e1_cam_mc_offset(struct bnx2x *bp)
 +{
 +      return CHIP_REV_IS_SLOW(bp) ?
 +              (BNX2X_MAX_EMUL_MULTI * (1 + BP_PORT(bp))) :
 +              (BNX2X_MAX_MULTICAST * (1 + BP_PORT(bp)));
 +}
 +
 +/* set mc list, do not wait as wait implies sleep and
 + * set_rx_mode can be invoked from non-sleepable context.
 + *
 + * Instead we use the same ramrod data buffer each time we need
 + * to configure a list of addresses, and use the fact that the
 + * list of MACs is changed in an incremental way and that the
 + * function is called under the netif_addr_lock. A temporary
 + * inconsistent CAM configuration (possible in case of a very fast
 + * sequence of add/del/add on the host side) will shortly be
 + * restored by the handler of the last ramrod.
 + */
 +static int bnx2x_set_e1_mc_list(struct bnx2x *bp)
  {
        int i = 0, old;
        struct net_device *dev = bp->dev;
 +      u8 offset = bnx2x_e1_cam_mc_offset(bp);
        struct netdev_hw_addr *ha;
        struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
        dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
  
 +      if (netdev_mc_count(dev) > BNX2X_MAX_MULTICAST)
 +              return -EINVAL;
 +
        netdev_for_each_mc_addr(ha, dev) {
                /* copy mac */
                config_cmd->config_table[i].msb_mac_addr =
                }
        }
  
 +      wmb();
 +
        config_cmd->hdr.length = i;
        config_cmd->hdr.offset = offset;
        config_cmd->hdr.client_id = 0xff;
 -      config_cmd->hdr.reserved1 = 0;
 +      /* Mark that this ramrod doesn't use bp->set_mac_pending for
 +       * synchronization.
 +       */
 +      config_cmd->hdr.echo = 0;
  
 -      bp->set_mac_pending = 1;
 -      smp_wmb();
 +      mb();
  
 -      bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
 +      return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
                   U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
  }
 -static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
 +
 +void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp)
  {
        int i;
        struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
        dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
        int ramrod_flags = WAIT_RAMROD_COMMON;
 +      u8 offset = bnx2x_e1_cam_mc_offset(bp);
  
 -      bp->set_mac_pending = 1;
 -      smp_wmb();
 -
 -      for (i = 0; i < config_cmd->hdr.length; i++)
 +      for (i = 0; i < BNX2X_MAX_MULTICAST; i++)
                SET_FLAG(config_cmd->config_table[i].flags,
                        MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
                        T_ETH_MAC_COMMAND_INVALIDATE);
  
 +      wmb();
 +
 +      config_cmd->hdr.length = BNX2X_MAX_MULTICAST;
 +      config_cmd->hdr.offset = offset;
 +      config_cmd->hdr.client_id = 0xff;
 +      /* We'll wait for a completion this time... */
 +      config_cmd->hdr.echo = 1;
 +
 +      bp->set_mac_pending = 1;
 +
 +      mb();
 +
        bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
                      U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
  
  
  }
  
 +/* Accept one or more multicasts */
 +static int bnx2x_set_e1h_mc_list(struct bnx2x *bp)
 +{
 +      struct net_device *dev = bp->dev;
 +      struct netdev_hw_addr *ha;
 +      u32 mc_filter[MC_HASH_SIZE];
 +      u32 crc, bit, regidx;
 +      int i;
 +
 +      memset(mc_filter, 0, 4 * MC_HASH_SIZE);
 +
 +      netdev_for_each_mc_addr(ha, dev) {
 +              DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
 +                 bnx2x_mc_addr(ha));
 +
 +              crc = crc32c_le(0, bnx2x_mc_addr(ha),
 +                              ETH_ALEN);
 +              bit = (crc >> 24) & 0xff;
 +              regidx = bit >> 5;
 +              bit &= 0x1f;
 +              mc_filter[regidx] |= (1 << bit);
 +      }
 +
 +      for (i = 0; i < MC_HASH_SIZE; i++)
 +              REG_WR(bp, MC_HASH_OFFSET(bp, i),
 +                     mc_filter[i]);
 +
 +      return 0;
 +}
 +
 +void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp)
 +{
 +      int i;
 +
 +      for (i = 0; i < MC_HASH_SIZE; i++)
 +              REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
 +}
 +
  #ifdef BCM_CNIC
  /**
  * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
@@@ -6550,13 -6465,12 +6564,13 @@@ static int bnx2x_set_iscsi_eth_mac_addr
        u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID +
                BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
        u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
 +      u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
  
        /* Send a SET_MAC ramrod */
 -      bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec,
 +      bnx2x_set_mac_addr_gen(bp, set, iscsi_mac, cl_bit_vec,
                               cam_offset, 0);
  
 -      bnx2x_set_mac_in_nig(bp, set, bp->iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);
 +      bnx2x_set_mac_in_nig(bp, set, iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);
  
        return 0;
  }
@@@ -7198,15 -7112,20 +7212,15 @@@ void bnx2x_chip_cleanup(struct bnx2x *b
        /* Give HW time to discard old tx messages */
        msleep(1);
  
 -      if (CHIP_IS_E1(bp)) {
 -              /* invalidate mc list,
 -               * wait and poll (interrupts are off)
 -               */
 -              bnx2x_invlidate_e1_mc_list(bp);
 -              bnx2x_set_eth_mac(bp, 0);
 +      bnx2x_set_eth_mac(bp, 0);
  
 -      } else {
 -              REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
 +      bnx2x_invalidate_uc_list(bp);
  
 -              bnx2x_set_eth_mac(bp, 0);
 -
 -              for (i = 0; i < MC_HASH_SIZE; i++)
 -                      REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
 +      if (CHIP_IS_E1(bp))
 +              bnx2x_invalidate_e1_mc_list(bp);
 +      else {
 +              bnx2x_invalidate_e1h_mc_list(bp);
 +              REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
        }
  
  #ifdef BCM_CNIC
@@@ -8475,47 -8394,11 +8489,47 @@@ static void __devinit bnx2x_get_port_hw
                                                        bp->common.shmem2_base);
  }
  
 +#ifdef BCM_CNIC
 +static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
 +{
 +      u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
 +                              drv_lic_key[BP_PORT(bp)].max_iscsi_conn);
 +      u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
 +                              drv_lic_key[BP_PORT(bp)].max_fcoe_conn);
 +
 +      /* Get the number of maximum allowed iSCSI and FCoE connections */
 +      bp->cnic_eth_dev.max_iscsi_conn =
 +              (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
 +              BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
 +
 +      bp->cnic_eth_dev.max_fcoe_conn =
 +              (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
 +              BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
 +
 +      BNX2X_DEV_INFO("max_iscsi_conn 0x%x max_fcoe_conn 0x%x\n",
 +                     bp->cnic_eth_dev.max_iscsi_conn,
 +                     bp->cnic_eth_dev.max_fcoe_conn);
 +
 +      /* If the maximum allowed number of connections is zero,
 +       * disable the feature.
 +       */
 +      if (!bp->cnic_eth_dev.max_iscsi_conn)
 +              bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
 +
 +      if (!bp->cnic_eth_dev.max_fcoe_conn)
 +              bp->flags |= NO_FCOE_FLAG;
 +}
 +#endif
 +
  static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
  {
        u32 val, val2;
        int func = BP_ABS_FUNC(bp);
        int port = BP_PORT(bp);
 +#ifdef BCM_CNIC
 +      u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
 +      u8 *fip_mac = bp->fip_mac;
 +#endif
  
        if (BP_NOMCP(bp)) {
                BNX2X_ERROR("warning: random MAC workaround active\n");
                        bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
  
  #ifdef BCM_CNIC
 -              /* iSCSI NPAR MAC */
 +              /* iSCSI and FCoE NPAR MACs: if either the iSCSI or the FCoE
 +               * MAC is missing, the corresponding feature should be disabled.
 +               */
                if (IS_MF_SI(bp)) {
                        u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
                        if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
                                                     iscsi_mac_addr_upper);
                                val = MF_CFG_RD(bp, func_ext_config[func].
                                                    iscsi_mac_addr_lower);
 -                              bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
 -                      }
 +                              BNX2X_DEV_INFO("Read iSCSI MAC: "
 +                                             "0x%x:0x%04x\n", val2, val);
 +                              bnx2x_set_mac_buf(iscsi_mac, val, val2);
 +
 +                              /* Disable iSCSI OOO if MAC configuration is
 +                               * invalid.
 +                               */
 +                              if (!is_valid_ether_addr(iscsi_mac)) {
 +                                      bp->flags |= NO_ISCSI_OOO_FLAG |
 +                                                   NO_ISCSI_FLAG;
 +                                      memset(iscsi_mac, 0, ETH_ALEN);
 +                              }
 +                      } else
 +                              bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
 +
 +                      if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
 +                              val2 = MF_CFG_RD(bp, func_ext_config[func].
 +                                                   fcoe_mac_addr_upper);
 +                              val = MF_CFG_RD(bp, func_ext_config[func].
 +                                                  fcoe_mac_addr_lower);
 +                              BNX2X_DEV_INFO("Read FCoE MAC to "
 +                                             "0x%x:0x%04x\n", val2, val);
 +                              bnx2x_set_mac_buf(fip_mac, val, val2);
 +
 +                              /* Disable FCoE if MAC configuration is
 +                               * invalid.
 +                               */
 +                              if (!is_valid_ether_addr(fip_mac)) {
 +                                      bp->flags |= NO_FCOE_FLAG;
 +                                      memset(bp->fip_mac, 0, ETH_ALEN);
 +                              }
 +                      } else
 +                              bp->flags |= NO_FCOE_FLAG;
                }
  #endif
        } else {
                                    iscsi_mac_upper);
                val = SHMEM_RD(bp, dev_info.port_hw_config[port].
                                   iscsi_mac_lower);
 -              bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
 +              bnx2x_set_mac_buf(iscsi_mac, val, val2);
  #endif
        }
  
        memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
  
  #ifdef BCM_CNIC
 -      /* Inform the upper layers about FCoE MAC */
 +      /* Set the FCoE MAC in modes other than MF_SI */
        if (!CHIP_IS_E1x(bp)) {
                if (IS_MF_SD(bp))
 -                      memcpy(bp->fip_mac, bp->dev->dev_addr,
 -                             sizeof(bp->fip_mac));
 -              else
 -                      memcpy(bp->fip_mac, bp->iscsi_mac,
 -                             sizeof(bp->fip_mac));
 +                      memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
 +              else if (!IS_MF(bp))
 +                      memcpy(fip_mac, iscsi_mac, ETH_ALEN);
        }
  #endif
  }
@@@ -8760,10 -8612,6 +8774,10 @@@ static int __devinit bnx2x_get_hwinfo(s
        /* Get MAC addresses */
        bnx2x_get_mac_hwinfo(bp);
  
 +#ifdef BCM_CNIC
 +      bnx2x_get_cnic_info(bp);
 +#endif
 +
        return rc;
  }
  
@@@ -8978,197 -8826,12 +8992,197 @@@ static int bnx2x_close(struct net_devic
        return 0;
  }
  
 +#define E1_MAX_UC_LIST        29
 +#define E1H_MAX_UC_LIST       30
 +#define E2_MAX_UC_LIST        14
 +static inline u8 bnx2x_max_uc_list(struct bnx2x *bp)
 +{
 +      if (CHIP_IS_E1(bp))
 +              return E1_MAX_UC_LIST;
 +      else if (CHIP_IS_E1H(bp))
 +              return E1H_MAX_UC_LIST;
 +      else
 +              return E2_MAX_UC_LIST;
 +}
 +
 +
 +static inline u8 bnx2x_uc_list_cam_offset(struct bnx2x *bp)
 +{
 +      if (CHIP_IS_E1(bp))
 +              /* CAM Entries for Port0:
 +               *      0 - prim ETH MAC
 +               *      1 - BCAST MAC
 +               *      2 - iSCSI L2 ring ETH MAC
 +               *      3-31 - UC MACs
 +               *
 +               * Port1 entries are allocated the same way starting from
 +               * entry 32.
 +               */
 +              return 3 + 32 * BP_PORT(bp);
 +      else if (CHIP_IS_E1H(bp)) {
 +              /* CAM Entries:
 +               *      0-7  - prim ETH MAC for each function
 +               *      8-15 - iSCSI L2 ring ETH MAC for each function
 +               *      16 till 255 UC MAC lists for each function
 +               *
 +               * Remark: There is no FCoE support for E1H, thus FCoE related
 +               *         MACs are not considered.
 +               */
 +              return E1H_FUNC_MAX * (CAM_ISCSI_ETH_LINE + 1) +
 +                      bnx2x_max_uc_list(bp) * BP_FUNC(bp);
 +      } else {
 +              /* CAM Entries (there is a separate CAM per engine):
 +               *      0-4  - prim ETH MAC for each function
 +               *      4-7 - iSCSI L2 ring ETH MAC for each function
 +               *      8-11 - FIP ucast L2 MAC for each function
 +               *      12-15 - ALL_ENODE_MACS mcast MAC for each function
 +               *      16 till 71 UC MAC lists for each function
 +               */
 +              u8 func_idx =
 +                      (CHIP_MODE_IS_4_PORT(bp) ? BP_FUNC(bp) : BP_VN(bp));
 +
 +              return E2_FUNC_MAX * (CAM_MAX_PF_LINE + 1) +
 +                      bnx2x_max_uc_list(bp) * func_idx;
 +      }
 +}
 +
 +/* set uc list, do not wait as wait implies sleep and
 + * set_rx_mode can be invoked from non-sleepable context.
 + *
 + * Instead we use the same ramrod data buffer each time we need
 + * to configure a list of addresses, and use the fact that the
 + * list of MACs is changed in an incremental way and that the
 + * function is called under the netif_addr_lock. A temporary
 +      * inconsistent CAM configuration (possible in case of a very fast
 + * sequence of add/del/add on the host side) will shortly be
 + * restored by the handler of the last ramrod.
 + */
 +static int bnx2x_set_uc_list(struct bnx2x *bp)
 +{
 +      int i = 0, old;
 +      struct net_device *dev = bp->dev;
 +      u8 offset = bnx2x_uc_list_cam_offset(bp);
 +      struct netdev_hw_addr *ha;
 +      struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
 +      dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
 +
 +      if (netdev_uc_count(dev) > bnx2x_max_uc_list(bp))
 +              return -EINVAL;
 +
 +      netdev_for_each_uc_addr(ha, dev) {
 +              /* copy mac */
 +              config_cmd->config_table[i].msb_mac_addr =
 +                      swab16(*(u16 *)&bnx2x_uc_addr(ha)[0]);
 +              config_cmd->config_table[i].middle_mac_addr =
 +                      swab16(*(u16 *)&bnx2x_uc_addr(ha)[2]);
 +              config_cmd->config_table[i].lsb_mac_addr =
 +                      swab16(*(u16 *)&bnx2x_uc_addr(ha)[4]);
 +
 +              config_cmd->config_table[i].vlan_id = 0;
 +              config_cmd->config_table[i].pf_id = BP_FUNC(bp);
 +              config_cmd->config_table[i].clients_bit_vector =
 +                      cpu_to_le32(1 << BP_L_ID(bp));
 +
 +              SET_FLAG(config_cmd->config_table[i].flags,
 +                      MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
 +                      T_ETH_MAC_COMMAND_SET);
 +
 +              DP(NETIF_MSG_IFUP,
 +                 "setting UCAST[%d] (%04x:%04x:%04x)\n", i,
 +                 config_cmd->config_table[i].msb_mac_addr,
 +                 config_cmd->config_table[i].middle_mac_addr,
 +                 config_cmd->config_table[i].lsb_mac_addr);
 +
 +              i++;
 +
 +              /* Set uc MAC in NIG */
 +              bnx2x_set_mac_in_nig(bp, 1, bnx2x_uc_addr(ha),
 +                                   LLH_CAM_ETH_LINE + i);
 +      }
 +      old = config_cmd->hdr.length;
 +      if (old > i) {
 +              for (; i < old; i++) {
 +                      if (CAM_IS_INVALID(config_cmd->
 +                                         config_table[i])) {
 +                              /* already invalidated */
 +                              break;
 +                      }
 +                      /* invalidate */
 +                      SET_FLAG(config_cmd->config_table[i].flags,
 +                              MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
 +                              T_ETH_MAC_COMMAND_INVALIDATE);
 +              }
 +      }
 +
 +      wmb();
 +
 +      config_cmd->hdr.length = i;
 +      config_cmd->hdr.offset = offset;
 +      config_cmd->hdr.client_id = 0xff;
 +      /* Mark that this ramrod doesn't use bp->set_mac_pending for
 +       * synchronization.
 +       */
 +      config_cmd->hdr.echo = 0;
 +
 +      mb();
 +
 +      return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
 +                 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
 +
 +}
 +
 +void bnx2x_invalidate_uc_list(struct bnx2x *bp)
 +{
 +      int i;
 +      struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
 +      dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
 +      int ramrod_flags = WAIT_RAMROD_COMMON;
 +      u8 offset = bnx2x_uc_list_cam_offset(bp);
 +      u8 max_list_size = bnx2x_max_uc_list(bp);
 +
 +      for (i = 0; i < max_list_size; i++) {
 +              SET_FLAG(config_cmd->config_table[i].flags,
 +                      MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
 +                      T_ETH_MAC_COMMAND_INVALIDATE);
 +              bnx2x_set_mac_in_nig(bp, 0, NULL, LLH_CAM_ETH_LINE + 1 + i);
 +      }
 +
 +      wmb();
 +
 +      config_cmd->hdr.length = max_list_size;
 +      config_cmd->hdr.offset = offset;
 +      config_cmd->hdr.client_id = 0xff;
 +      /* We'll wait for a completion this time... */
 +      config_cmd->hdr.echo = 1;
 +
 +      bp->set_mac_pending = 1;
 +
 +      mb();
 +
 +      bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
 +                    U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
 +
 +      /* Wait for a completion */
 +      bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
 +                              ramrod_flags);
 +
 +}
 +
 +static inline int bnx2x_set_mc_list(struct bnx2x *bp)
 +{
 +      /* some multicasts */
 +      if (CHIP_IS_E1(bp)) {
 +              return bnx2x_set_e1_mc_list(bp);
 +      } else { /* E1H and newer */
 +              return bnx2x_set_e1h_mc_list(bp);
 +      }
 +}
 +
  /* called with netif_tx_lock from dev_mcast.c */
  void bnx2x_set_rx_mode(struct net_device *dev)
  {
        struct bnx2x *bp = netdev_priv(dev);
        u32 rx_mode = BNX2X_RX_MODE_NORMAL;
 -      int port = BP_PORT(bp);
  
        if (bp->state != BNX2X_STATE_OPEN) {
                DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
  
        if (dev->flags & IFF_PROMISC)
                rx_mode = BNX2X_RX_MODE_PROMISC;
 -      else if ((dev->flags & IFF_ALLMULTI) ||
 -               ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
 -                CHIP_IS_E1(bp)))
 +      else if (dev->flags & IFF_ALLMULTI)
                rx_mode = BNX2X_RX_MODE_ALLMULTI;
 -      else { /* some multicasts */
 -              if (CHIP_IS_E1(bp)) {
 -                      /*
 -                       * set mc list, do not wait as wait implies sleep
 -                       * and set_rx_mode can be invoked from non-sleepable
 -                       * context
 -                       */
 -                      u8 offset = (CHIP_REV_IS_SLOW(bp) ?
 -                                   BNX2X_MAX_EMUL_MULTI*(1 + port) :
 -                                   BNX2X_MAX_MULTICAST*(1 + port));
 -
 -                      bnx2x_set_e1_mc_list(bp, offset);
 -              } else { /* E1H */
 -                      /* Accept one or more multicasts */
 -                      struct netdev_hw_addr *ha;
 -                      u32 mc_filter[MC_HASH_SIZE];
 -                      u32 crc, bit, regidx;
 -                      int i;
 -
 -                      memset(mc_filter, 0, 4 * MC_HASH_SIZE);
 -
 -                      netdev_for_each_mc_addr(ha, dev) {
 -                              DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
 -                                 bnx2x_mc_addr(ha));
 -
 -                              crc = crc32c_le(0, bnx2x_mc_addr(ha),
 -                                              ETH_ALEN);
 -                              bit = (crc >> 24) & 0xff;
 -                              regidx = bit >> 5;
 -                              bit &= 0x1f;
 -                              mc_filter[regidx] |= (1 << bit);
 -                      }
 +      else {
 +              /* some multicasts */
 +              if (bnx2x_set_mc_list(bp))
 +                      rx_mode = BNX2X_RX_MODE_ALLMULTI;
  
 -                      for (i = 0; i < MC_HASH_SIZE; i++)
 -                              REG_WR(bp, MC_HASH_OFFSET(bp, i),
 -                                     mc_filter[i]);
 -              }
 +              /* some unicasts */
 +              if (bnx2x_set_uc_list(bp))
 +                      rx_mode = BNX2X_RX_MODE_PROMISC;
        }
  
        bp->rx_mode = rx_mode;
@@@ -9269,7 -8963,7 +9283,7 @@@ static const struct net_device_ops bnx2
        .ndo_stop               = bnx2x_close,
        .ndo_start_xmit         = bnx2x_start_xmit,
        .ndo_select_queue       = bnx2x_select_queue,
 -      .ndo_set_multicast_list = bnx2x_set_rx_mode,
 +      .ndo_set_rx_mode        = bnx2x_set_rx_mode,
        .ndo_set_mac_address    = bnx2x_change_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_do_ioctl           = bnx2x_ioctl,
@@@ -10095,21 -9789,15 +10109,21 @@@ static void bnx2x_cnic_sp_post(struct b
                                        HW_CID(bp, BNX2X_ISCSI_ETH_CID));
                }
  
 -              /* There may be not more than 8 L2 and COMMON SPEs and not more
 -               * than 8 L5 SPEs in the air.
 +              /* There may be no more than 8 L2 and no more than 8 L5 SPEs
 +               * in the air. We also check that the number of outstanding
 +               * COMMON ramrods does not exceed what the EQ and SPQ can
 +               * accommodate.
                 */
 -              if ((type == NONE_CONNECTION_TYPE) ||
 -                  (type == ETH_CONNECTION_TYPE)) {
 -                      if (!atomic_read(&bp->spq_left))
 +              if (type == ETH_CONNECTION_TYPE) {
 +                      if (!atomic_read(&bp->cq_spq_left))
                                break;
                        else
 -                              atomic_dec(&bp->spq_left);
 +                              atomic_dec(&bp->cq_spq_left);
 +              } else if (type == NONE_CONNECTION_TYPE) {
 +                      if (!atomic_read(&bp->eq_spq_left))
 +                              break;
 +                      else
 +                              atomic_dec(&bp->eq_spq_left);
                } else if ((type == ISCSI_CONNECTION_TYPE) ||
                           (type == FCOE_CONNECTION_TYPE)) {
                        if (bp->cnic_spq_pending >=
@@@ -10187,8 -9875,7 +10201,8 @@@ static int bnx2x_cnic_ctl_send(struct b
        int rc = 0;
  
        mutex_lock(&bp->cnic_mutex);
 -      c_ops = bp->cnic_ops;
 +      c_ops = rcu_dereference_protected(bp->cnic_ops,
 +                                        lockdep_is_held(&bp->cnic_mutex));
        if (c_ops)
                rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
        mutex_unlock(&bp->cnic_mutex);
@@@ -10302,7 -9989,7 +10316,7 @@@ static int bnx2x_drv_ctl(struct net_dev
                int count = ctl->data.credit.credit_count;
  
                smp_mb__before_atomic_inc();
 -              atomic_add(count, &bp->spq_left);
 +              atomic_add(count, &bp->cq_spq_left);
                smp_mb__after_atomic_inc();
                break;
        }
@@@ -10398,13 -10085,6 +10412,13 @@@ struct cnic_eth_dev *bnx2x_cnic_probe(s
        struct bnx2x *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
  
 +      /* If both iSCSI and FCoE are disabled, return NULL to
 +       * indicate to CNIC that it should not try to work
 +       * with this device.
 +       */
 +      if (NO_ISCSI(bp) && NO_FCOE(bp))
 +              return NULL;
 +
        cp->drv_owner = THIS_MODULE;
        cp->chip_id = CHIP_ID(bp);
        cp->pdev = bp->pdev;
                BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
        cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;
  
 +      if (NO_ISCSI_OOO(bp))
 +              cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
 +
 +      if (NO_ISCSI(bp))
 +              cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;
 +
 +      if (NO_FCOE(bp))
 +              cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;
 +
        DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
                         "starting cid %d\n",
           cp->ctx_blk_size,
@@@ -900,6 -900,8 +900,6 @@@ next_desc
  
        adapter->total_rx_bytes += total_rx_bytes;
        adapter->total_rx_packets += total_rx_packets;
 -      netdev->stats.rx_bytes += total_rx_bytes;
 -      netdev->stats.rx_packets += total_rx_packets;
        return cleaned;
  }
  
@@@ -1055,6 -1057,8 +1055,6 @@@ static bool e1000_clean_tx_irq(struct e
        }
        adapter->total_tx_bytes += total_tx_bytes;
        adapter->total_tx_packets += total_tx_packets;
 -      netdev->stats.tx_bytes += total_tx_bytes;
 -      netdev->stats.tx_packets += total_tx_packets;
        return count < tx_ring->count;
  }
  
@@@ -1241,6 -1245,8 +1241,6 @@@ next_desc
  
        adapter->total_rx_bytes += total_rx_bytes;
        adapter->total_rx_packets += total_rx_packets;
 -      netdev->stats.rx_bytes += total_rx_bytes;
 -      netdev->stats.rx_packets += total_rx_packets;
        return cleaned;
  }
  
@@@ -1420,6 -1426,8 +1420,6 @@@ next_desc
  
        adapter->total_rx_bytes += total_rx_bytes;
        adapter->total_rx_packets += total_rx_packets;
 -      netdev->stats.rx_bytes += total_rx_bytes;
 -      netdev->stats.rx_packets += total_rx_packets;
        return cleaned;
  }
  
@@@ -2720,6 -2728,7 +2720,6 @@@ static void e1000_setup_rctl(struct e10
  {
        struct e1000_hw *hw = &adapter->hw;
        u32 rctl, rfctl;
 -      u32 psrctl = 0;
        u32 pages = 0;
  
        /* Workaround Si errata on 82579 - configure jumbo frame flow */
                adapter->rx_ps_pages = 0;
  
        if (adapter->rx_ps_pages) {
 +              u32 psrctl = 0;
 +
                /* Configure extra packet-split registers */
                rfctl = er32(RFCTL);
                rfctl |= E1000_RFCTL_EXTEN;
@@@ -3021,6 -3028,7 +3021,6 @@@ static void e1000_set_multi(struct net_
        struct netdev_hw_addr *ha;
        u8  *mta_list;
        u32 rctl;
 -      int i;
  
        /* Check for Promiscuous and All Multicast modes */
  
        ew32(RCTL, rctl);
  
        if (!netdev_mc_empty(netdev)) {
 +              int i = 0;
 +
                mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
                if (!mta_list)
                        return;
  
                /* prepare a packed array of only addresses. */
 -              i = 0;
                netdev_for_each_mc_addr(ha, netdev)
                        memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
  
@@@ -3331,8 -3338,6 +3331,8 @@@ int e1000e_up(struct e1000_adapter *ada
        return 0;
  }
  
 +static void e1000e_update_stats(struct e1000_adapter *adapter);
 +
  void e1000e_down(struct e1000_adapter *adapter)
  {
        struct net_device *netdev = adapter->netdev;
        del_timer_sync(&adapter->phy_info_timer);
  
        netif_carrier_off(netdev);
 +
 +      spin_lock(&adapter->stats64_lock);
 +      e1000e_update_stats(adapter);
 +      spin_unlock(&adapter->stats64_lock);
 +
        adapter->link_speed = 0;
        adapter->link_duplex = 0;
  
@@@ -3413,8 -3413,6 +3413,8 @@@ static int __devinit e1000_sw_init(stru
        adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
        adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
  
 +      spin_lock_init(&adapter->stats64_lock);
 +
        e1000e_set_interrupt_capability(adapter);
  
        if (e1000_alloc_queues(adapter))
@@@ -3888,7 -3886,7 +3888,7 @@@ release
   * e1000e_update_stats - Update the board statistics counters
   * @adapter: board private structure
   **/
 -void e1000e_update_stats(struct e1000_adapter *adapter)
 +static void e1000e_update_stats(struct e1000_adapter *adapter)
  {
        struct net_device *netdev = adapter->netdev;
        struct e1000_hw *hw = &adapter->hw;
@@@ -4000,11 -3998,10 +4000,11 @@@ static void e1000_phy_read_status(struc
  {
        struct e1000_hw *hw = &adapter->hw;
        struct e1000_phy_regs *phy = &adapter->phy_regs;
  
        if ((er32(STATUS) & E1000_STATUS_LU) &&
            (adapter->hw.phy.media_type == e1000_media_type_copper)) {
 +              int ret_val;
 +
                ret_val  = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr);
                ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr);
                ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise);
@@@ -4150,6 -4147,7 +4150,6 @@@ static void e1000_watchdog_task(struct 
        struct e1000_ring *tx_ring = adapter->tx_ring;
        struct e1000_hw *hw = &adapter->hw;
        u32 link, tctl;
 -      int tx_pending = 0;
  
        link = e1000e_has_link(adapter);
        if ((netif_carrier_ok(netdev)) && link) {
        }
  
  link_up:
 +      spin_lock(&adapter->stats64_lock);
        e1000e_update_stats(adapter);
 +      spin_unlock(&adapter->stats64_lock);
  
        mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
        adapter->tpt_old = adapter->stats.tpt;
  
        e1000e_update_adaptive(&adapter->hw);
  
 -      if (!netif_carrier_ok(netdev)) {
 -              tx_pending = (e1000_desc_unused(tx_ring) + 1 <
 -                             tx_ring->count);
 -              if (tx_pending) {
 -                      /*
 -                       * We've lost link, so the controller stops DMA,
 -                       * but we've got queued Tx work that's never going
 -                       * to get done, so reset controller to flush Tx.
 -                       * (Do the reset outside of interrupt context).
 -                       */
 -                      schedule_work(&adapter->reset_task);
 -                      /* return immediately since reset is imminent */
 -                      return;
 -              }
 +      if (!netif_carrier_ok(netdev) &&
 +          (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) {
 +              /*
 +               * We've lost link, so the controller stops DMA,
 +               * but we've got queued Tx work that's never going
 +               * to get done, so reset controller to flush Tx.
 +               * (Do the reset outside of interrupt context).
 +               */
-               adapter->tx_timeout_count++;
 +              schedule_work(&adapter->reset_task);
 +              /* return immediately since reset is imminent */
 +              return;
        }
  
        /* Simple mode for Interrupt Throttle Rate (ITR) */
@@@ -4385,13 -4383,13 +4384,13 @@@ static int e1000_tso(struct e1000_adapt
        u32 cmd_length = 0;
        u16 ipcse = 0, tucse, mss;
        u8 ipcss, ipcso, tucss, tucso, hdr_len;
 -      int err;
  
        if (!skb_is_gso(skb))
                return 0;
  
        if (skb_header_cloned(skb)) {
 -              err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
 +              int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
 +
                if (err)
                        return err;
        }
@@@ -4898,55 -4896,16 +4897,55 @@@ static void e1000_reset_task(struct wor
  }
  
  /**
 - * e1000_get_stats - Get System Network Statistics
 + * e1000e_get_stats64 - Get System Network Statistics
   * @netdev: network interface device structure
 + * @stats: rtnl_link_stats64 pointer
   *
   * Returns the address of the device statistics structure.
 - * The statistics are actually updated from the timer callback.
   **/
 -static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
 +struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
 +                                             struct rtnl_link_stats64 *stats)
  {
 -      /* only return the current stats */
 -      return &netdev->stats;
 +      struct e1000_adapter *adapter = netdev_priv(netdev);
 +
 +      memset(stats, 0, sizeof(struct rtnl_link_stats64));
 +      spin_lock(&adapter->stats64_lock);
 +      e1000e_update_stats(adapter);
 +      /* Fill out the OS statistics structure */
 +      stats->rx_bytes = adapter->stats.gorc;
 +      stats->rx_packets = adapter->stats.gprc;
 +      stats->tx_bytes = adapter->stats.gotc;
 +      stats->tx_packets = adapter->stats.gptc;
 +      stats->multicast = adapter->stats.mprc;
 +      stats->collisions = adapter->stats.colc;
 +
 +      /* Rx Errors */
 +
 +      /*
 +       * RLEC on some newer hardware can be incorrect so build
 +       * our own version based on RUC and ROC
 +       */
 +      stats->rx_errors = adapter->stats.rxerrc +
 +              adapter->stats.crcerrs + adapter->stats.algnerrc +
 +              adapter->stats.ruc + adapter->stats.roc +
 +              adapter->stats.cexterr;
 +      stats->rx_length_errors = adapter->stats.ruc +
 +                                            adapter->stats.roc;
 +      stats->rx_crc_errors = adapter->stats.crcerrs;
 +      stats->rx_frame_errors = adapter->stats.algnerrc;
 +      stats->rx_missed_errors = adapter->stats.mpc;
 +
 +      /* Tx Errors */
 +      stats->tx_errors = adapter->stats.ecol +
 +                                     adapter->stats.latecol;
 +      stats->tx_aborted_errors = adapter->stats.ecol;
 +      stats->tx_window_errors = adapter->stats.latecol;
 +      stats->tx_carrier_errors = adapter->stats.tncrs;
 +
 +      /* Tx Dropped needs to be maintained elsewhere */
 +
 +      spin_unlock(&adapter->stats64_lock);
 +      return stats;
  }
  
  /**
@@@ -5516,10 -5475,9 +5515,10 @@@ static irqreturn_t e1000_intr_msix(int 
  {
        struct net_device *netdev = data;
        struct e1000_adapter *adapter = netdev_priv(netdev);
 -      int vector, msix_irq;
  
        if (adapter->msix_entries) {
 +              int vector, msix_irq;
 +
                vector = 0;
                msix_irq = adapter->msix_entries[vector].vector;
                disable_irq(msix_irq);
@@@ -5716,7 -5674,7 +5715,7 @@@ static const struct net_device_ops e100
        .ndo_open               = e1000_open,
        .ndo_stop               = e1000_close,
        .ndo_start_xmit         = e1000_xmit_frame,
 -      .ndo_get_stats          = e1000_get_stats,
 +      .ndo_get_stats64        = e1000e_get_stats64,
        .ndo_set_multicast_list = e1000_set_multi,
        .ndo_set_mac_address    = e1000_set_mac,
        .ndo_change_mtu         = e1000_change_mtu,
@@@ -95,9 -95,9 +95,9 @@@ struct ath_config 
   * @BUF_XRETRY: To denote excessive retries of the buffer
   */
  enum buffer_type {
 -      BUF_AMPDU               = BIT(2),
 -      BUF_AGGR                = BIT(3),
 -      BUF_XRETRY              = BIT(5),
 +      BUF_AMPDU               = BIT(0),
 +      BUF_AGGR                = BIT(1),
 +      BUF_XRETRY              = BIT(2),
  };
  
  #define bf_isampdu(bf)                (bf->bf_state.bf_type & BUF_AMPDU)
@@@ -137,6 -137,7 +137,6 @@@ void ath_descdma_cleanup(struct ath_sof
         (((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI : \
         WME_AC_VO)
  
 -#define ADDBA_EXCHANGE_ATTEMPTS    10
  #define ATH_AGGR_DELIM_SZ          4
  #define ATH_AGGR_MINPLEN           256 /* in bytes, minimum packet length */
  /* number of delimiters for encryption padding */
@@@ -183,8 -184,7 +183,8 @@@ enum ATH_AGGR_STATUS 
  
  #define ATH_TXFIFO_DEPTH 8
  struct ath_txq {
 -      u32 axq_qnum;
 +      int mac80211_qnum; /* mac80211 queue number, -1 means not mac80211 Q */
 +      u32 axq_qnum; /* ath9k hardware queue number */
        u32 *axq_link;
        struct list_head axq_q;
        spinlock_t axq_lock;
@@@ -218,6 -218,7 +218,7 @@@ struct ath_frame_info 
  struct ath_buf_state {
        u8 bf_type;
        u8 bfs_paprd;
+       unsigned long bfs_paprd_timestamp;
        enum ath9k_internal_frame_type bfs_ftype;
  };
  
@@@ -233,6 -234,7 +234,6 @@@ struct ath_buf 
        bool bf_stale;
        u16 bf_flags;
        struct ath_buf_state bf_state;
 -      struct ath_wiphy *aphy;
  };
  
  struct ath_atx_tid {
  };
  
  struct ath_node {
 -      struct ath_common *common;
 +#ifdef CONFIG_ATH9K_DEBUGFS
 +      struct list_head list; /* for sc->nodes */
 +      struct ieee80211_sta *sta; /* station struct we're part of */
 +#endif
        struct ath_atx_tid tid[WME_NUM_TID];
        struct ath_atx_ac ac[WME_NUM_AC];
        u16 maxampdu;
@@@ -279,11 -278,6 +280,11 @@@ struct ath_tx_control 
  #define ATH_TX_XRETRY       0x02
  #define ATH_TX_BAR          0x04
  
 +/**
 + * @txq_map:  Index is mac80211 queue number.  This is
 + *  not necessarily the same as the hardware queue number
 + *  (axq_qnum).
 + */
  struct ath_tx {
        u16 seq_no;
        u32 txqsetup;
@@@ -310,8 -304,6 +311,8 @@@ struct ath_rx 
        struct ath_descdma rxdma;
        struct ath_buf *rx_bufptr;
        struct ath_rx_edma rx_edma[ATH9K_RX_QUEUE_MAX];
 +
 +      struct sk_buff *frag;
  };
  
  int ath_startrecv(struct ath_softc *sc);
@@@ -351,6 -343,7 +352,6 @@@ struct ath_vif 
        __le64 tsf_adjust; /* TSF adjustment for staggered beacons */
        enum nl80211_iftype av_opmode;
        struct ath_buf *av_bcbuf;
 -      struct ath_tx_control av_btxctl;
        u8 bssid[ETH_ALEN]; /* current BSSID from config_interface */
  };
  
@@@ -389,6 -382,7 +390,6 @@@ struct ath_beacon 
        u32 ast_be_xmit;
        u64 bc_tstamp;
        struct ieee80211_vif *bslot[ATH_BCBUF];
 -      struct ath_wiphy *bslot_aphy[ATH_BCBUF];
        int slottime;
        int slotupdate;
        struct ath9k_tx_queue_info beacon_qi;
  
  void ath_beacon_tasklet(unsigned long data);
  void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif);
 -int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif);
 +int ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_vif *vif);
  void ath_beacon_return(struct ath_softc *sc, struct ath_vif *avp);
  int ath_beaconq_config(struct ath_softc *sc);
  
@@@ -536,6 -530,7 +537,6 @@@ struct ath_ant_comb 
  #define ATH_CABQ_READY_TIME     80      /* % of beacon interval */
  #define ATH_MAX_SW_RETRIES      10
  #define ATH_CHAN_MAX            255
 -#define IEEE80211_WEP_NKID      4       /* number of key ids */
  
  #define ATH_TXPOWER_MAX         100     /* .5 dBm units */
  #define ATH_RATE_DUMMY_MARKER   0
  #define PS_WAIT_FOR_TX_ACK        BIT(3)
  #define PS_BEACON_SYNC            BIT(4)
  
 -struct ath_wiphy;
  struct ath_rate_table;
  
 +struct ath9k_vif_iter_data {
 +      const u8 *hw_macaddr; /* phy's hardware address, set
 +                             * before starting iteration for
 +                             * valid bssid mask.
 +                             */
 +      u8 mask[ETH_ALEN]; /* bssid mask */
 +      int naps;      /* number of AP vifs */
 +      int nmeshes;   /* number of mesh vifs */
 +      int nstations; /* number of station vifs */
 +      int nwds;      /* number of WDS vifs */
 +      int nadhocs;   /* number of adhoc vifs */
 +      int nothers;   /* number of vifs not specified above. */
 +};
 +
  struct ath_softc {
        struct ieee80211_hw *hw;
        struct device *dev;
  
 -      spinlock_t wiphy_lock; /* spinlock to protect ath_wiphy data */
 -      struct ath_wiphy *pri_wiphy;
 -      struct ath_wiphy **sec_wiphy; /* secondary wiphys (virtual radios); may
 -                                     * have NULL entries */
 -      int num_sec_wiphy; /* number of sec_wiphy pointers in the array */
        int chan_idx;
        int chan_is_ht;
 -      struct ath_wiphy *next_wiphy;
 -      struct work_struct chan_work;
 -      int wiphy_select_failures;
 -      unsigned long wiphy_select_first_fail;
 -      struct delayed_work wiphy_work;
 -      unsigned long wiphy_scheduler_int;
 -      int wiphy_scheduler_index;
        struct survey_info *cur_survey;
        struct survey_info survey[ATH9K_NUM_CHANNELS];
  
        struct work_struct paprd_work;
        struct work_struct hw_check_work;
        struct completion paprd_complete;
-       bool paprd_pending;
  
 +      unsigned int hw_busy_count;
 +
        u32 intrstatus;
        u32 sc_flags; /* SC_OP_* */
        u16 ps_flags; /* PS_* */
        u16 curtxpow;
 -      u8 nbcnvifs;
 -      u16 nvifs;
        bool ps_enabled;
        bool ps_idle;
 +      short nbcnvifs;
 +      short nvifs;
        unsigned long ps_usecount;
  
        struct ath_config config;
        int led_on_cnt;
        int led_off_cnt;
  
 +      struct ath9k_hw_cal_data caldata;
 +      int last_rssi;
 +
        int beacon_interval;
  
  #ifdef CONFIG_ATH9K_DEBUGFS
        struct ath9k_debug debug;
 +      spinlock_t nodes_lock;
 +      struct list_head nodes; /* basically, stations */
 +      unsigned int tx_complete_poll_work_seen;
  #endif
        struct ath_beacon_config cur_beacon_conf;
        struct delayed_work tx_complete_work;
 +      struct delayed_work hw_pll_work;
        struct ath_btcoex btcoex;
  
        struct ath_descdma txsdma;
        struct pm_qos_request_list pm_qos_req;
  };
  
 -struct ath_wiphy {
 -      struct ath_softc *sc; /* shared for all virtual wiphys */
 -      struct ieee80211_hw *hw;
 -      struct ath9k_hw_cal_data caldata;
 -      enum ath_wiphy_state {
 -              ATH_WIPHY_INACTIVE,
 -              ATH_WIPHY_ACTIVE,
 -              ATH_WIPHY_PAUSING,
 -              ATH_WIPHY_PAUSED,
 -              ATH_WIPHY_SCAN,
 -      } state;
 -      bool idle;
 -      int chan_idx;
 -      int chan_is_ht;
 -      int last_rssi;
 -};
 -
  void ath9k_tasklet(unsigned long data);
  int ath_reset(struct ath_softc *sc, bool retry_tx);
  int ath_cabq_update(struct ath_softc *);
@@@ -674,13 -675,14 +674,13 @@@ int ath9k_init_device(u16 devid, struc
                    const struct ath_bus_ops *bus_ops);
  void ath9k_deinit_device(struct ath_softc *sc);
  void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw);
 -void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw,
 -                         struct ath9k_channel *ichan);
  int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
                    struct ath9k_channel *hchan);
  
  void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw);
  void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw);
  bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode);
 +bool ath9k_uses_beacons(int type);
  
  #ifdef CONFIG_PCI
  int ath_pci_init(void);
@@@ -704,12 -706,26 +704,12 @@@ void ath9k_ps_restore(struct ath_softc 
  u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate);
  
  void ath9k_set_bssid_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
  
  void ath_start_rfkill_poll(struct ath_softc *sc);
  extern void ath9k_rfkill_poll_state(struct ieee80211_hw *hw);
 +void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
 +                             struct ieee80211_vif *vif,
 +                             struct ath9k_vif_iter_data *iter_data);
 +
  
  #endif /* ATH9K_H */
  #include "ath9k.h"
  #include "btcoex.h"
  
 -static void ath_update_txpow(struct ath_softc *sc)
 -{
 -      struct ath_hw *ah = sc->sc_ah;
 -
 -      if (sc->curtxpow != sc->config.txpowlimit) {
 -              ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit, false);
 -              /* read back in case value is clamped */
 -              sc->curtxpow = ath9k_hw_regulatory(ah)->power_limit;
 -      }
 -}
 -
  static u8 parse_mpdudensity(u8 mpdudensity)
  {
        /*
        }
  }
  
 -static struct ath9k_channel *ath_get_curchannel(struct ath_softc *sc,
 -                                              struct ieee80211_hw *hw)
 -{
 -      struct ieee80211_channel *curchan = hw->conf.channel;
 -      struct ath9k_channel *channel;
 -      u8 chan_idx;
 -
 -      chan_idx = curchan->hw_value;
 -      channel = &sc->sc_ah->channels[chan_idx];
 -      ath9k_update_ichannel(sc, hw, channel);
 -      return channel;
 -}
 -
  bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode)
  {
        unsigned long flags;
@@@ -153,12 -177,7 +153,12 @@@ static void ath_update_survey_nf(struc
        }
  }
  
 -static void ath_update_survey_stats(struct ath_softc *sc)
 +/*
 + * Updates the survey statistics and returns the busy time since last
 + * update in %, if the measurement duration was long enough for the
 + * result to be useful, -1 otherwise.
 + */
 +static int ath_update_survey_stats(struct ath_softc *sc)
  {
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct survey_info *survey = &sc->survey[pos];
        struct ath_cycle_counters *cc = &common->cc_survey;
        unsigned int div = common->clockrate * 1000;
 +      int ret = 0;
  
        if (!ah->curchan)
 -              return;
 +              return -1;
  
        if (ah->power_mode == ATH9K_PM_AWAKE)
                ath_hw_cycle_counters_update(common);
                survey->channel_time_rx += cc->rx_frame / div;
                survey->channel_time_tx += cc->tx_frame / div;
        }
 +
 +      if (cc->cycles < div)
 +              return -1;
 +
 +      if (cc->cycles > 0)
 +              ret = cc->rx_busy * 100 / cc->cycles;
 +
        memset(cc, 0, sizeof(*cc));
  
        ath_update_survey_nf(sc, pos);
 +
 +      return ret;
  }
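/*
 * Minimal standalone sketch of the busy-time calculation performed by
 * ath_update_survey_stats() above; the sample struct and names here are
 * illustrative only, not driver API.  The rx_busy cycles are expressed as
 * a percentage of all cycles, and -1 means the sample window was shorter
 * than div cycles (in the code above div is common->clockrate * 1000,
 * roughly one millisecond of clock ticks), i.e. too short to be useful.
 */
struct cc_sample {
	unsigned int cycles;	/* total clock cycles in the sample window */
	unsigned int rx_busy;	/* cycles during which the medium was busy */
};

static int cc_busy_percent(const struct cc_sample *cc, unsigned int div)
{
	if (cc->cycles < div)
		return -1;	/* measurement window too short */

	return cc->cycles ? (int)(cc->rx_busy * 100 / cc->cycles) : 0;
}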
  
  /*
  int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
                    struct ath9k_channel *hchan)
  {
 -      struct ath_wiphy *aphy = hw->priv;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ieee80211_conf *conf = &common->hw->conf;
        if (sc->sc_flags & SC_OP_INVALID)
                return -EIO;
  
 +      sc->hw_busy_count = 0;
 +
        del_timer_sync(&common->ani.timer);
        cancel_work_sync(&sc->paprd_work);
        cancel_work_sync(&sc->hw_check_work);
        cancel_delayed_work_sync(&sc->tx_complete_work);
 +      cancel_delayed_work_sync(&sc->hw_pll_work);
  
        ath9k_ps_wakeup(sc);
  
        if (!ath_stoprecv(sc))
                stopped = false;
  
 +      if (!ath9k_hw_check_alive(ah))
 +              stopped = false;
 +
        /* XXX: do not flush receive queue here. We don't want
         * to flush data frames already in queue because of
         * changing channel. */
                fastcc = false;
  
        if (!(sc->sc_flags & SC_OP_OFFCHANNEL))
 -              caldata = &aphy->caldata;
 +              caldata = &sc->caldata;
  
        ath_dbg(common, ATH_DBG_CONFIG,
                "(%u MHz) -> (%u MHz), conf_is_ht40: %d fastcc: %d\n",
                goto ps_restore;
        }
  
 -      ath_update_txpow(sc);
 +      ath9k_cmn_update_txpow(ah, sc->curtxpow,
 +                             sc->config.txpowlimit, &sc->curtxpow);
        ath9k_hw_set_interrupts(ah, ah->imask);
  
        if (!(sc->sc_flags & (SC_OP_OFFCHANNEL))) {
                if (sc->sc_flags & SC_OP_BEACONS)
                        ath_beacon_config(sc, NULL);
                ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
 +              ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, HZ/2);
                ath_start_ani(common);
        }
  
   ps_restore:
 +      ieee80211_wake_queues(hw);
 +
        spin_unlock_bh(&sc->sc_pcu_lock);
  
        ath9k_ps_restore(sc);
@@@ -342,7 -342,6 +342,6 @@@ static bool ath_paprd_send_frame(struc
        tx_info->control.rates[1].idx = -1;
  
        init_completion(&sc->paprd_complete);
-       sc->paprd_pending = true;
        txctl.paprd = BIT(chain);
  
        if (ath_tx_start(hw, skb, &txctl) != 0) {
  
        time_left = wait_for_completion_timeout(&sc->paprd_complete,
                        msecs_to_jiffies(ATH_PAPRD_TIMEOUT));
-       sc->paprd_pending = false;
  
        if (!time_left)
                ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_CALIBRATE,
@@@ -551,12 -549,6 +549,12 @@@ static void ath_node_attach(struct ath_
        struct ath_hw *ah = sc->sc_ah;
        an = (struct ath_node *)sta->drv_priv;
  
 +#ifdef CONFIG_ATH9K_DEBUGFS
 +      spin_lock(&sc->nodes_lock);
 +      list_add(&an->list, &sc->nodes);
 +      spin_unlock(&sc->nodes_lock);
 +      an->sta = sta;
 +#endif
        if ((ah->caps.hw_caps) & ATH9K_HW_CAP_APM)
                sc->sc_flags |= SC_OP_ENABLE_APM;
  
@@@ -572,13 -564,6 +570,13 @@@ static void ath_node_detach(struct ath_
  {
        struct ath_node *an = (struct ath_node *)sta->drv_priv;
  
 +#ifdef CONFIG_ATH9K_DEBUGFS
 +      spin_lock(&sc->nodes_lock);
 +      list_del(&an->list);
 +      spin_unlock(&sc->nodes_lock);
 +      an->sta = NULL;
 +#endif
 +
        if (sc->sc_flags & SC_OP_TXAGGR)
                ath_tx_node_cleanup(sc, an);
  }
  void ath_hw_check(struct work_struct *work)
  {
        struct ath_softc *sc = container_of(work, struct ath_softc, hw_check_work);
 -      int i;
 +      struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 +      unsigned long flags;
 +      int busy;
  
        ath9k_ps_wakeup(sc);
 +      if (ath9k_hw_check_alive(sc->sc_ah))
 +              goto out;
  
 -      for (i = 0; i < 3; i++) {
 -              if (ath9k_hw_check_alive(sc->sc_ah))
 -                      goto out;
 +      spin_lock_irqsave(&common->cc_lock, flags);
 +      busy = ath_update_survey_stats(sc);
 +      spin_unlock_irqrestore(&common->cc_lock, flags);
  
 -              msleep(1);
 -      }
 -      ath_reset(sc, true);
 +      ath_dbg(common, ATH_DBG_RESET, "Possible baseband hang, "
 +              "busy=%d (try %d)\n", busy, sc->hw_busy_count + 1);
 +      if (busy >= 99) {
 +              if (++sc->hw_busy_count >= 3)
 +                      ath_reset(sc, true);
 +      } else if (busy >= 0)
 +              sc->hw_busy_count = 0;
  
  out:
        ath9k_ps_restore(sc);
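/*
 * Sketch (hypothetical helper, not driver code) of the reset policy used
 * by ath_hw_check() above: a reset is issued only after three consecutive
 * checks in which the chip looked dead while the medium was reported at
 * least 99% busy; a valid sample below 99% clears the counter, and an
 * invalid sample (-1) leaves it untouched.
 */
static int hw_busy_count;

static void hw_check(int chip_alive, int busy_percent)
{
	if (chip_alive)
		return;			/* chip is healthy, nothing to do */

	if (busy_percent >= 99) {
		if (++hw_busy_count >= 3) {
			hw_busy_count = 0;
			/* issue a full chip reset here (ath_reset in the driver) */
		}
	} else if (busy_percent >= 0) {
		hw_busy_count = 0;	/* plausible sample, not hung */
	}
}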
@@@ -627,15 -604,7 +625,15 @@@ void ath9k_tasklet(unsigned long data
        ath9k_ps_wakeup(sc);
        spin_lock(&sc->sc_pcu_lock);
  
 -      if (!ath9k_hw_check_alive(ah))
 +      /*
 +       * Only run the baseband hang check if beacons stop working in AP or
 +       * IBSS mode, because the check has a high false positive rate. For
 +       * station mode it should not be necessary, since the upper layers
 +       * will detect this automatically through a beacon miss and the
 +       * following channel change will trigger a hardware reset anyway.
 +       */
 +      if (ath9k_hw_numtxpending(ah, sc->beacon.beaconq) != 0 &&
 +          !ath9k_hw_check_alive(ah))
                ieee80211_queue_work(sc->hw, &sc->hw_check_work);
  
        if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
@@@ -814,11 -783,54 +812,11 @@@ chip_reset
  #undef SCHED_INTR
  }
  
 -static u32 ath_get_extchanmode(struct ath_softc *sc,
 -                             struct ieee80211_channel *chan,
 -                             enum nl80211_channel_type channel_type)
 -{
 -      u32 chanmode = 0;
 -
 -      switch (chan->band) {
 -      case IEEE80211_BAND_2GHZ:
 -              switch(channel_type) {
 -              case NL80211_CHAN_NO_HT:
 -              case NL80211_CHAN_HT20:
 -                      chanmode = CHANNEL_G_HT20;
 -                      break;
 -              case NL80211_CHAN_HT40PLUS:
 -                      chanmode = CHANNEL_G_HT40PLUS;
 -                      break;
 -              case NL80211_CHAN_HT40MINUS:
 -                      chanmode = CHANNEL_G_HT40MINUS;
 -                      break;
 -              }
 -              break;
 -      case IEEE80211_BAND_5GHZ:
 -              switch(channel_type) {
 -              case NL80211_CHAN_NO_HT:
 -              case NL80211_CHAN_HT20:
 -                      chanmode = CHANNEL_A_HT20;
 -                      break;
 -              case NL80211_CHAN_HT40PLUS:
 -                      chanmode = CHANNEL_A_HT40PLUS;
 -                      break;
 -              case NL80211_CHAN_HT40MINUS:
 -                      chanmode = CHANNEL_A_HT40MINUS;
 -                      break;
 -              }
 -              break;
 -      default:
 -              break;
 -      }
 -
 -      return chanmode;
 -}
 -
  static void ath9k_bss_assoc_info(struct ath_softc *sc,
                                 struct ieee80211_hw *hw,
                                 struct ieee80211_vif *vif,
                                 struct ieee80211_bss_conf *bss_conf)
  {
 -      struct ath_wiphy *aphy = hw->priv;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
  
                ath_beacon_config(sc, vif);
  
                /* Reset rssi stats */
 -              aphy->last_rssi = ATH_RSSI_DUMMY_MARKER;
 +              sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
                sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
  
                sc->sc_flags |= SC_OP_ANI_RUN;
@@@ -869,7 -881,7 +867,7 @@@ void ath_radio_enable(struct ath_softc 
        ath9k_hw_configpcipowersave(ah, 0, 0);
  
        if (!ah->curchan)
 -              ah->curchan = ath_get_curchannel(sc, sc->hw);
 +              ah->curchan = ath9k_cmn_get_curchannel(sc->hw, ah);
  
        r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
        if (r) {
                        channel->center_freq, r);
        }
  
 -      ath_update_txpow(sc);
 +      ath9k_cmn_update_txpow(ah, sc->curtxpow,
 +                             sc->config.txpowlimit, &sc->curtxpow);
        if (ath_startrecv(sc) != 0) {
                ath_err(common, "Unable to restart recv logic\n");
                goto out;
@@@ -931,7 -942,7 +929,7 @@@ void ath_radio_disable(struct ath_soft
        ath_flushrecv(sc);              /* flush recv queue */
  
        if (!ah->curchan)
 -              ah->curchan = ath_get_curchannel(sc, hw);
 +              ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
  
        r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
        if (r) {
@@@ -955,8 -966,6 +953,8 @@@ int ath_reset(struct ath_softc *sc, boo
        struct ieee80211_hw *hw = sc->hw;
        int r;
  
 +      sc->hw_busy_count = 0;
 +
        /* Stop ANI */
        del_timer_sync(&common->ani.timer);
  
         * that changes the channel so update any state that
         * might change as a result.
         */
 -      ath_update_txpow(sc);
 +      ath9k_cmn_update_txpow(ah, sc->curtxpow,
 +                             sc->config.txpowlimit, &sc->curtxpow);
  
        if ((sc->sc_flags & SC_OP_BEACONS) || !(sc->sc_flags & (SC_OP_OFFCHANNEL)))
                ath_beacon_config(sc, NULL);    /* restart beacons */
        return r;
  }
  
 -/* XXX: Remove me once we don't depend on ath9k_channel for all
 - * this redundant data */
 -void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw,
 -                         struct ath9k_channel *ichan)
 -{
 -      struct ieee80211_channel *chan = hw->conf.channel;
 -      struct ieee80211_conf *conf = &hw->conf;
 -
 -      ichan->channel = chan->center_freq;
 -      ichan->chan = chan;
 -
 -      if (chan->band == IEEE80211_BAND_2GHZ) {
 -              ichan->chanmode = CHANNEL_G;
 -              ichan->channelFlags = CHANNEL_2GHZ | CHANNEL_OFDM | CHANNEL_G;
 -      } else {
 -              ichan->chanmode = CHANNEL_A;
 -              ichan->channelFlags = CHANNEL_5GHZ | CHANNEL_OFDM;
 -      }
 -
 -      if (conf_is_ht(conf))
 -              ichan->chanmode = ath_get_extchanmode(sc, chan,
 -                                          conf->channel_type);
 -}
 -
  /**********************/
  /* mac80211 callbacks */
  /**********************/
  
  static int ath9k_start(struct ieee80211_hw *hw)
  {
 -      struct ath_wiphy *aphy = hw->priv;
 -      struct ath_softc *sc = aphy->sc;
 +      struct ath_softc *sc = hw->priv;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ieee80211_channel *curchan = hw->conf.channel;
  
        mutex_lock(&sc->mutex);
  
 -      if (ath9k_wiphy_started(sc)) {
 -              if (sc->chan_idx == curchan->hw_value) {
 -                      /*
 -                       * Already on the operational channel, the new wiphy
 -                       * can be marked active.
 -                       */
 -                      aphy->state = ATH_WIPHY_ACTIVE;
 -                      ieee80211_wake_queues(hw);
 -              } else {
 -                      /*
 -                       * Another wiphy is on another channel, start the new
 -                       * wiphy in paused state.
 -                       */
 -                      aphy->state = ATH_WIPHY_PAUSED;
 -                      ieee80211_stop_queues(hw);
 -              }
 -              mutex_unlock(&sc->mutex);
 -              return 0;
 -      }
 -      aphy->state = ATH_WIPHY_ACTIVE;
 -
        /* setup initial channel */
 -
        sc->chan_idx = curchan->hw_value;
  
 -      init_channel = ath_get_curchannel(sc, hw);
 +      init_channel = ath9k_cmn_get_curchannel(hw, ah);
  
        /* Reset SERDES registers */
        ath9k_hw_configpcipowersave(ah, 0, 0);
         * This is needed only to setup initial state
         * but it's best done after a reset.
         */
 -      ath_update_txpow(sc);
 +      ath9k_cmn_update_txpow(ah, sc->curtxpow,
 +                      sc->config.txpowlimit, &sc->curtxpow);
  
        /*
         * Setup the hardware after reset:
@@@ -1137,11 -1191,19 +1135,11 @@@ mutex_unlock
  static int ath9k_tx(struct ieee80211_hw *hw,
                    struct sk_buff *skb)
  {
 -      struct ath_wiphy *aphy = hw->priv;
 -      struct ath_softc *sc = aphy->sc;
 +      struct ath_softc *sc = hw->priv;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_tx_control txctl;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
  
 -      if (aphy->state != ATH_WIPHY_ACTIVE && aphy->state != ATH_WIPHY_SCAN) {
 -              ath_dbg(common, ATH_DBG_XMIT,
 -                      "ath9k: %s: TX in unexpected wiphy state %d\n",
 -                      wiphy_name(hw->wiphy), aphy->state);
 -              goto exit;
 -      }
 -
        if (sc->ps_enabled) {
                /*
                 * mac80211 does not set PM field for normal data frames, so we
@@@ -1200,26 -1262,44 +1198,26 @@@ exit
  
  static void ath9k_stop(struct ieee80211_hw *hw)
  {
 -      struct ath_wiphy *aphy = hw->priv;
 -      struct ath_softc *sc = aphy->sc;
 +      struct ath_softc *sc = hw->priv;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
  
        mutex_lock(&sc->mutex);
  
 -      aphy->state = ATH_WIPHY_INACTIVE;
 -
        if (led_blink)
                cancel_delayed_work_sync(&sc->ath_led_blink_work);
  
        cancel_delayed_work_sync(&sc->tx_complete_work);
 +      cancel_delayed_work_sync(&sc->hw_pll_work);
        cancel_work_sync(&sc->paprd_work);
        cancel_work_sync(&sc->hw_check_work);
  
 -      for (i = 0; i < sc->num_sec_wiphy; i++) {
 -              if (sc->sec_wiphy[i])
 -                      break;
 -      }
 -
 -      if (i == sc->num_sec_wiphy) {
 -              cancel_delayed_work_sync(&sc->wiphy_work);
 -              cancel_work_sync(&sc->chan_work);
 -      }
 -
        if (sc->sc_flags & SC_OP_INVALID) {
                ath_dbg(common, ATH_DBG_ANY, "Device not present\n");
                mutex_unlock(&sc->mutex);
                return;
        }
  
 -      if (ath9k_wiphy_started(sc)) {
 -              mutex_unlock(&sc->mutex);
 -              return; /* another wiphy still in use */
 -      }
 -
        /* Ensure HW is awake when we try to shut it down. */
        ath9k_ps_wakeup(sc);
  
        } else
                sc->rx.rxlink = NULL;
  
 +      if (sc->rx.frag) {
 +              dev_kfree_skb_any(sc->rx.frag);
 +              sc->rx.frag = NULL;
 +      }
 +
        /* disable HAL and put h/w to sleep */
        ath9k_hw_disable(ah);
        ath9k_hw_configpcipowersave(ah, 1, 1);
        ath9k_ps_restore(sc);
  
        sc->ps_idle = true;
 -      ath9k_set_wiphy_idle(aphy, true);
        ath_radio_disable(sc, hw);
  
        sc->sc_flags |= SC_OP_INVALID;
        ath_dbg(common, ATH_DBG_CONFIG, "Driver halt\n");
  }
  
 -static int ath9k_add_interface(struct ieee80211_hw *hw,
 -                             struct ieee80211_vif *vif)
 +bool ath9k_uses_beacons(int type)
 +{
 +      switch (type) {
 +      case NL80211_IFTYPE_AP:
 +      case NL80211_IFTYPE_ADHOC:
 +      case NL80211_IFTYPE_MESH_POINT:
 +              return true;
 +      default:
 +              return false;
 +      }
 +}
 +
 +static void ath9k_reclaim_beacon(struct ath_softc *sc,
 +                               struct ieee80211_vif *vif)
  {
 -      struct ath_wiphy *aphy = hw->priv;
 -      struct ath_softc *sc = aphy->sc;
 -      struct ath_hw *ah = sc->sc_ah;
 -      struct ath_common *common = ath9k_hw_common(ah);
        struct ath_vif *avp = (void *)vif->drv_priv;
 -      enum nl80211_iftype ic_opmode = NL80211_IFTYPE_UNSPECIFIED;
 -      int ret = 0;
  
 -      mutex_lock(&sc->mutex);
 +      /* Disable SWBA interrupt */
 +      sc->sc_ah->imask &= ~ATH9K_INT_SWBA;
 +      ath9k_ps_wakeup(sc);
 +      ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_ah->imask);
 +      ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
 +      tasklet_kill(&sc->bcon_tasklet);
 +      ath9k_ps_restore(sc);
 +
 +      ath_beacon_return(sc, avp);
 +      sc->sc_flags &= ~SC_OP_BEACONS;
 +
 +      if (sc->nbcnvifs > 0) {
 +              /* Re-enable beaconing */
 +              sc->sc_ah->imask |= ATH9K_INT_SWBA;
 +              ath9k_ps_wakeup(sc);
 +              ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_ah->imask);
 +              ath9k_ps_restore(sc);
 +      }
 +}
 +
 +static void ath9k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
 +{
 +      struct ath9k_vif_iter_data *iter_data = data;
 +      int i;
 +
 +      if (iter_data->hw_macaddr)
 +              for (i = 0; i < ETH_ALEN; i++)
 +                      iter_data->mask[i] &=
 +                              ~(iter_data->hw_macaddr[i] ^ mac[i]);
  
        switch (vif->type) {
 -      case NL80211_IFTYPE_STATION:
 -              ic_opmode = NL80211_IFTYPE_STATION;
 +      case NL80211_IFTYPE_AP:
 +              iter_data->naps++;
                break;
 -      case NL80211_IFTYPE_WDS:
 -              ic_opmode = NL80211_IFTYPE_WDS;
 +      case NL80211_IFTYPE_STATION:
 +              iter_data->nstations++;
                break;
        case NL80211_IFTYPE_ADHOC:
 -      case NL80211_IFTYPE_AP:
 +              iter_data->nadhocs++;
 +              break;
        case NL80211_IFTYPE_MESH_POINT:
 -              if (sc->nbcnvifs >= ATH_BCBUF) {
 -                      ret = -ENOBUFS;
 -                      goto out;
 -              }
 -              ic_opmode = vif->type;
 +              iter_data->nmeshes++;
 +              break;
 +      case NL80211_IFTYPE_WDS:
 +              iter_data->nwds++;
                break;
        default:
 -              ath_err(common, "Interface type %d not yet supported\n",
 -                      vif->type);
 -              ret = -EOPNOTSUPP;
 -              goto out;
 +              iter_data->nothers++;
 +              break;
        }
 +}
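/*
 * Standalone sketch of the BSSID-mask arithmetic in ath9k_vif_iter()
 * above (addresses and names below are made up): every bit in which a
 * vif address differs from the hardware MAC is cleared in the mask, so
 * the hardware only matches on the bits all active interfaces share.
 * E.g. with hw 00:03:7f:aa:bb:01 and a vif at 00:03:7f:aa:bb:02, the
 * last byte differs in bits 0x03, so the mask becomes ff:ff:ff:ff:ff:fc.
 */
#include <string.h>

#define ETH_ALEN 6

static void build_bssid_mask(const unsigned char *hw_mac,
			     const unsigned char vifs[][ETH_ALEN], int nvifs,
			     unsigned char mask[ETH_ALEN])
{
	int v, i;

	memset(mask, 0xff, ETH_ALEN);
	for (v = 0; v < nvifs; v++)
		for (i = 0; i < ETH_ALEN; i++)
			mask[i] &= ~(hw_mac[i] ^ vifs[v][i]);
}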
  
 -      ath_dbg(common, ATH_DBG_CONFIG,
 -              "Attach a VIF of type: %d\n", ic_opmode);
 +/* Called with sc->mutex held. */
 +void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
 +                             struct ieee80211_vif *vif,
 +                             struct ath9k_vif_iter_data *iter_data)
 +{
 +      struct ath_softc *sc = hw->priv;
 +      struct ath_hw *ah = sc->sc_ah;
 +      struct ath_common *common = ath9k_hw_common(ah);
  
 -      /* Set the VIF opmode */
 -      avp->av_opmode = ic_opmode;
 -      avp->av_bslot = -1;
 +      /*
 +       * Use the hardware MAC address as the reference; the hardware uses it
 +       * together with the BSSID mask when matching addresses.
 +       */
 +      memset(iter_data, 0, sizeof(*iter_data));
 +      iter_data->hw_macaddr = common->macaddr;
 +      memset(&iter_data->mask, 0xff, ETH_ALEN);
  
 -      sc->nvifs++;
 +      if (vif)
 +              ath9k_vif_iter(iter_data, vif->addr, vif);
  
 -      ath9k_set_bssid_mask(hw, vif);
 +      /* Get list of all active MAC addresses */
 +      ieee80211_iterate_active_interfaces_atomic(sc->hw, ath9k_vif_iter,
 +                                                 iter_data);
 +}
  
 -      if (sc->nvifs > 1)
 -              goto out; /* skip global settings for secondary vif */
 +/* Called with sc->mutex held. */
 +static void ath9k_calculate_summary_state(struct ieee80211_hw *hw,
 +                                        struct ieee80211_vif *vif)
 +{
 +      struct ath_softc *sc = hw->priv;
 +      struct ath_hw *ah = sc->sc_ah;
 +      struct ath_common *common = ath9k_hw_common(ah);
 +      struct ath9k_vif_iter_data iter_data;
 +
 +      ath9k_calculate_iter_data(hw, vif, &iter_data);
 +
 +      ath9k_ps_wakeup(sc);
 +      /* Set BSSID mask. */
 +      memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
 +      ath_hw_setbssidmask(common);
  
 -      if (ic_opmode == NL80211_IFTYPE_AP) {
 +      /* Set op-mode & TSF */
 +      if (iter_data.naps > 0) {
                ath9k_hw_set_tsfadjust(ah, 1);
                sc->sc_flags |= SC_OP_TSF_RESET;
 -      }
 +              ah->opmode = NL80211_IFTYPE_AP;
 +      } else {
 +              ath9k_hw_set_tsfadjust(ah, 0);
 +              sc->sc_flags &= ~SC_OP_TSF_RESET;
  
 -      /* Set the device opmode */
 -      ah->opmode = ic_opmode;
 +              if (iter_data.nwds + iter_data.nmeshes)
 +                      ah->opmode = NL80211_IFTYPE_AP;
 +              else if (iter_data.nadhocs)
 +                      ah->opmode = NL80211_IFTYPE_ADHOC;
 +              else
 +                      ah->opmode = NL80211_IFTYPE_STATION;
 +      }
  
        /*
         * Enable MIB interrupts when there are hardware phy counters.
 -       * Note we only do this (at the moment) for station mode.
         */
 -      if ((vif->type == NL80211_IFTYPE_STATION) ||
 -          (vif->type == NL80211_IFTYPE_ADHOC) ||
 -          (vif->type == NL80211_IFTYPE_MESH_POINT)) {
 +      if ((iter_data.nstations + iter_data.nadhocs + iter_data.nmeshes) > 0) {
                if (ah->config.enable_ani)
                        ah->imask |= ATH9K_INT_MIB;
                ah->imask |= ATH9K_INT_TSFOOR;
 +      } else {
 +              ah->imask &= ~ATH9K_INT_MIB;
 +              ah->imask &= ~ATH9K_INT_TSFOOR;
        }
  
        ath9k_hw_set_interrupts(ah, ah->imask);
 +      ath9k_ps_restore(sc);
  
 -      if (vif->type == NL80211_IFTYPE_AP    ||
 -          vif->type == NL80211_IFTYPE_ADHOC) {
 +      /* Set up ANI */
 +      if ((iter_data.naps + iter_data.nadhocs) > 0) {
                sc->sc_flags |= SC_OP_ANI_RUN;
                ath_start_ani(common);
 +      } else {
 +              sc->sc_flags &= ~SC_OP_ANI_RUN;
 +              del_timer_sync(&common->ani.timer);
        }
 +}
  
 -out:
 -      mutex_unlock(&sc->mutex);
 -      return ret;
 +/* Called with sc->mutex held and the vif counts already set up. */
 +static void ath9k_do_vif_add_setup(struct ieee80211_hw *hw,
 +                                 struct ieee80211_vif *vif)
 +{
 +      struct ath_softc *sc = hw->priv;
 +
 +      ath9k_calculate_summary_state(hw, vif);
 +
 +      if (ath9k_uses_beacons(vif->type)) {
 +              int error;
 +              ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
 +              /* This may fail because the upper levels do not have beacons
 +               * properly configured yet.  That's OK; we assume they will be
 +               * configured properly later, at which point we will be
 +               * notified via the info_changed method and can set up beacons
 +               * there.
 +               */
 +              error = ath_beacon_alloc(sc, vif);
 +              if (!error)
 +                      ath_beacon_config(sc, vif);
 +      }
  }
  
 -static void ath9k_reclaim_beacon(struct ath_softc *sc,
 -                               struct ieee80211_vif *vif)
 +
 +static int ath9k_add_interface(struct ieee80211_hw *hw,
 +                             struct ieee80211_vif *vif)
  {
 +      struct ath_softc *sc = hw->priv;
 +      struct ath_hw *ah = sc->sc_ah;
 +      struct ath_common *common = ath9k_hw_common(ah);
        struct ath_vif *avp = (void *)vif->drv_priv;
 +      int ret = 0;
  
 -      /* Disable SWBA interrupt */
 -      sc->sc_ah->imask &= ~ATH9K_INT_SWBA;
 -      ath9k_ps_wakeup(sc);
 -      ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_ah->imask);
 -      ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
 -      tasklet_kill(&sc->bcon_tasklet);
 -      ath9k_ps_restore(sc);
 +      mutex_lock(&sc->mutex);
  
 -      ath_beacon_return(sc, avp);
 -      sc->sc_flags &= ~SC_OP_BEACONS;
 +      switch (vif->type) {
 +      case NL80211_IFTYPE_STATION:
 +      case NL80211_IFTYPE_WDS:
 +      case NL80211_IFTYPE_ADHOC:
 +      case NL80211_IFTYPE_AP:
 +      case NL80211_IFTYPE_MESH_POINT:
 +              break;
 +      default:
 +              ath_err(common, "Interface type %d not yet supported\n",
 +                      vif->type);
 +              ret = -EOPNOTSUPP;
 +              goto out;
 +      }
  
 -      if (sc->nbcnvifs > 0) {
 -              /* Re-enable beaconing */
 -              sc->sc_ah->imask |= ATH9K_INT_SWBA;
 -              ath9k_ps_wakeup(sc);
 -              ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_ah->imask);
 -              ath9k_ps_restore(sc);
 +      if (ath9k_uses_beacons(vif->type)) {
 +              if (sc->nbcnvifs >= ATH_BCBUF) {
 +                      ath_err(common, "Not enough beacon buffers when adding"
 +                              " a new interface of type: %i\n",
 +                              vif->type);
 +                      ret = -ENOBUFS;
 +                      goto out;
 +              }
 +      }
 +
 +      if ((vif->type == NL80211_IFTYPE_ADHOC) &&
 +          sc->nvifs > 0) {
 +              ath_err(common, "Cannot create an ADHOC interface when other"
 +                      " interfaces already exist.\n");
 +              ret = -EINVAL;
 +              goto out;
        }
 +
 +      ath_dbg(common, ATH_DBG_CONFIG,
 +              "Attach a VIF of type: %d\n", vif->type);
 +
 +      /* Set the VIF opmode */
 +      avp->av_opmode = vif->type;
 +      avp->av_bslot = -1;
 +
 +      sc->nvifs++;
 +
 +      ath9k_do_vif_add_setup(hw, vif);
 +out:
 +      mutex_unlock(&sc->mutex);
 +      return ret;
  }
  
  static int ath9k_change_interface(struct ieee80211_hw *hw,
                                  enum nl80211_iftype new_type,
                                  bool p2p)
  {
 -      struct ath_wiphy *aphy = hw->priv;
 -      struct ath_softc *sc = aphy->sc;
 +      struct ath_softc *sc = hw->priv;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        int ret = 0;
  
        ath_dbg(common, ATH_DBG_CONFIG, "Change Interface\n");
        mutex_lock(&sc->mutex);
  
 -      switch (new_type) {
 -      case NL80211_IFTYPE_AP:
 -      case NL80211_IFTYPE_ADHOC:
 +      /* See if new interface type is valid. */
 +      if ((new_type == NL80211_IFTYPE_ADHOC) &&
 +          (sc->nvifs > 1)) {
 +              ath_err(common, "When using ADHOC, it must be the only"
 +                      " interface.\n");
 +              ret = -EINVAL;
 +              goto out;
 +      }
 +
 +      if (ath9k_uses_beacons(new_type) &&
 +          !ath9k_uses_beacons(vif->type)) {
                if (sc->nbcnvifs >= ATH_BCBUF) {
                        ath_err(common, "No beacon slot available\n");
                        ret = -ENOBUFS;
                        goto out;
                }
 -              break;
 -      case NL80211_IFTYPE_STATION:
 -              /* Stop ANI */
 -              sc->sc_flags &= ~SC_OP_ANI_RUN;
 -              del_timer_sync(&common->ani.timer);
 -              if ((vif->type == NL80211_IFTYPE_AP) ||
 -                  (vif->type == NL80211_IFTYPE_ADHOC))
 -                      ath9k_reclaim_beacon(sc, vif);
 -              break;
 -      default:
 -              ath_err(common, "Interface type %d not yet supported\n",
 -                              vif->type);
 -              ret = -ENOTSUPP;
 -              goto out;
        }
 +
 +      /* Clean up old vif stuff */
 +      if (ath9k_uses_beacons(vif->type))
 +              ath9k_reclaim_beacon(sc, vif);
 +
 +      /* Add new settings */
        vif->type = new_type;
        vif->p2p = p2p;
  
 +      ath9k_do_vif_add_setup(hw, vif);
  out:
        mutex_unlock(&sc->mutex);
        return ret;
  static void ath9k_remove_interface(struct ieee80211_hw *hw,
                                   struct ieee80211_vif *vif)
  {
 -      struct ath_wiphy *aphy = hw->priv;
 -      struct ath_softc *sc = aphy->sc;
 +      struct ath_softc *sc = hw->priv;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
  
        ath_dbg(common, ATH_DBG_CONFIG, "Detach Interface\n");
  
        mutex_lock(&sc->mutex);
  
 -      /* Stop ANI */
 -      sc->sc_flags &= ~SC_OP_ANI_RUN;
 -      del_timer_sync(&common->ani.timer);
 +      sc->nvifs--;
  
        /* Reclaim beacon resources */
 -      if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
 -          (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) ||
 -          (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT))
 +      if (ath9k_uses_beacons(vif->type))
                ath9k_reclaim_beacon(sc, vif);
  
 -      sc->nvifs--;
 +      ath9k_calculate_summary_state(hw, NULL);
  
        mutex_unlock(&sc->mutex);
  }
@@@ -1611,11 -1566,12 +1609,11 @@@ static void ath9k_disable_ps(struct ath
  
  static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
  {
 -      struct ath_wiphy *aphy = hw->priv;
 -      struct ath_softc *sc = aphy->sc;
 +      struct ath_softc *sc = hw->priv;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ieee80211_conf *conf = &hw->conf;
 -      bool disable_radio;
 +      bool disable_radio = false;
  
        mutex_lock(&sc->mutex);
  
         * the end.
         */
        if (changed & IEEE80211_CONF_CHANGE_IDLE) {
 -              bool enable_radio;
 -              bool all_wiphys_idle;
 -              bool idle = !!(conf->flags & IEEE80211_CONF_IDLE);
 -
 -              spin_lock_bh(&sc->wiphy_lock);
 -              all_wiphys_idle =  ath9k_all_wiphys_idle(sc);
 -              ath9k_set_wiphy_idle(aphy, idle);
 -
 -              enable_radio = (!idle && all_wiphys_idle);
 -
 -              /*
 -               * After we unlock here its possible another wiphy
 -               * can be re-renabled so to account for that we will
 -               * only disable the radio toward the end of this routine
 -               * if by then all wiphys are still idle.
 -               */
 -              spin_unlock_bh(&sc->wiphy_lock);
 -
 -              if (enable_radio) {
 -                      sc->ps_idle = false;
 +              sc->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE);
 +              if (!sc->ps_idle) {
                        ath_radio_enable(sc, hw);
                        ath_dbg(common, ATH_DBG_CONFIG,
                                "not-idle: enabling radio\n");
 +              } else {
 +                      disable_radio = true;
                }
        }
  
                if (ah->curchan)
                        old_pos = ah->curchan - &ah->channels[0];
  
 -              aphy->chan_idx = pos;
 -              aphy->chan_is_ht = conf_is_ht(conf);
                if (hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
                        sc->sc_flags |= SC_OP_OFFCHANNEL;
                else
                        sc->sc_flags &= ~SC_OP_OFFCHANNEL;
  
                ath_dbg(common, ATH_DBG_CONFIG, "Set channel: %d MHz\n",
                        curchan->center_freq);
  
 -              /* XXX: remove me eventualy */
 -              ath9k_update_ichannel(sc, hw, &sc->sc_ah->channels[pos]);
 +              ath9k_cmn_update_ichannel(&sc->sc_ah->channels[pos],
 +                                        curchan, conf->channel_type);
  
                /* update survey stats for the old channel before switching */
                spin_lock_irqsave(&common->cc_lock, flags);
                        ath_update_survey_nf(sc, old_pos);
        }
  
        if (changed & IEEE80211_CONF_CHANGE_POWER) {
                sc->config.txpowlimit = 2 * conf->power_level;
                ath9k_ps_wakeup(sc);
 -              ath_update_txpow(sc);
 +              ath9k_cmn_update_txpow(ah, sc->curtxpow,
 +                                     sc->config.txpowlimit, &sc->curtxpow);
                ath9k_ps_restore(sc);
        }
  
 -      spin_lock_bh(&sc->wiphy_lock);
 -      disable_radio = ath9k_all_wiphys_idle(sc);
 -      spin_unlock_bh(&sc->wiphy_lock);
 -
        if (disable_radio) {
                ath_dbg(common, ATH_DBG_CONFIG, "idle: disabling radio\n");
 -              sc->ps_idle = true;
                ath_radio_disable(sc, hw);
        }
  
@@@ -1758,7 -1748,8 +1756,7 @@@ static void ath9k_configure_filter(stru
                                   unsigned int *total_flags,
                                   u64 multicast)
  {
 -      struct ath_wiphy *aphy = hw->priv;
 -      struct ath_softc *sc = aphy->sc;
 +      struct ath_softc *sc = hw->priv;
        u32 rfilt;
  
        changed_flags &= SUPPORTED_FILTERS;
@@@ -1778,7 -1769,8 +1776,7 @@@ static int ath9k_sta_add(struct ieee802
                         struct ieee80211_vif *vif,
                         struct ieee80211_sta *sta)
  {
 -      struct ath_wiphy *aphy = hw->priv;
 -      struct ath_softc *sc = aphy->sc;
 +      struct ath_softc *sc = hw->priv;
  
        ath_node_attach(sc, sta);
  
@@@ -1789,7 -1781,8 +1787,7 @@@ static int ath9k_sta_remove(struct ieee
                            struct ieee80211_vif *vif,
                            struct ieee80211_sta *sta)
  {
 -      struct ath_wiphy *aphy = hw->priv;
 -      struct ath_softc *sc = aphy->sc;
 +      struct ath_softc *sc = hw->priv;
  
        ath_node_detach(sc, sta);
  
  static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue,
                         const struct ieee80211_tx_queue_params *params)
  {
 -      struct ath_wiphy *aphy = hw->priv;
 -      struct ath_softc *sc = aphy->sc;
 +      struct ath_softc *sc = hw->priv;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_txq *txq;
        struct ath9k_tx_queue_info qi;
@@@ -1843,7 -1837,8 +1841,7 @@@ static int ath9k_set_key(struct ieee802
                         struct ieee80211_sta *sta,
                         struct ieee80211_key_conf *key)
  {
 -      struct ath_wiphy *aphy = hw->priv;
 -      struct ath_softc *sc = aphy->sc;
 +      struct ath_softc *sc = hw->priv;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        int ret = 0;
  
@@@ -1887,7 -1882,8 +1885,7 @@@ static void ath9k_bss_info_changed(stru
                                   struct ieee80211_bss_conf *bss_conf,
                                   u32 changed)
  {
 -      struct ath_wiphy *aphy = hw->priv;
 -      struct ath_softc *sc = aphy->sc;
 +      struct ath_softc *sc = hw->priv;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath_vif *avp = (void *)vif->drv_priv;
        if ((changed & BSS_CHANGED_BEACON) ||
            ((changed & BSS_CHANGED_BEACON_ENABLED) && bss_conf->enable_beacon)) {
                ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
 -              error = ath_beacon_alloc(aphy, vif);
 +              error = ath_beacon_alloc(sc, vif);
                if (!error)
                        ath_beacon_config(sc, vif);
        }
                if (vif->type == NL80211_IFTYPE_AP) {
                        sc->sc_flags |= SC_OP_TSF_RESET;
                        ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
 -                      error = ath_beacon_alloc(aphy, vif);
 +                      error = ath_beacon_alloc(sc, vif);
                        if (!error)
                                ath_beacon_config(sc, vif);
                } else {
  
  static u64 ath9k_get_tsf(struct ieee80211_hw *hw)
  {
 +      struct ath_softc *sc = hw->priv;
        u64 tsf;
 -      struct ath_wiphy *aphy = hw->priv;
 -      struct ath_softc *sc = aphy->sc;
  
        mutex_lock(&sc->mutex);
        ath9k_ps_wakeup(sc);
  
  static void ath9k_set_tsf(struct ieee80211_hw *hw, u64 tsf)
  {
 -      struct ath_wiphy *aphy = hw->priv;
 -      struct ath_softc *sc = aphy->sc;
 +      struct ath_softc *sc = hw->priv;
  
        mutex_lock(&sc->mutex);
        ath9k_ps_wakeup(sc);
  
  static void ath9k_reset_tsf(struct ieee80211_hw *hw)
  {
 -      struct ath_wiphy *aphy = hw->priv;
 -      struct ath_softc *sc = aphy->sc;
 +      struct ath_softc *sc = hw->priv;
  
        mutex_lock(&sc->mutex);
  
@@@ -2032,9 -2031,10 +2030,9 @@@ static int ath9k_ampdu_action(struct ie
                              struct ieee80211_vif *vif,
                              enum ieee80211_ampdu_mlme_action action,
                              struct ieee80211_sta *sta,
 -                            u16 tid, u16 *ssn)
 +                            u16 tid, u16 *ssn, u8 buf_size)
  {
 -      struct ath_wiphy *aphy = hw->priv;
 -      struct ath_softc *sc = aphy->sc;
 +      struct ath_softc *sc = hw->priv;
        int ret = 0;
  
        local_bh_disable();
  static int ath9k_get_survey(struct ieee80211_hw *hw, int idx,
                             struct survey_info *survey)
  {
 -      struct ath_wiphy *aphy = hw->priv;
 -      struct ath_softc *sc = aphy->sc;
 +      struct ath_softc *sc = hw->priv;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ieee80211_supported_band *sband;
        struct ieee80211_channel *chan;
        return 0;
  }
  
 -static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
 -{
 -      struct ath_wiphy *aphy = hw->priv;
 -      struct ath_softc *sc = aphy->sc;
 -
 -      mutex_lock(&sc->mutex);
 -      if (ath9k_wiphy_scanning(sc)) {
 -              /*
 -               * There is a race here in mac80211 but fixing it requires
 -               * we revisit how we handle the scan complete callback.
 -               * After mac80211 fixes we will not have configured hardware
 -               * to the home channel nor would we have configured the RX
 -               * filter yet.
 -               */
 -              mutex_unlock(&sc->mutex);
 -              return;
 -      }
 -
 -      aphy->state = ATH_WIPHY_SCAN;
 -      ath9k_wiphy_pause_all_forced(sc, aphy);
 -      mutex_unlock(&sc->mutex);
 -}
 -
 -/*
 - * XXX: this requires a revisit after the driver
 - * scan_complete gets moved to another place/removed in mac80211.
 - */
 -static void ath9k_sw_scan_complete(struct ieee80211_hw *hw)
 -{
 -      struct ath_wiphy *aphy = hw->priv;
 -      struct ath_softc *sc = aphy->sc;
 -
 -      mutex_lock(&sc->mutex);
 -      aphy->state = ATH_WIPHY_ACTIVE;
 -      mutex_unlock(&sc->mutex);
 -}
 -
  static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
  {
 -      struct ath_wiphy *aphy = hw->priv;
 -      struct ath_softc *sc = aphy->sc;
 +      struct ath_softc *sc = hw->priv;
        struct ath_hw *ah = sc->sc_ah;
  
        mutex_lock(&sc->mutex);
@@@ -2143,6 -2182,8 +2141,6 @@@ struct ieee80211_ops ath9k_ops = 
        .reset_tsf          = ath9k_reset_tsf,
        .ampdu_action       = ath9k_ampdu_action,
        .get_survey         = ath9k_get_survey,
 -      .sw_scan_start      = ath9k_sw_scan_start,
 -      .sw_scan_complete   = ath9k_sw_scan_complete,
        .rfkill_poll        = ath9k_rfkill_poll_state,
        .set_coverage_class = ath9k_set_coverage_class,
  };
@@@ -19,6 -19,7 +19,6 @@@
  
  #define BITS_PER_BYTE           8
  #define OFDM_PLCP_BITS          22
 -#define HT_RC_2_MCS(_rc)        ((_rc) & 0x1f)
  #define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)
  #define L_STF                   8
  #define L_LTF                   8
@@@ -31,6 -32,7 +31,6 @@@
  #define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
  #define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
  
 -#define OFDM_SIFS_TIME            16
  
  static u16 bits_per_symbol[][2] = {
        /* 20MHz 40MHz */
@@@ -55,9 -57,8 +55,9 @@@ static void ath_tx_complete_buf(struct 
  static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
                             struct list_head *head);
  static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len);
 -static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
 -                           int nframes, int nbad, int txok, bool update_rc);
 +static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
 +                           struct ath_tx_status *ts, int nframes, int nbad,
 +                           int txok, bool update_rc);
  static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
                              int seqno);
  
@@@ -168,7 -169,7 +168,7 @@@ static void ath_tx_flush_tid(struct ath
                        ath_tx_update_baw(sc, tid, fi->seqno);
                        ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
                } else {
 -                      ath_tx_send_normal(sc, txq, tid, &bf_head);
 +                      ath_tx_send_normal(sc, txq, NULL, &bf_head);
                }
                spin_lock_bh(&txq->axq_lock);
        }
@@@ -296,6 -297,7 +296,6 @@@ static struct ath_buf* ath_clone_txbuf(
  
        ATH_TXBUF_RESET(tbf);
  
 -      tbf->aphy = bf->aphy;
        tbf->bf_mpdu = bf->bf_mpdu;
        tbf->bf_buf_addr = bf->bf_buf_addr;
        memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
@@@ -343,7 -345,7 +343,7 @@@ static void ath_tx_complete_aggr(struc
        struct ath_node *an = NULL;
        struct sk_buff *skb;
        struct ieee80211_sta *sta;
 -      struct ieee80211_hw *hw;
 +      struct ieee80211_hw *hw = sc->hw;
        struct ieee80211_hdr *hdr;
        struct ieee80211_tx_info *tx_info;
        struct ath_atx_tid *tid = NULL;
        hdr = (struct ieee80211_hdr *)skb->data;
  
        tx_info = IEEE80211_SKB_CB(skb);
 -      hw = bf->aphy->hw;
  
        memcpy(rates, tx_info->control.rates, sizeof(rates));
  
                            !bf->bf_stale || bf_next != NULL)
                                list_move_tail(&bf->list, &bf_head);
  
 -                      ath_tx_rc_status(bf, ts, 1, 1, 0, false);
 +                      ath_tx_rc_status(sc, bf, ts, 1, 1, 0, false);
                        ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
                                0, 0);
  
  
        ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
        while (bf) {
 -              txfail = txpending = 0;
 +              txfail = txpending = sendbar = 0;
                bf_next = bf->bf_next;
  
                skb = bf->bf_mpdu;
  
                        if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
                                memcpy(tx_info->control.rates, rates, sizeof(rates));
 -                              ath_tx_rc_status(bf, ts, nframes, nbad, txok, true);
 +                              ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, true);
                                rc_update = false;
                        } else {
 -                              ath_tx_rc_status(bf, ts, nframes, nbad, txok, false);
 +                              ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, false);
                        }
  
                        ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
  
                                                bf->bf_state.bf_type |=
                                                        BUF_XRETRY;
 -                                              ath_tx_rc_status(bf, ts, nframes,
 +                                              ath_tx_rc_status(sc, bf, ts, nframes,
                                                                nbad, 0, false);
                                                ath_tx_complete_buf(sc, bf, txq,
                                                                    &bf_head,
  
        rcu_read_unlock();
  
 -      if (needreset)
 +      if (needreset) {
 +              spin_unlock_bh(&sc->sc_pcu_lock);
                ath_reset(sc, false);
 +              spin_lock_bh(&sc->sc_pcu_lock);
 +      }
  }
  
  static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
@@@ -856,10 -856,7 +856,10 @@@ int ath_tx_aggr_start(struct ath_softc 
  
        txtid->state |= AGGR_ADDBA_PROGRESS;
        txtid->paused = true;
 -      *ssn = txtid->seq_start;
 +      *ssn = txtid->seq_start = txtid->seq_next;
 +
 +      memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
 +      txtid->baw_head = txtid->baw_tail = 0;
  
        return 0;
  }
@@@ -945,7 -942,7 +945,7 @@@ struct ath_txq *ath_txq_setup(struct at
                [WME_AC_VI] = ATH_TXQ_AC_VI,
                [WME_AC_VO] = ATH_TXQ_AC_VO,
        };
 -      int qnum, i;
 +      int axq_qnum, i;
  
        memset(&qi, 0, sizeof(qi));
        qi.tqi_subtype = subtype_txq_to_hwq[subtype];
                        qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
                                        TXQ_FLAG_TXDESCINT_ENABLE;
        }
 -      qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
 -      if (qnum == -1) {
 +      axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
 +      if (axq_qnum == -1) {
                /*
                 * NB: don't print a message, this happens
                 * normally on parts with too few tx queues
                 */
                return NULL;
        }
 -      if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
 +      if (axq_qnum >= ARRAY_SIZE(sc->tx.txq)) {
                ath_err(common, "qnum %u out of range, max %zu!\n",
 -                      qnum, ARRAY_SIZE(sc->tx.txq));
 -              ath9k_hw_releasetxqueue(ah, qnum);
 +                      axq_qnum, ARRAY_SIZE(sc->tx.txq));
 +              ath9k_hw_releasetxqueue(ah, axq_qnum);
                return NULL;
        }
 -      if (!ATH_TXQ_SETUP(sc, qnum)) {
 -              struct ath_txq *txq = &sc->tx.txq[qnum];
 +      if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
 +              struct ath_txq *txq = &sc->tx.txq[axq_qnum];
  
 -              txq->axq_qnum = qnum;
 +              txq->axq_qnum = axq_qnum;
 +              txq->mac80211_qnum = -1;
                txq->axq_link = NULL;
                INIT_LIST_HEAD(&txq->axq_q);
                INIT_LIST_HEAD(&txq->axq_acq);
                txq->axq_depth = 0;
                txq->axq_ampdu_depth = 0;
                txq->axq_tx_inprogress = false;
 -              sc->tx.txqsetup |= 1<<qnum;
 +              sc->tx.txqsetup |= 1<<axq_qnum;
  
                txq->txq_headidx = txq->txq_tailidx = 0;
                for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
                        INIT_LIST_HEAD(&txq->txq_fifo[i]);
                INIT_LIST_HEAD(&txq->txq_fifo_pending);
        }
 -      return &sc->tx.txq[qnum];
 +      return &sc->tx.txq[axq_qnum];
  }
  
  int ath_txq_update(struct ath_softc *sc, int qnum,
@@@ -1209,17 -1205,8 +1209,17 @@@ bool ath_drain_all_txq(struct ath_soft
                ath_err(common, "Failed to stop TX DMA!\n");
  
        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
 -              if (ATH_TXQ_SETUP(sc, i))
 -                      ath_draintxq(sc, &sc->tx.txq[i], retry_tx);
 +              if (!ATH_TXQ_SETUP(sc, i))
 +                      continue;
 +
 +              /*
 +               * The caller will resume queues with ieee80211_wake_queues.
 +               * Mark the queue as not stopped to prevent ath_tx_complete
 +               * from waking the queue too early.
 +               */
 +              txq = &sc->tx.txq[i];
 +              txq->stopped = false;
 +              ath_draintxq(sc, txq, retry_tx);
        }
  
        return !npend;
@@@ -1231,59 -1218,46 +1231,59 @@@ void ath_tx_cleanupq(struct ath_softc *
        sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
  }
  
 +/*
 + * For each entry on axq_acq, and for each of its tids, try to schedule
 + * packets for transmit until axq_ampdu_depth has reached
 + * ATH_AGGR_MIN_QDEPTH.
 + */
  void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
  {
 -      struct ath_atx_ac *ac;
 -      struct ath_atx_tid *tid;
 +      struct ath_atx_ac *ac, *ac_tmp, *last_ac;
 +      struct ath_atx_tid *tid, *last_tid;
  
 -      if (list_empty(&txq->axq_acq))
 +      if (list_empty(&txq->axq_acq) ||
 +          txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
                return;
  
        ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
 -      list_del(&ac->list);
 -      ac->sched = false;
 +      last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
  
 -      do {
 -              if (list_empty(&ac->tid_q))
 -                      return;
 +      list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
 +              last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
 +              list_del(&ac->list);
 +              ac->sched = false;
  
 -              tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
 -              list_del(&tid->list);
 -              tid->sched = false;
 +              while (!list_empty(&ac->tid_q)) {
 +                      tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
 +                                             list);
 +                      list_del(&tid->list);
 +                      tid->sched = false;
  
 -              if (tid->paused)
 -                      continue;
 +                      if (tid->paused)
 +                              continue;
  
 -              ath_tx_sched_aggr(sc, txq, tid);
 +                      ath_tx_sched_aggr(sc, txq, tid);
  
 -              /*
 -               * add tid to round-robin queue if more frames
 -               * are pending for the tid
 -               */
 -              if (!list_empty(&tid->buf_q))
 -                      ath_tx_queue_tid(txq, tid);
 +                      /*
 +                       * add tid to round-robin queue if more frames
 +                       * are pending for the tid
 +                       */
 +                      if (!list_empty(&tid->buf_q))
 +                              ath_tx_queue_tid(txq, tid);
  
 -              break;
 -      } while (!list_empty(&ac->tid_q));
 +                      if (tid == last_tid ||
 +                          txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
 +                              break;
 +              }
  
 -      if (!list_empty(&ac->tid_q)) {
 -              if (!ac->sched) {
 -                      ac->sched = true;
 -                      list_add_tail(&ac->list, &txq->axq_acq);
 +              if (!list_empty(&ac->tid_q)) {
 +                      if (!ac->sched) {
 +                              ac->sched = true;
 +                              list_add_tail(&ac->list, &txq->axq_acq);
 +                      }
                }
 +
 +              if (ac == last_ac ||
 +                  txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
 +                      return;
        }
  }
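/*
 * Reduced sketch of the scheduling policy described in the comment on
 * ath_txq_schedule() above: walk the access categories round-robin, and
 * within each AC walk its tids, stopping as soon as the hardware queue
 * holds the minimum aggregate depth.  The structs, names and MIN_QDEPTH
 * value are simplified placeholders, not the driver's data structures.
 */
#define MIN_QDEPTH 2

struct sched_tid { int pending; };
struct sched_ac  { struct sched_tid *tids; int ntids; };

static int sched_one_tid(struct sched_tid *t)
{
	/* would form and submit one aggregate; report frames queued to hw */
	int sent = t->pending ? 1 : 0;

	t->pending -= sent;
	return sent;
}

static void schedule_txq(struct sched_ac *acs, int nacs, int *hw_depth)
{
	int a, t;

	for (a = 0; a < nacs; a++) {
		for (t = 0; t < acs[a].ntids; t++) {
			*hw_depth += sched_one_tid(&acs[a].tids[t]);
			if (*hw_depth >= MIN_QDEPTH)
				return;
		}
	}
}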
  
@@@ -1327,7 -1301,6 +1327,7 @@@ static void ath_tx_txqaddbuf(struct ath
                INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
                list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
                INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
 +              TX_STAT_INC(txq->axq_qnum, puttxbuf);
                ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
                ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
                        txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
                list_splice_tail_init(head, &txq->axq_q);
  
                if (txq->axq_link == NULL) {
 +                      TX_STAT_INC(txq->axq_qnum, puttxbuf);
                        ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
                        ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
                                txq->axq_qnum, ito64(bf->bf_daddr),
                }
                ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
                                       &txq->axq_link);
 +              TX_STAT_INC(txq->axq_qnum, txstart);
                ath9k_hw_txstart(ah, txq->axq_qnum);
        }
        txq->axq_depth++;
@@@ -1364,6 -1335,7 +1364,6 @@@ static void ath_tx_send_ampdu(struct at
        struct list_head bf_head;
  
        bf->bf_state.bf_type |= BUF_AMPDU;
 -      TX_STAT_INC(txctl->txq->axq_qnum, a_queued);
  
        /*
         * Do not queue to h/w when any of the following conditions is true:
                 * Add this frame to software queue for scheduling later
                 * for aggregation.
                 */
 +              TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
                list_add_tail(&bf->list, &tid->buf_q);
                ath_tx_queue_tid(txctl->txq, tid);
                return;
                ath_tx_addto_baw(sc, tid, fi->seqno);
  
        /* Queue to h/w without aggregation */
 +      TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
        bf->bf_lastbf = bf;
        ath_buf_set_rate(sc, bf, fi->framelen);
        ath_tx_txqaddbuf(sc, txctl->txq, &bf_head);
@@@ -1446,7 -1416,8 +1446,7 @@@ static enum ath9k_pkt_type get_hw_packe
  static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
                             int framelen)
  {
 -      struct ath_wiphy *aphy = hw->priv;
 -      struct ath_softc *sc = aphy->sc;
 +      struct ath_softc *sc = hw->priv;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ieee80211_sta *sta = tx_info->control.sta;
        struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
@@@ -1664,7 -1635,8 +1664,7 @@@ static struct ath_buf *ath_tx_setup_buf
                                           struct ath_txq *txq,
                                           struct sk_buff *skb)
  {
 -      struct ath_wiphy *aphy = hw->priv;
 -      struct ath_softc *sc = aphy->sc;
 +      struct ath_softc *sc = hw->priv;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_frame_info *fi = get_frame_info(skb);
  
        ATH_TXBUF_RESET(bf);
  
 -      bf->aphy = aphy;
        bf->bf_flags = setup_tx_flags(skb);
        bf->bf_mpdu = skb;
  
@@@ -1752,6 -1725,9 +1752,9 @@@ static void ath_tx_start_dma(struct ath
                        ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc,
                                                   bf->bf_state.bfs_paprd);
  
+               if (txctl->paprd)
+                       bf->bf_state.bfs_paprd_timestamp = jiffies;
                ath_tx_send_normal(sc, txctl->txq, tid, &bf_head);
        }
  
@@@ -1765,7 -1741,8 +1768,7 @@@ int ath_tx_start(struct ieee80211_hw *h
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct ieee80211_sta *sta = info->control.sta;
 -      struct ath_wiphy *aphy = hw->priv;
 -      struct ath_softc *sc = aphy->sc;
 +      struct ath_softc *sc = hw->priv;
        struct ath_txq *txq = txctl->txq;
        struct ath_buf *bf;
        int padpos, padsize;
        spin_lock_bh(&txq->axq_lock);
        if (txq == sc->tx.txq_map[q] &&
            ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
 -              ath_mac80211_stop_queue(sc, q);
 +              ieee80211_stop_queue(sc->hw, q);
                txq->stopped = 1;
        }
        spin_unlock_bh(&txq->axq_lock);
  /*****************/
  
  static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
 -                          struct ath_wiphy *aphy, int tx_flags, int ftype,
 -                          struct ath_txq *txq)
 +                          int tx_flags, int ftype, struct ath_txq *txq)
  {
        struct ieee80211_hw *hw = sc->hw;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
  
        ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
  
 -      if (aphy)
 -              hw = aphy->hw;
 -
        if (tx_flags & ATH_TX_BAR)
                tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
  
                                        PS_WAIT_FOR_TX_ACK));
        }
  
 -      if (unlikely(ftype))
 -              ath9k_tx_status(hw, skb, ftype);
 -      else {
 -              q = skb_get_queue_mapping(skb);
 -              if (txq == sc->tx.txq_map[q]) {
 -                      spin_lock_bh(&txq->axq_lock);
 -                      if (WARN_ON(--txq->pending_frames < 0))
 -                              txq->pending_frames = 0;
 -                      spin_unlock_bh(&txq->axq_lock);
 -              }
 +      q = skb_get_queue_mapping(skb);
 +      if (txq == sc->tx.txq_map[q]) {
 +              spin_lock_bh(&txq->axq_lock);
 +              if (WARN_ON(--txq->pending_frames < 0))
 +                      txq->pending_frames = 0;
  
 -              ieee80211_tx_status(hw, skb);
 +              if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
 +                      ieee80211_wake_queue(sc->hw, q);
 +                      txq->stopped = 0;
 +              }
 +              spin_unlock_bh(&txq->axq_lock);
        }
 +
 +      ieee80211_tx_status(hw, skb);
  }
  
  static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
        bf->bf_buf_addr = 0;
  
        if (bf->bf_state.bfs_paprd) {
-               if (!sc->paprd_pending)
+               if (time_after(jiffies,
+                               bf->bf_state.bfs_paprd_timestamp +
+                               msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
                        dev_kfree_skb_any(skb);
                else
                        complete(&sc->paprd_complete);
        } else {
 -              ath_debug_stat_tx(sc, bf, ts);
 -              ath_tx_complete(sc, skb, bf->aphy, tx_flags,
 +              ath_debug_stat_tx(sc, bf, ts, txq);
 +              ath_tx_complete(sc, skb, tx_flags,
                                bf->bf_state.bfs_ftype, txq);
        }
        /* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
        spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
  }
  
 -static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
 -                           int nframes, int nbad, int txok, bool update_rc)
 +static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
 +                           struct ath_tx_status *ts, int nframes, int nbad,
 +                           int txok, bool update_rc)
  {
        struct sk_buff *skb = bf->bf_mpdu;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
 -      struct ieee80211_hw *hw = bf->aphy->hw;
 -      struct ath_softc *sc = bf->aphy->sc;
 +      struct ieee80211_hw *hw = sc->hw;
        struct ath_hw *ah = sc->sc_ah;
        u8 i, tx_rateindex;
  
        tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
  }
  
 -static void ath_wake_mac80211_queue(struct ath_softc *sc, int qnum)
 -{
 -      struct ath_txq *txq;
 -
 -      txq = sc->tx.txq_map[qnum];
 -      spin_lock_bh(&txq->axq_lock);
 -      if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
 -              if (ath_mac80211_start_queue(sc, qnum))
 -                      txq->stopped = 0;
 -      }
 -      spin_unlock_bh(&txq->axq_lock);
 -}
 -
  static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
  {
        struct ath_hw *ah = sc->sc_ah;
        struct ath_tx_status ts;
        int txok;
        int status;
 -      int qnum;
  
        ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
                txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
                spin_lock_bh(&txq->axq_lock);
                if (list_empty(&txq->axq_q)) {
                        txq->axq_link = NULL;
 +                      if (sc->sc_flags & SC_OP_TXAGGR)
 +                              ath_txq_schedule(sc, txq);
                        spin_unlock_bh(&txq->axq_lock);
                        break;
                }
                        spin_unlock_bh(&txq->axq_lock);
                        break;
                }
 +              TX_STAT_INC(txq->axq_qnum, txprocdesc);
  
                /*
                 * Remove ath_buf's of the same transmit unit from txq,
                         */
                        if (ts.ts_status & ATH9K_TXERR_XRETRY)
                                bf->bf_state.bf_type |= BUF_XRETRY;
 -                      ath_tx_rc_status(bf, &ts, 1, txok ? 0 : 1, txok, true);
 +                      ath_tx_rc_status(sc, bf, &ts, 1, txok ? 0 : 1, txok, true);
                }
  
 -              qnum = skb_get_queue_mapping(bf->bf_mpdu);
 -
                if (bf_isampdu(bf))
                        ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok,
                                             true);
                else
                        ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
  
 -              if (txq == sc->tx.txq_map[qnum])
 -                      ath_wake_mac80211_queue(sc, qnum);
 -
                spin_lock_bh(&txq->axq_lock);
 +
                if (sc->sc_flags & SC_OP_TXAGGR)
                        ath_txq_schedule(sc, txq);
                spin_unlock_bh(&txq->axq_lock);
        }
  }
  
 +static void ath_hw_pll_work(struct work_struct *work)
 +{
 +      struct ath_softc *sc = container_of(work, struct ath_softc,
 +                                          hw_pll_work.work);
 +      static int count;
 +
 +      if (AR_SREV_9485(sc->sc_ah)) {
 +              if (ar9003_get_pll_sqsum_dvc(sc->sc_ah) >= 0x40000) {
 +                      count++;
 +
 +                      if (count == 3) {
 +                              /* Rx is hung for more than 500ms. Reset it */
 +                              ath_reset(sc, true);
 +                              count = 0;
 +                      }
 +              } else
 +                      count = 0;
 +
 +              ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, HZ/5);
 +      }
 +}
 +
  static void ath_tx_complete_poll_work(struct work_struct *work)
  {
        struct ath_softc *sc = container_of(work, struct ath_softc,
        struct ath_txq *txq;
        int i;
        bool needreset = false;
 +#ifdef CONFIG_ATH9K_DEBUGFS
 +      sc->tx_complete_poll_work_seen++;
 +#endif
  
        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
                if (ATH_TXQ_SETUP(sc, i)) {
                                } else {
                                        txq->axq_tx_inprogress = true;
                                }
 +                      } else {
 +                              /* If the queue has pending buffers, then it
 +                               * should be doing tx work (and have axq_depth).
 +                               * We shouldn't get into this state,
 +                               * but we do.
 +                               */
 +                              if (!(sc->sc_flags & (SC_OP_OFFCHANNEL)) &&
 +                                  (txq->pending_frames > 0 ||
 +                                   !list_empty(&txq->axq_acq) ||
 +                                   txq->stopped)) {
 +                                      ath_err(ath9k_hw_common(sc->sc_ah),
 +                                              "txq: %p axq_qnum: %u,"
 +                                              " mac80211_qnum: %i"
 +                                              " axq_link: %p"
 +                                              " pending frames: %i"
 +                                              " axq_acq empty: %i"
 +                                              " stopped: %i"
 +                                              " axq_depth: 0  Attempting to"
 +                                              " restart tx logic.\n",
 +                                              txq, txq->axq_qnum,
 +                                              txq->mac80211_qnum,
 +                                              txq->axq_link,
 +                                              txq->pending_frames,
 +                                              list_empty(&txq->axq_acq),
 +                                              txq->stopped);
 +                                      ath_txq_schedule(sc, txq);
 +                              }
                        }
                        spin_unlock_bh(&txq->axq_lock);
                }
@@@ -2205,6 -2150,7 +2210,6 @@@ void ath_tx_edma_tasklet(struct ath_sof
        struct list_head bf_head;
        int status;
        int txok;
 -      int qnum;
  
        for (;;) {
                status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
                if (!bf_isampdu(bf)) {
                        if (txs.ts_status & ATH9K_TXERR_XRETRY)
                                bf->bf_state.bf_type |= BUF_XRETRY;
 -                      ath_tx_rc_status(bf, &txs, 1, txok ? 0 : 1, txok, true);
 +                      ath_tx_rc_status(sc, bf, &txs, 1, txok ? 0 : 1, txok, true);
                }
  
 -              qnum = skb_get_queue_mapping(bf->bf_mpdu);
 -
                if (bf_isampdu(bf))
                        ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs,
                                             txok, true);
                        ath_tx_complete_buf(sc, bf, txq, &bf_head,
                                            &txs, txok, 0);
  
 -              if (txq == sc->tx.txq_map[qnum])
 -                      ath_wake_mac80211_queue(sc, qnum);
 -
                spin_lock_bh(&txq->axq_lock);
 +
                if (!list_empty(&txq->txq_fifo_pending)) {
                        INIT_LIST_HEAD(&bf_head);
                        bf = list_first_entry(&txq->txq_fifo_pending,
@@@ -2335,7 -2285,6 +2340,7 @@@ int ath_tx_init(struct ath_softc *sc, i
        }
  
        INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
 +      INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
  
        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
                error = ath_tx_edma_init(sc);
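
The PAPRD completion path above frees the frame once time_after() reports that ATH_PAPRD_TIMEOUT has elapsed since bfs_paprd_timestamp was recorded. A minimal standalone sketch, in plain user-space C rather than kernel code, of the wraparound-safe comparison that time_after() performs; the tick values below are made-up placeholders:

#include <stdio.h>

/* Same idea as the kernel's time_after(a, b): true if a is later than b,
 * even if the tick counter has wrapped around. */
static int after(unsigned long a, unsigned long b)
{
	return (long)(b - a) < 0;
}

int main(void)
{
	unsigned long jiffies = 1000;          /* pretend current tick count      */
	unsigned long paprd_stamp = 100;       /* analogue of bfs_paprd_timestamp */
	unsigned long timeout_ticks = 500;     /* stands in for ATH_PAPRD_TIMEOUT */

	printf("expired=%d\n", after(jiffies, paprd_stamp + timeout_ticks));
	return 0;
}
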
  #define _IWL6050_MODULE_FIRMWARE(api) IWL6050_FW_PRE #api ".ucode"
  #define IWL6050_MODULE_FIRMWARE(api) _IWL6050_MODULE_FIRMWARE(api)
  
 -#define IWL6000G2A_FW_PRE "iwlwifi-6000g2a-"
 -#define _IWL6000G2A_MODULE_FIRMWARE(api) IWL6000G2A_FW_PRE #api ".ucode"
 -#define IWL6000G2A_MODULE_FIRMWARE(api) _IWL6000G2A_MODULE_FIRMWARE(api)
 +#define IWL6005_FW_PRE "iwlwifi-6000g2a-"
 +#define _IWL6005_MODULE_FIRMWARE(api) IWL6005_FW_PRE #api ".ucode"
 +#define IWL6005_MODULE_FIRMWARE(api) _IWL6005_MODULE_FIRMWARE(api)
  
 -#define IWL6000G2B_FW_PRE "iwlwifi-6000g2b-"
 -#define _IWL6000G2B_MODULE_FIRMWARE(api) IWL6000G2B_FW_PRE #api ".ucode"
 -#define IWL6000G2B_MODULE_FIRMWARE(api) _IWL6000G2B_MODULE_FIRMWARE(api)
 +#define IWL6030_FW_PRE "iwlwifi-6000g2b-"
 +#define _IWL6030_MODULE_FIRMWARE(api) IWL6030_FW_PRE #api ".ucode"
 +#define IWL6030_MODULE_FIRMWARE(api) _IWL6030_MODULE_FIRMWARE(api)
  
  static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
  {
@@@ -90,7 -90,7 +90,7 @@@ static void iwl6050_additional_nic_conf
                                CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
  }
  
 -static void iwl6050g2_additional_nic_config(struct iwl_priv *priv)
 +static void iwl6150_additional_nic_config(struct iwl_priv *priv)
  {
        /* Indicate calibration version to uCode. */
        if (priv->cfg->ops->lib->eeprom_ops.calib_version(priv) >= 6)
@@@ -354,7 -354,7 +354,7 @@@ static struct iwl_lib_ops iwl6000_lib 
        }
  };
  
 -static struct iwl_lib_ops iwl6000g2b_lib = {
 +static struct iwl_lib_ops iwl6030_lib = {
        .set_hw_params = iwl6000_hw_set_hw_params,
        .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
        .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
@@@ -430,8 -430,8 +430,8 @@@ static struct iwl_nic_ops iwl6050_nic_o
        .additional_nic_config = &iwl6050_additional_nic_config,
  };
  
 -static struct iwl_nic_ops iwl6050g2_nic_ops = {
 -      .additional_nic_config = &iwl6050g2_additional_nic_config,
 +static struct iwl_nic_ops iwl6150_nic_ops = {
 +      .additional_nic_config = &iwl6150_additional_nic_config,
  };
  
  static const struct iwl_ops iwl6000_ops = {
@@@ -451,17 -451,17 +451,17 @@@ static const struct iwl_ops iwl6050_op
        .ieee80211_ops = &iwlagn_hw_ops,
  };
  
 -static const struct iwl_ops iwl6050g2_ops = {
 +static const struct iwl_ops iwl6150_ops = {
        .lib = &iwl6000_lib,
        .hcmd = &iwlagn_hcmd,
        .utils = &iwlagn_hcmd_utils,
        .led = &iwlagn_led_ops,
 -      .nic = &iwl6050g2_nic_ops,
 +      .nic = &iwl6150_nic_ops,
        .ieee80211_ops = &iwlagn_hw_ops,
  };
  
 -static const struct iwl_ops iwl6000g2b_ops = {
 -      .lib = &iwl6000g2b_lib,
 +static const struct iwl_ops iwl6030_ops = {
 +      .lib = &iwl6030_lib,
        .hcmd = &iwlagn_bt_hcmd,
        .utils = &iwlagn_hcmd_utils,
        .led = &iwlagn_led_ops,
@@@ -479,6 -479,7 +479,6 @@@ static struct iwl_base_params iwl6000_b
        .shadow_ram_support = true,
        .led_compensation = 51,
        .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
 -      .supports_idle = true,
        .adv_thermal_throttle = true,
        .support_ct_kill_exit = true,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
@@@ -502,6 -503,7 +502,6 @@@ static struct iwl_base_params iwl6050_b
        .shadow_ram_support = true,
        .led_compensation = 51,
        .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
 -      .supports_idle = true,
        .adv_thermal_throttle = true,
        .support_ct_kill_exit = true,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
@@@ -524,6 -526,7 +524,6 @@@ static struct iwl_base_params iwl6000_g
        .shadow_ram_support = true,
        .led_compensation = 57,
        .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
 -      .supports_idle = true,
        .adv_thermal_throttle = true,
        .support_ct_kill_exit = true,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
@@@ -552,11 -555,11 +552,11 @@@ static struct iwl_bt_params iwl6000_bt_
  };
  
  #define IWL_DEVICE_6005                                               \
 -      .fw_name_pre = IWL6000G2A_FW_PRE,                       \
 +      .fw_name_pre = IWL6005_FW_PRE,                  \
        .ucode_api_max = IWL6000G2_UCODE_API_MAX,               \
        .ucode_api_min = IWL6000G2_UCODE_API_MIN,               \
 -      .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,             \
 -      .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,     \
 +      .eeprom_ver = EEPROM_6005_EEPROM_VERSION,               \
 +      .eeprom_calib_ver = EEPROM_6005_TX_POWER_VERSION,       \
        .ops = &iwl6000_ops,                                    \
        .mod_params = &iwlagn_mod_params,                       \
        .base_params = &iwl6000_g2_base_params,                 \
@@@ -581,12 -584,12 +581,12 @@@ struct iwl_cfg iwl6005_2bg_cfg = 
  };
  
  #define IWL_DEVICE_6030                                               \
 -      .fw_name_pre = IWL6000G2B_FW_PRE,                       \
 +      .fw_name_pre = IWL6030_FW_PRE,                  \
        .ucode_api_max = IWL6000G2_UCODE_API_MAX,               \
        .ucode_api_min = IWL6000G2_UCODE_API_MIN,               \
 -      .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,             \
 -      .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,     \
 -      .ops = &iwl6000g2b_ops,                                 \
 +      .eeprom_ver = EEPROM_6030_EEPROM_VERSION,               \
 +      .eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION,       \
 +      .ops = &iwl6030_ops,                                    \
        .mod_params = &iwlagn_mod_params,                       \
        .base_params = &iwl6000_g2_base_params,                 \
        .bt_params = &iwl6000_bt_params,                        \
@@@ -678,6 -681,8 +678,8 @@@ struct iwl_cfg iwl6000i_2bg_cfg = 
        .fw_name_pre = IWL6050_FW_PRE,                          \
        .ucode_api_max = IWL6050_UCODE_API_MAX,                 \
        .ucode_api_min = IWL6050_UCODE_API_MIN,                 \
+       .valid_tx_ant = ANT_AB,         /* .cfg overwrite */    \
+       .valid_rx_ant = ANT_AB,         /* .cfg overwrite */    \
        .ops = &iwl6050_ops,                                    \
        .eeprom_ver = EEPROM_6050_EEPROM_VERSION,               \
        .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION,       \
@@@ -703,9 -708,9 +705,9 @@@ struct iwl_cfg iwl6150_bgn_cfg = 
        .fw_name_pre = IWL6050_FW_PRE,
        .ucode_api_max = IWL6050_UCODE_API_MAX,
        .ucode_api_min = IWL6050_UCODE_API_MIN,
 -      .eeprom_ver = EEPROM_6050G2_EEPROM_VERSION,
 -      .eeprom_calib_ver = EEPROM_6050G2_TX_POWER_VERSION,
 -      .ops = &iwl6050g2_ops,
 +      .eeprom_ver = EEPROM_6150_EEPROM_VERSION,
 +      .eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION,
 +      .ops = &iwl6150_ops,
        .mod_params = &iwlagn_mod_params,
        .base_params = &iwl6050_base_params,
        .ht_params = &iwl6000_ht_params,
@@@ -731,5 -736,5 +733,5 @@@ struct iwl_cfg iwl6000_3agn_cfg = 
  
  MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
  MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX));
 -MODULE_FIRMWARE(IWL6000G2A_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
 -MODULE_FIRMWARE(IWL6000G2B_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
 +MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
 +MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
@@@ -59,7 -59,6 +59,7 @@@
  #include "iwl-sta.h"
  #include "iwl-agn-calib.h"
  #include "iwl-agn.h"
 +#include "iwl-agn-led.h"
  
  
  /******************************************************************************
@@@ -462,12 -461,8 +462,12 @@@ static void iwl_rx_reply_alive(struct i
        if (palive->is_valid == UCODE_VALID_OK)
                queue_delayed_work(priv->workqueue, pwork,
                                   msecs_to_jiffies(5));
 -      else
 -              IWL_WARN(priv, "uCode did not respond OK.\n");
 +      else {
 +              IWL_WARN(priv, "%s uCode did not respond OK.\n",
 +                      (palive->ver_subtype == INITIALIZE_SUBTYPE) ?
 +                      "init" : "runtime");
 +              queue_work(priv->workqueue, &priv->restart);
 +      }
  }
  
  static void iwl_bg_beacon_update(struct work_struct *work)
@@@ -704,18 -699,18 +704,18 @@@ static void iwl_bg_ucode_trace(unsigne
        }
  }
  
 -static void iwl_rx_beacon_notif(struct iwl_priv *priv,
 -                              struct iwl_rx_mem_buffer *rxb)
 +static void iwlagn_rx_beacon_notif(struct iwl_priv *priv,
 +                                 struct iwl_rx_mem_buffer *rxb)
  {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
 -      struct iwl4965_beacon_notif *beacon =
 -              (struct iwl4965_beacon_notif *)pkt->u.raw;
 +      struct iwlagn_beacon_notif *beacon = (void *)pkt->u.raw;
  #ifdef CONFIG_IWLWIFI_DEBUG
 +      u16 status = le16_to_cpu(beacon->beacon_notify_hdr.status.status);
        u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
  
 -      IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
 -              "tsf %d %d rate %d\n",
 -              le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
 +      IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d "
 +              "tsf:0x%.8x%.8x rate:%d\n",
 +              status & TX_STATUS_MSK,
                beacon->beacon_notify_hdr.failure_frame,
                le32_to_cpu(beacon->ibss_mgr_status),
                le32_to_cpu(beacon->high_tsf),
@@@ -818,7 -813,7 +818,7 @@@ static void iwl_setup_rx_handlers(struc
        priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif;
        priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
            iwl_rx_pm_debug_statistics_notif;
 -      priv->rx_handlers[BEACON_NOTIFICATION] = iwl_rx_beacon_notif;
 +      priv->rx_handlers[BEACON_NOTIFICATION] = iwlagn_rx_beacon_notif;
  
        /*
         * The same handler is used for both the REPLY to a discrete
   * the appropriate handlers, including command responses,
   * frame-received notifications, and other notifications.
   */
 -void iwl_rx_handle(struct iwl_priv *priv)
 +static void iwl_rx_handle(struct iwl_priv *priv)
  {
        struct iwl_rx_mem_buffer *rxb;
        struct iwl_rx_packet *pkt;
                        (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
                        (pkt->hdr.cmd != REPLY_TX);
  
 +              /*
 +               * Do the notification wait before RX handlers so
 +               * even if the RX handler consumes the RXB we have
 +               * access to it in the notification wait entry.
 +               */
 +              if (!list_empty(&priv->_agn.notif_waits)) {
 +                      struct iwl_notification_wait *w;
 +
 +                      spin_lock(&priv->_agn.notif_wait_lock);
 +                      list_for_each_entry(w, &priv->_agn.notif_waits, list) {
 +                              if (w->cmd == pkt->hdr.cmd) {
 +                                      w->triggered = true;
 +                                      if (w->fn)
 +                                              w->fn(priv, pkt);
 +                              }
 +                      }
 +                      spin_unlock(&priv->_agn.notif_wait_lock);
 +
 +                      wake_up_all(&priv->_agn.notif_waitq);
 +              }
 +
                /* Based on type of command response or notification,
                 *   handle those that need handling via function in
                 *   rx_handlers table.  See iwl_setup_rx_handlers() */
@@@ -1183,6 -1157,9 +1183,9 @@@ static void iwl_irq_tasklet_legacy(stru
        /* only Re-enable if disabled by irq */
        if (test_bit(STATUS_INT_ENABLED, &priv->status))
                iwl_enable_interrupts(priv);
+       /* Re-enable RF_KILL if it occurred */
+       else if (handled & CSR_INT_BIT_RF_KILL)
+               iwl_enable_rfkill_int(priv);
  
  #ifdef CONFIG_IWLWIFI_DEBUG
        if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
@@@ -1397,6 -1374,9 +1400,9 @@@ static void iwl_irq_tasklet(struct iwl_
        /* only Re-enable if disabled by irq */
        if (test_bit(STATUS_INT_ENABLED, &priv->status))
                iwl_enable_interrupts(priv);
+       /* Re-enable RF_KILL if it occurred */
+       else if (handled & CSR_INT_BIT_RF_KILL)
+               iwl_enable_rfkill_int(priv);
  }
  
  /* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */
@@@ -2652,6 -2632,13 +2658,6 @@@ static void iwl_alive_start(struct iwl_
  
        IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
  
 -      if (priv->card_alive.is_valid != UCODE_VALID_OK) {
 -              /* We had an error bringing up the hardware, so take it
 -               * all the way back down so we can try again */
 -              IWL_DEBUG_INFO(priv, "Alive failed.\n");
 -              goto restart;
 -      }
 -
        /* Initialize uCode has loaded Runtime uCode ... verify inst image.
         * This is a paranoid check, because we would not have gotten the
         * "runtime" alive if code weren't properly loaded.  */
        /* At this point, the NIC is initialized and operational */
        iwl_rf_kill_ct_config(priv);
  
 -      iwl_leds_init(priv);
 -
        IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
        wake_up_interruptible(&priv->wait_command_queue);
  
@@@ -2780,6 -2769,7 +2786,6 @@@ static void __iwl_down(struct iwl_priv 
                         priv->cfg->bt_params->bt_init_traffic_load;
        else
                priv->bt_traffic_load = 0;
 -      priv->bt_sco_active = false;
        priv->bt_full_concurrent = false;
        priv->bt_ci_compliance = 0;
  
@@@ -3073,7 -3063,8 +3079,7 @@@ static void iwl_bg_run_time_calib_work(
        }
  
        if (priv->start_calib) {
 -              if (priv->cfg->bt_params &&
 -                  priv->cfg->bt_params->bt_statistics) {
 +              if (iwl_bt_statistics(priv)) {
                        iwl_chain_noise_calibration(priv,
                                        (void *)&priv->_agn.statistics_bt);
                        iwl_sensitivity_calibration(priv,
@@@ -3098,7 -3089,7 +3104,7 @@@ static void iwl_bg_restart(struct work_
  
        if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
                struct iwl_rxon_context *ctx;
 -              bool bt_sco, bt_full_concurrent;
 +              bool bt_full_concurrent;
                u8 bt_ci_compliance;
                u8 bt_load;
                u8 bt_status;
                 * re-configure the hw when we reconfigure the BT
                 * command.
                 */
 -              bt_sco = priv->bt_sco_active;
                bt_full_concurrent = priv->bt_full_concurrent;
                bt_ci_compliance = priv->bt_ci_compliance;
                bt_load = priv->bt_traffic_load;
  
                __iwl_down(priv);
  
 -              priv->bt_sco_active = bt_sco;
                priv->bt_full_concurrent = bt_full_concurrent;
                priv->bt_ci_compliance = bt_ci_compliance;
                priv->bt_traffic_load = bt_load;
@@@ -3185,8 -3178,6 +3191,8 @@@ static int iwl_mac_setup_register(struc
                    IEEE80211_HW_SPECTRUM_MGMT |
                    IEEE80211_HW_REPORTS_TX_ACK_STATUS;
  
 +      hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
 +
        if (!priv->cfg->base_params->broken_powersave)
                hw->flags |= IEEE80211_HW_SUPPORTS_PS |
                             IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
                hw->wiphy->interface_modes |= ctx->exclusive_interface_modes;
        }
  
 +      hw->wiphy->max_remain_on_channel_duration = 1000;
 +
        hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
 -                          WIPHY_FLAG_DISABLE_BEACON_HINTS;
 +                          WIPHY_FLAG_DISABLE_BEACON_HINTS |
 +                          WIPHY_FLAG_IBSS_RSN;
  
        /*
         * For now, disable PS by default because it affects
                priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
                        &priv->bands[IEEE80211_BAND_5GHZ];
  
 +      iwl_leds_init(priv);
 +
        ret = ieee80211_register_hw(priv->hw);
        if (ret) {
                IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
@@@ -3277,7 -3263,7 +3283,7 @@@ int iwlagn_mac_start(struct ieee80211_h
                }
        }
  
 -      iwl_led_start(priv);
 +      iwlagn_led_enable(priv);
  
  out:
        priv->is_open = 1;
@@@ -3359,14 -3345,6 +3365,14 @@@ int iwlagn_mac_set_key(struct ieee80211
                return -EOPNOTSUPP;
        }
  
 +      /*
 +       * To support IBSS RSN, don't program group keys in IBSS, the
 +       * hardware will then not attempt to decrypt the frames.
 +       */
 +      if (vif->type == NL80211_IFTYPE_ADHOC &&
 +          !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
 +              return -EOPNOTSUPP;
 +
        sta_id = iwl_sta_id_or_broadcast(priv, vif_priv->ctx, sta);
        if (sta_id == IWL_INVALID_STATION)
                return -EINVAL;
  int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
                            struct ieee80211_vif *vif,
                            enum ieee80211_ampdu_mlme_action action,
 -                          struct ieee80211_sta *sta, u16 tid, u16 *ssn)
 +                          struct ieee80211_sta *sta, u16 tid, u16 *ssn,
 +                          u8 buf_size)
  {
        struct iwl_priv *priv = hw->priv;
        int ret = -EINVAL;
 +      struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
  
        IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
                     sta->addr, tid);
                }
                break;
        case IEEE80211_AMPDU_TX_OPERATIONAL:
 +              /*
 +               * If the limit is 0, then it wasn't initialised yet,
 +               * use the default. We can do that since we take the
 +               * minimum below, and we don't want to go above our
 +               * default due to hardware restrictions.
 +               */
 +              if (sta_priv->max_agg_bufsize == 0)
 +                      sta_priv->max_agg_bufsize =
 +                              LINK_QUAL_AGG_FRAME_LIMIT_DEF;
 +
 +              /*
 +               * Even though in theory the peer could have different
 +               * aggregation reorder buffer sizes for different sessions,
 +               * our ucode doesn't allow for that and has a global limit
 +               * for each station. Therefore, use the minimum of all the
 +               * aggregation sessions and our default value.
 +               */
 +              sta_priv->max_agg_bufsize =
 +                      min(sta_priv->max_agg_bufsize, buf_size);
 +
                if (priv->cfg->ht_params &&
                    priv->cfg->ht_params->use_rts_for_aggregation) {
 -                      struct iwl_station_priv *sta_priv =
 -                              (void *) sta->drv_priv;
 -
                        /*
                         * switch to RTS/CTS if it is the prefer protection
                         * method for HT traffic
  
                        sta_priv->lq_sta.lq.general_params.flags |=
                                LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
 -                      iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif),
 -                                      &sta_priv->lq_sta.lq, CMD_ASYNC, false);
                }
 +
 +              sta_priv->lq_sta.lq.agg_params.agg_frame_cnt_limit =
 +                      sta_priv->max_agg_bufsize;
 +
 +              iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif),
 +                              &sta_priv->lq_sta.lq, CMD_ASYNC, false);
                ret = 0;
                break;
        }
@@@ -3754,97 -3709,6 +3760,97 @@@ done
        IWL_DEBUG_MAC80211(priv, "leave\n");
  }
  
 +static void iwlagn_disable_roc(struct iwl_priv *priv)
 +{
 +      struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN];
 +      struct ieee80211_channel *chan = ACCESS_ONCE(priv->hw->conf.channel);
 +
 +      lockdep_assert_held(&priv->mutex);
 +
 +      if (!ctx->is_active)
 +              return;
 +
 +      ctx->staging.dev_type = RXON_DEV_TYPE_2STA;
 +      ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
 +      iwl_set_rxon_channel(priv, chan, ctx);
 +      iwl_set_flags_for_band(priv, ctx, chan->band, NULL);
 +
 +      priv->_agn.hw_roc_channel = NULL;
 +
 +      iwlcore_commit_rxon(priv, ctx);
 +
 +      ctx->is_active = false;
 +}
 +
 +static void iwlagn_bg_roc_done(struct work_struct *work)
 +{
 +      struct iwl_priv *priv = container_of(work, struct iwl_priv,
 +                                           _agn.hw_roc_work.work);
 +
 +      mutex_lock(&priv->mutex);
 +      ieee80211_remain_on_channel_expired(priv->hw);
 +      iwlagn_disable_roc(priv);
 +      mutex_unlock(&priv->mutex);
 +}
 +
 +#ifdef CONFIG_IWL5000
 +static int iwl_mac_remain_on_channel(struct ieee80211_hw *hw,
 +                                   struct ieee80211_channel *channel,
 +                                   enum nl80211_channel_type channel_type,
 +                                   int duration)
 +{
 +      struct iwl_priv *priv = hw->priv;
 +      int err = 0;
 +
 +      if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
 +              return -EOPNOTSUPP;
 +
 +      if (!(priv->contexts[IWL_RXON_CTX_PAN].interface_modes &
 +                                      BIT(NL80211_IFTYPE_P2P_CLIENT)))
 +              return -EOPNOTSUPP;
 +
 +      mutex_lock(&priv->mutex);
 +
 +      if (priv->contexts[IWL_RXON_CTX_PAN].is_active ||
 +          test_bit(STATUS_SCAN_HW, &priv->status)) {
 +              err = -EBUSY;
 +              goto out;
 +      }
 +
 +      priv->contexts[IWL_RXON_CTX_PAN].is_active = true;
 +      priv->_agn.hw_roc_channel = channel;
 +      priv->_agn.hw_roc_chantype = channel_type;
 +      priv->_agn.hw_roc_duration = DIV_ROUND_UP(duration * 1000, 1024);
 +      iwlcore_commit_rxon(priv, &priv->contexts[IWL_RXON_CTX_PAN]);
 +      queue_delayed_work(priv->workqueue, &priv->_agn.hw_roc_work,
 +                         msecs_to_jiffies(duration + 20));
 +
 +      msleep(IWL_MIN_SLOT_TIME); /* TU is almost ms */
 +      ieee80211_ready_on_channel(priv->hw);
 +
 + out:
 +      mutex_unlock(&priv->mutex);
 +
 +      return err;
 +}
 +
 +static int iwl_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
 +{
 +      struct iwl_priv *priv = hw->priv;
 +
 +      if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
 +              return -EOPNOTSUPP;
 +
 +      cancel_delayed_work_sync(&priv->_agn.hw_roc_work);
 +
 +      mutex_lock(&priv->mutex);
 +      iwlagn_disable_roc(priv);
 +      mutex_unlock(&priv->mutex);
 +
 +      return 0;
 +}
 +#endif
 +
  /*****************************************************************************
   *
   * driver setup and teardown
@@@ -3866,7 -3730,6 +3872,7 @@@ static void iwl_setup_deferred_work(str
        INIT_WORK(&priv->bt_runtime_config, iwl_bg_bt_runtime_config);
        INIT_DELAYED_WORK(&priv->init_alive_start, iwl_bg_init_alive_start);
        INIT_DELAYED_WORK(&priv->alive_start, iwl_bg_alive_start);
 +      INIT_DELAYED_WORK(&priv->_agn.hw_roc_work, iwlagn_bg_roc_done);
  
        iwl_setup_scan_deferred_work(priv);
  
@@@ -4035,8 -3898,6 +4041,8 @@@ struct ieee80211_ops iwlagn_hw_ops = 
        .channel_switch = iwlagn_mac_channel_switch,
        .flush = iwlagn_mac_flush,
        .tx_last_beacon = iwl_mac_tx_last_beacon,
 +      .remain_on_channel = iwl_mac_remain_on_channel,
 +      .cancel_remain_on_channel = iwl_mac_cancel_remain_on_channel,
  };
  #endif
  
@@@ -4164,10 -4025,6 +4170,10 @@@ static int iwl_pci_probe(struct pci_de
        priv->contexts[IWL_RXON_CTX_PAN].mcast_queue = IWL_IPAN_MCAST_QUEUE;
        priv->contexts[IWL_RXON_CTX_PAN].interface_modes =
                BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP);
 +#ifdef CONFIG_IWL_P2P
 +      priv->contexts[IWL_RXON_CTX_PAN].interface_modes |=
 +              BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
 +#endif
        priv->contexts[IWL_RXON_CTX_PAN].ap_devtype = RXON_DEV_TYPE_CP;
        priv->contexts[IWL_RXON_CTX_PAN].station_devtype = RXON_DEV_TYPE_2STA;
        priv->contexts[IWL_RXON_CTX_PAN].unused_devtype = RXON_DEV_TYPE_P2P;
@@@ -4415,9 -4272,6 +4421,9 @@@ static void __devexit iwl_pci_remove(st
         * we need to set STATUS_EXIT_PENDING bit.
         */
        set_bit(STATUS_EXIT_PENDING, &priv->status);
 +
 +      iwl_leds_exit(priv);
 +
        if (priv->mac80211_registered) {
                ieee80211_unregister_hw(priv->hw);
                priv->mac80211_registered = 0;
@@@ -4638,49 -4492,6 +4644,49 @@@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_c
        {IWL_PCI_DEVICE(0x0896, 0x5025, iwl130_bgn_cfg)},
        {IWL_PCI_DEVICE(0x0896, 0x5027, iwl130_bg_cfg)},
  
 +/* 2x00 Series */
 +      {IWL_PCI_DEVICE(0x0890, 0x4022, iwl2000_2bgn_cfg)},
 +      {IWL_PCI_DEVICE(0x0891, 0x4222, iwl2000_2bgn_cfg)},
 +      {IWL_PCI_DEVICE(0x0890, 0x4422, iwl2000_2bgn_cfg)},
 +      {IWL_PCI_DEVICE(0x0890, 0x4026, iwl2000_2bg_cfg)},
 +      {IWL_PCI_DEVICE(0x0891, 0x4226, iwl2000_2bg_cfg)},
 +      {IWL_PCI_DEVICE(0x0890, 0x4426, iwl2000_2bg_cfg)},
 +
 +/* 2x30 Series */
 +      {IWL_PCI_DEVICE(0x0887, 0x4062, iwl2030_2bgn_cfg)},
 +      {IWL_PCI_DEVICE(0x0888, 0x4262, iwl2030_2bgn_cfg)},
 +      {IWL_PCI_DEVICE(0x0887, 0x4462, iwl2030_2bgn_cfg)},
 +      {IWL_PCI_DEVICE(0x0887, 0x4066, iwl2030_2bg_cfg)},
 +      {IWL_PCI_DEVICE(0x0888, 0x4266, iwl2030_2bg_cfg)},
 +      {IWL_PCI_DEVICE(0x0887, 0x4466, iwl2030_2bg_cfg)},
 +
 +/* 6x35 Series */
 +      {IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)},
 +      {IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)},
 +      {IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)},
 +      {IWL_PCI_DEVICE(0x088E, 0x4064, iwl6035_2abg_cfg)},
 +      {IWL_PCI_DEVICE(0x088F, 0x4264, iwl6035_2abg_cfg)},
 +      {IWL_PCI_DEVICE(0x088E, 0x4464, iwl6035_2abg_cfg)},
 +      {IWL_PCI_DEVICE(0x088E, 0x4066, iwl6035_2bg_cfg)},
 +      {IWL_PCI_DEVICE(0x088F, 0x4266, iwl6035_2bg_cfg)},
 +      {IWL_PCI_DEVICE(0x088E, 0x4466, iwl6035_2bg_cfg)},
 +
 +/* 200 Series */
 +      {IWL_PCI_DEVICE(0x0894, 0x0022, iwl200_bgn_cfg)},
 +      {IWL_PCI_DEVICE(0x0895, 0x0222, iwl200_bgn_cfg)},
 +      {IWL_PCI_DEVICE(0x0894, 0x0422, iwl200_bgn_cfg)},
 +      {IWL_PCI_DEVICE(0x0894, 0x0026, iwl200_bg_cfg)},
 +      {IWL_PCI_DEVICE(0x0895, 0x0226, iwl200_bg_cfg)},
 +      {IWL_PCI_DEVICE(0x0894, 0x0426, iwl200_bg_cfg)},
 +
 +/* 230 Series */
 +      {IWL_PCI_DEVICE(0x0892, 0x0062, iwl230_bgn_cfg)},
 +      {IWL_PCI_DEVICE(0x0893, 0x0262, iwl230_bgn_cfg)},
 +      {IWL_PCI_DEVICE(0x0892, 0x0462, iwl230_bgn_cfg)},
 +      {IWL_PCI_DEVICE(0x0892, 0x0066, iwl230_bg_cfg)},
 +      {IWL_PCI_DEVICE(0x0893, 0x0266, iwl230_bg_cfg)},
 +      {IWL_PCI_DEVICE(0x0892, 0x0466, iwl230_bg_cfg)},
 +
  #endif /* CONFIG_IWL5000 */
  
        {0}
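
For the iwl_mac_remain_on_channel() change above, hw_roc_duration converts mac80211's millisecond duration into 802.11 time units (1 TU = 1024 usec) with DIV_ROUND_UP(). A standalone sketch of that arithmetic in plain C, with an assumed 500 ms request:

#include <stdio.h>

/* Same rounding helper as the kernel's DIV_ROUND_UP(). */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int duration_ms = 500;                            /* example request  */
	unsigned int tu = DIV_ROUND_UP(duration_ms * 1000, 1024);  /* ms -> TU         */

	printf("%u ms -> %u TU\n", duration_ms, tu);               /* 500 ms -> 489 TU */
	return 0;
}
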
@@@ -502,7 -502,6 +502,7 @@@ static void wl1251_op_stop(struct ieee8
        wl->psm = 0;
        wl->tx_queue_stopped = false;
        wl->power_level = WL1251_DEFAULT_POWER_LEVEL;
 +      wl->rssi_thold = 0;
        wl->channel = WL1251_DEFAULT_CHANNEL;
  
        wl1251_debugfs_reset(wl);
@@@ -960,16 -959,6 +960,16 @@@ static void wl1251_op_bss_info_changed(
        if (ret < 0)
                goto out;
  
 +      if (changed & BSS_CHANGED_CQM) {
 +              ret = wl1251_acx_low_rssi(wl, bss_conf->cqm_rssi_thold,
 +                                        WL1251_DEFAULT_LOW_RSSI_WEIGHT,
 +                                        WL1251_DEFAULT_LOW_RSSI_DEPTH,
 +                                        WL1251_ACX_LOW_RSSI_TYPE_EDGE);
 +              if (ret < 0)
 +                      goto out;
 +              wl->rssi_thold = bss_conf->cqm_rssi_thold;
 +      }
 +
        if (changed & BSS_CHANGED_BSSID) {
                memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
  
  
        if (changed & BSS_CHANGED_BEACON) {
                beacon = ieee80211_beacon_get(hw, vif);
+               if (!beacon)
+                       goto out_sleep;
                ret = wl1251_cmd_template_set(wl, CMD_BEACON, beacon->data,
                                              beacon->len);
  
@@@ -1321,11 -1313,9 +1324,11 @@@ int wl1251_init_ieee80211(struct wl125
        wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
                IEEE80211_HW_SUPPORTS_PS |
                IEEE80211_HW_BEACON_FILTER |
 -              IEEE80211_HW_SUPPORTS_UAPSD;
 +              IEEE80211_HW_SUPPORTS_UAPSD |
 +              IEEE80211_HW_SUPPORTS_CQM_RSSI;
  
 -      wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
 +      wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
 +                                       BIT(NL80211_IFTYPE_ADHOC);
        wl->hw->wiphy->max_scan_ssids = 1;
        wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1251_band_2ghz;
  
@@@ -1387,7 -1377,6 +1390,7 @@@ struct ieee80211_hw *wl1251_alloc_hw(vo
        wl->psm_requested = false;
        wl->tx_queue_stopped = false;
        wl->power_level = WL1251_DEFAULT_POWER_LEVEL;
 +      wl->rssi_thold = 0;
        wl->beacon_int = WL1251_DEFAULT_BEACON_INT;
        wl->dtim_period = WL1251_DEFAULT_DTIM_PERIOD;
        wl->vif = NULL;
diff --combined net/batman-adv/unicast.c
@@@ -1,5 -1,5 +1,5 @@@
  /*
 - * Copyright (C) 2010 B.A.T.M.A.N. contributors:
 + * Copyright (C) 2010-2011 B.A.T.M.A.N. contributors:
   *
   * Andreas Langer
   *
@@@ -50,12 -50,12 +50,12 @@@ static struct sk_buff *frag_merge_packe
                skb = tfp->skb;
        }
  
+       if (skb_linearize(skb) < 0 || skb_linearize(tmp_skb) < 0)
+               goto err;
        skb_pull(tmp_skb, sizeof(struct unicast_frag_packet));
-       if (pskb_expand_head(skb, 0, tmp_skb->len, GFP_ATOMIC) < 0) {
-               /* free buffered skb, skb will be freed later */
-               kfree_skb(tfp->skb);
-               return NULL;
-       }
+       if (pskb_expand_head(skb, 0, tmp_skb->len, GFP_ATOMIC) < 0)
+               goto err;
  
        /* move free entry to end */
        tfp->skb = NULL;
        unicast_packet->packet_type = BAT_UNICAST;
  
        return skb;
+ err:
+       /* free buffered skb, skb will be freed later */
+       kfree_skb(tfp->skb);
+       return NULL;
  }
  
  static void frag_create_entry(struct list_head *head, struct sk_buff *skb)
@@@ -224,8 -229,7 +229,8 @@@ int frag_send_skb(struct sk_buff *skb, 
        struct unicast_frag_packet *frag1, *frag2;
        int uc_hdr_len = sizeof(struct unicast_packet);
        int ucf_hdr_len = sizeof(struct unicast_frag_packet);
 -      int data_len = skb->len;
 +      int data_len = skb->len - uc_hdr_len;
 +      int large_tail = 0;
  
        if (!bat_priv->primary_if)
                goto dropped;
        frag_skb = dev_alloc_skb(data_len - (data_len / 2) + ucf_hdr_len);
        if (!frag_skb)
                goto dropped;
 +      skb_reserve(frag_skb, ucf_hdr_len);
  
        unicast_packet = (struct unicast_packet *) skb->data;
        memcpy(&tmp_uc, unicast_packet, uc_hdr_len);
 -      skb_split(skb, frag_skb, data_len / 2);
 +      skb_split(skb, frag_skb, data_len / 2 + uc_hdr_len);
  
        if (my_skb_head_push(skb, ucf_hdr_len - uc_hdr_len) < 0 ||
            my_skb_head_push(frag_skb, ucf_hdr_len) < 0)
        memcpy(frag1->orig, bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
        memcpy(frag2, frag1, sizeof(struct unicast_frag_packet));
  
 -      frag1->flags |= UNI_FRAG_HEAD;
 -      frag2->flags &= ~UNI_FRAG_HEAD;
 +      if (data_len & 1)
 +              large_tail = UNI_FRAG_LARGETAIL;
 +
 +      frag1->flags = UNI_FRAG_HEAD | large_tail;
 +      frag2->flags = large_tail;
  
        frag1->seqno = htons((uint16_t)atomic_inc_return(
                             &batman_if->frag_seqno));
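
In frag_send_skb() above, skb_split() keeps the unicast header plus the first half of the payload in the original skb, and UNI_FRAG_LARGETAIL records that an odd-length payload leaves the tail fragment one byte larger. A standalone sketch of that arithmetic in plain C, with an assumed 1001-byte payload:

#include <stdio.h>

int main(void)
{
	unsigned int data_len = 1001;              /* payload after the unicast header  */
	unsigned int head = data_len / 2;          /* stays with the head fragment: 500 */
	unsigned int tail = data_len - head;       /* goes to the tail fragment:    501 */
	unsigned int large_tail = data_len & 1;    /* 1 -> UNI_FRAG_LARGETAIL is set    */

	printf("head=%u tail=%u large_tail=%u\n", head, tail, large_tail);
	return 0;
}
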
diff --combined net/core/dev.c
  #include <trace/events/skb.h>
  #include <linux/pci.h>
  #include <linux/inetdevice.h>
 +#include <linux/cpu_rmap.h>
  
  #include "net-sysfs.h"
  
@@@ -1287,7 -1286,7 +1287,7 @@@ static int __dev_close(struct net_devic
        return __dev_close_many(&single);
  }
  
 -int dev_close_many(struct list_head *head)
 +static int dev_close_many(struct list_head *head)
  {
        struct net_device *dev, *tmp;
        LIST_HEAD(tmp_list);
@@@ -1595,48 -1594,6 +1595,48 @@@ static void dev_queue_xmit_nit(struct s
        rcu_read_unlock();
  }
  
 +/* netif_setup_tc - Handle tc mappings on real_num_tx_queues change
 + * @dev: Network device
 + * @txq: number of queues available
 + *
 + * If real_num_tx_queues is changed the tc mappings may no longer be
 + * valid. To resolve this, verify that each tc mapping remains valid,
 + * and if not, reset the mapping to zero. With no priorities mapping to
 + * an offset/count pair, that pair will no longer be used. In the worst
 + * case, if TC0 becomes invalid nothing can be done, so disable priority
 + * mappings entirely. It is expected that drivers will fix this mapping
 + * if they can before calling netif_set_real_num_tx_queues.
 + */
 +static void netif_setup_tc(struct net_device *dev, unsigned int txq)
 +{
 +      int i;
 +      struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
 +
 +      /* If TC0 is invalidated disable TC mapping */
 +      if (tc->offset + tc->count > txq) {
 +              pr_warning("Number of in use tx queues changed, "
 +                         "invalidating tc mappings. Priority "
 +                         "traffic classification disabled!\n");
 +              dev->num_tc = 0;
 +              return;
 +      }
 +
 +      /* Invalidated prio to tc mappings set to TC0 */
 +      for (i = 1; i < TC_BITMASK + 1; i++) {
 +              int q = netdev_get_prio_tc_map(dev, i);
 +
 +              tc = &dev->tc_to_txq[q];
 +              if (tc->offset + tc->count > txq) {
 +                      pr_warning("Number of in use tx queues "
 +                                 "changed. Priority %i to tc "
 +                                 "mapping %i is no longer valid; "
 +                                 "setting map to 0\n",
 +                                 i, q);
 +                      netdev_set_prio_tc_map(dev, i, 0);
 +              }
 +      }
 +}
 +
  /*
   * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
   * greater then real_num_tx_queues stale skbs on the qdisc must be flushed.
@@@ -1656,9 -1613,6 +1656,9 @@@ int netif_set_real_num_tx_queues(struc
                if (rc)
                        return rc;
  
 +              if (dev->num_tc)
 +                      netif_setup_tc(dev, txq);
 +
                if (txq < dev->real_num_tx_queues)
                        qdisc_reset_all_tx_gt(dev, txq);
        }
@@@ -1858,7 -1812,7 +1858,7 @@@ EXPORT_SYMBOL(skb_checksum_help)
   *    It may return NULL if the skb requires no segmentation.  This is
   *    only possible when GSO is used for verifying header integrity.
   */
 -struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
 +struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features)
  {
        struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
        struct packet_type *ptype;
@@@ -2046,7 -2000,7 +2046,7 @@@ static bool can_checksum_protocol(unsig
                 protocol == htons(ETH_P_FCOE)));
  }
  
 -static int harmonize_features(struct sk_buff *skb, __be16 protocol, int features)
 +static u32 harmonize_features(struct sk_buff *skb, __be16 protocol, u32 features)
  {
        if (!can_checksum_protocol(features, protocol)) {
                features &= ~NETIF_F_ALL_CSUM;
        return features;
  }
  
 -int netif_skb_features(struct sk_buff *skb)
 +u32 netif_skb_features(struct sk_buff *skb)
  {
        __be16 protocol = skb->protocol;
 -      int features = skb->dev->features;
 +      u32 features = skb->dev->features;
  
        if (protocol == htons(ETH_P_8021Q)) {
                struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
@@@ -2106,7 -2060,7 +2106,7 @@@ int dev_hard_start_xmit(struct sk_buff 
        int rc = NETDEV_TX_OK;
  
        if (likely(!skb->next)) {
 -              int features;
 +              u32 features;
  
                /*
                 * If device doesn't need skb->dst, release it right now while
@@@ -2208,8 -2162,6 +2208,8 @@@ u16 __skb_tx_hash(const struct net_devi
                  unsigned int num_tx_queues)
  {
        u32 hash;
 +      u16 qoffset = 0;
 +      u16 qcount = num_tx_queues;
  
        if (skb_rx_queue_recorded(skb)) {
                hash = skb_get_rx_queue(skb);
                return hash;
        }
  
 +      if (dev->num_tc) {
 +              u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
 +              qoffset = dev->tc_to_txq[tc].offset;
 +              qcount = dev->tc_to_txq[tc].count;
 +      }
 +
        if (skb->sk && skb->sk->sk_hash)
                hash = skb->sk->sk_hash;
        else
                hash = (__force u16) skb->protocol ^ skb->rxhash;
        hash = jhash_1word(hash, hashrnd);
  
 -      return (u16) (((u64) hash * num_tx_queues) >> 32);
 +      return (u16) (((u64) hash * qcount) >> 32) + qoffset;
  }
  EXPORT_SYMBOL(__skb_tx_hash);
  
@@@ -2327,18 -2273,15 +2327,18 @@@ static inline int __dev_xmit_skb(struc
                                 struct netdev_queue *txq)
  {
        spinlock_t *root_lock = qdisc_lock(q);
 -      bool contended = qdisc_is_running(q);
 +      bool contended;
        int rc;
  
 +      qdisc_skb_cb(skb)->pkt_len = skb->len;
 +      qdisc_calculate_pkt_len(skb, q);
        /*
         * Heuristic to force contended enqueues to serialize on a
         * separate lock before trying to get qdisc main lock.
         * This permits __QDISC_STATE_RUNNING owner to get the lock more often
         * and dequeue packets faster.
         */
 +      contended = qdisc_is_running(q);
        if (unlikely(contended))
                spin_lock(&q->busylock);
  
                if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
                        skb_dst_force(skb);
  
 -              qdisc_skb_cb(skb)->pkt_len = skb->len;
                qdisc_bstats_update(q, skb);
  
                if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
                rc = NET_XMIT_SUCCESS;
        } else {
                skb_dst_force(skb);
 -              rc = qdisc_enqueue_root(skb, q);
 +              rc = q->enqueue(skb, q) & NET_XMIT_MASK;
                if (qdisc_run_begin(q)) {
                        if (unlikely(contended)) {
                                spin_unlock(&q->busylock);
@@@ -2589,53 -2533,6 +2589,53 @@@ EXPORT_SYMBOL(__skb_get_rxhash)
  struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
  EXPORT_SYMBOL(rps_sock_flow_table);
  
 +static struct rps_dev_flow *
 +set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 +          struct rps_dev_flow *rflow, u16 next_cpu)
 +{
 +      u16 tcpu;
 +
 +      tcpu = rflow->cpu = next_cpu;
 +      if (tcpu != RPS_NO_CPU) {
 +#ifdef CONFIG_RFS_ACCEL
 +              struct netdev_rx_queue *rxqueue;
 +              struct rps_dev_flow_table *flow_table;
 +              struct rps_dev_flow *old_rflow;
 +              u32 flow_id;
 +              u16 rxq_index;
 +              int rc;
 +
 +              /* Should we steer this flow to a different hardware queue? */
 +              if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap)
 +                      goto out;
 +              rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
 +              if (rxq_index == skb_get_rx_queue(skb))
 +                      goto out;
 +
 +              rxqueue = dev->_rx + rxq_index;
 +              flow_table = rcu_dereference(rxqueue->rps_flow_table);
 +              if (!flow_table)
 +                      goto out;
 +              flow_id = skb->rxhash & flow_table->mask;
 +              rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
 +                                                      rxq_index, flow_id);
 +              if (rc < 0)
 +                      goto out;
 +              old_rflow = rflow;
 +              rflow = &flow_table->flows[flow_id];
 +              rflow->cpu = next_cpu;
 +              rflow->filter = rc;
 +              if (old_rflow->filter == rflow->filter)
 +                      old_rflow->filter = RPS_NO_FILTER;
 +      out:
 +#endif
 +              rflow->last_qtail =
 +                      per_cpu(softnet_data, tcpu).input_queue_head;
 +      }
 +
 +      return rflow;
 +}
 +
  /*
   * get_rps_cpu is called from netif_receive_skb and returns the target
   * CPU from the RPS map of the receiving queue for a given skb.
@@@ -2707,9 -2604,12 +2707,9 @@@ static int get_rps_cpu(struct net_devic
                if (unlikely(tcpu != next_cpu) &&
                    (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
                     ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
 -                    rflow->last_qtail)) >= 0)) {
 -                      tcpu = rflow->cpu = next_cpu;
 -                      if (tcpu != RPS_NO_CPU)
 -                              rflow->last_qtail = per_cpu(softnet_data,
 -                                  tcpu).input_queue_head;
 -              }
 +                    rflow->last_qtail)) >= 0))
 +                      rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
 +
                if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
                        *rflowp = rflow;
                        cpu = tcpu;
@@@ -2730,46 -2630,6 +2730,46 @@@ done
        return cpu;
  }
  
 +#ifdef CONFIG_RFS_ACCEL
 +
 +/**
 + * rps_may_expire_flow - check whether an RFS hardware filter may be removed
 + * @dev: Device on which the filter was set
 + * @rxq_index: RX queue index
 + * @flow_id: Flow ID passed to ndo_rx_flow_steer()
 + * @filter_id: Filter ID returned by ndo_rx_flow_steer()
 + *
 + * Drivers that implement ndo_rx_flow_steer() should periodically call
 + * this function for each installed filter and remove the filters for
 + * which it returns %true.
 + */
 +bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
 +                       u32 flow_id, u16 filter_id)
 +{
 +      struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
 +      struct rps_dev_flow_table *flow_table;
 +      struct rps_dev_flow *rflow;
 +      bool expire = true;
 +      int cpu;
 +
 +      rcu_read_lock();
 +      flow_table = rcu_dereference(rxqueue->rps_flow_table);
 +      if (flow_table && flow_id <= flow_table->mask) {
 +              rflow = &flow_table->flows[flow_id];
 +              cpu = ACCESS_ONCE(rflow->cpu);
 +              if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
 +                  ((int)(per_cpu(softnet_data, cpu).input_queue_head -
 +                         rflow->last_qtail) <
 +                   (int)(10 * flow_table->mask)))
 +                      expire = false;
 +      }
 +      rcu_read_unlock();
 +      return expire;
 +}
 +EXPORT_SYMBOL(rps_may_expire_flow);
 +
 +#endif /* CONFIG_RFS_ACCEL */
 +
  /* Called from hardirq (IPI) context */
  static void rps_trigger_softirq(void *data)
  {
@@@ -4054,15 -3914,12 +4054,15 @@@ void *dev_seq_start(struct seq_file *se
  
  void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
  {
 -      struct net_device *dev = (v == SEQ_START_TOKEN) ?
 -                                first_net_device(seq_file_net(seq)) :
 -                                next_net_device((struct net_device *)v);
 +      struct net_device *dev = v;
 +
 +      if (v == SEQ_START_TOKEN)
 +              dev = first_net_device_rcu(seq_file_net(seq));
 +      else
 +              dev = next_net_device_rcu(dev);
  
        ++*pos;
 -      return rcu_dereference(dev);
 +      return dev;
  }
  
  void dev_seq_stop(struct seq_file *seq, void *v)
@@@ -4719,17 -4576,6 +4719,17 @@@ int dev_set_mtu(struct net_device *dev
  EXPORT_SYMBOL(dev_set_mtu);
  
  /**
 + *    dev_set_group - Change group this device belongs to
 + *    @dev: device
 + *    @new_group: group this device should belong to
 + */
 +void dev_set_group(struct net_device *dev, int new_group)
 +{
 +      dev->group = new_group;
 +}
 +EXPORT_SYMBOL(dev_set_group);
 +
 +/**
   *    dev_set_mac_address - Change Media Access Control Address
   *    @dev: device
   *    @sa: new address
@@@ -5219,49 -5065,41 +5219,49 @@@ static void rollback_registered(struct 
        rollback_registered_many(&single);
  }
  
 -unsigned long netdev_fix_features(unsigned long features, const char *name)
 +u32 netdev_fix_features(struct net_device *dev, u32 features)
  {
 +      /* Fix illegal checksum combinations */
 +      if ((features & NETIF_F_HW_CSUM) &&
 +          (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
 +              netdev_info(dev, "mixed HW and IP checksum settings.\n");
 +              features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
 +      }
 +
 +      if ((features & NETIF_F_NO_CSUM) &&
 +          (features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
 +              netdev_info(dev, "mixed no checksumming and other settings.\n");
 +              features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
 +      }
 +
        /* Fix illegal SG+CSUM combinations. */
        if ((features & NETIF_F_SG) &&
            !(features & NETIF_F_ALL_CSUM)) {
 -              if (name)
 -                      printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
 -                             "checksum feature.\n", name);
 +              netdev_info(dev,
 +                          "Dropping NETIF_F_SG since no checksum feature.\n");
                features &= ~NETIF_F_SG;
        }
  
        /* TSO requires that SG is present as well. */
        if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
 -              if (name)
 -                      printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
 -                             "SG feature.\n", name);
 +              netdev_info(dev, "Dropping NETIF_F_TSO since no SG feature.\n");
                features &= ~NETIF_F_TSO;
        }
  
 +      /* UFO needs SG and checksumming */
        if (features & NETIF_F_UFO) {
                /* maybe split UFO into V4 and V6? */
                if (!((features & NETIF_F_GEN_CSUM) ||
                    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
                            == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
 -                      if (name)
 -                              printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
 -                                     "since no checksum offload features.\n",
 -                                     name);
 +                      netdev_info(dev,
 +                              "Dropping NETIF_F_UFO since no checksum offload features.\n");
                        features &= ~NETIF_F_UFO;
                }
  
                if (!(features & NETIF_F_SG)) {
 -                      if (name)
 -                              printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
 -                                     "since no NETIF_F_SG feature.\n", name);
 +                      netdev_info(dev,
 +                              "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
                        features &= ~NETIF_F_UFO;
                }
        }
@@@ -5404,7 -5242,22 +5404,7 @@@ int register_netdevice(struct net_devic
        if (dev->iflink == -1)
                dev->iflink = dev->ifindex;
  
 -      /* Fix illegal checksum combinations */
 -      if ((dev->features & NETIF_F_HW_CSUM) &&
 -          (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
 -              printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
 -                     dev->name);
 -              dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
 -      }
 -
 -      if ((dev->features & NETIF_F_NO_CSUM) &&
 -          (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
 -              printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
 -                     dev->name);
 -              dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
 -      }
 -
 -      dev->features = netdev_fix_features(dev->features, dev->name);
 +      dev->features = netdev_fix_features(dev, dev->features);
  
        /* Enable software GSO if SG is supported. */
        if (dev->features & NETIF_F_SG)
@@@ -5807,31 -5660,35 +5807,36 @@@ struct net_device *alloc_netdev_mqs(in
  
        dev_net_set(dev, &init_net);
  
+       dev->gso_max_size = GSO_MAX_SIZE;
+       INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list);
+       dev->ethtool_ntuple_list.count = 0;
+       INIT_LIST_HEAD(&dev->napi_list);
+       INIT_LIST_HEAD(&dev->unreg_list);
+       INIT_LIST_HEAD(&dev->link_watch_list);
+       dev->priv_flags = IFF_XMIT_DST_RELEASE;
+       setup(dev);
        dev->num_tx_queues = txqs;
        dev->real_num_tx_queues = txqs;
        if (netif_alloc_netdev_queues(dev))
-               goto free_pcpu;
+               goto free_all;
  
  #ifdef CONFIG_RPS
        dev->num_rx_queues = rxqs;
        dev->real_num_rx_queues = rxqs;
        if (netif_alloc_rx_queues(dev))
-               goto free_pcpu;
+               goto free_all;
  #endif
  
-       dev->gso_max_size = GSO_MAX_SIZE;
-       INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list);
-       dev->ethtool_ntuple_list.count = 0;
-       INIT_LIST_HEAD(&dev->napi_list);
-       INIT_LIST_HEAD(&dev->unreg_list);
-       INIT_LIST_HEAD(&dev->link_watch_list);
-       dev->priv_flags = IFF_XMIT_DST_RELEASE;
-       setup(dev);
        strcpy(dev->name, name);
 +      dev->group = INIT_NETDEV_GROUP;
        return dev;
  
+ free_all:
+       free_netdev(dev);
+       return NULL;
  free_pcpu:
        free_percpu(dev->pcpu_refcnt);
        kfree(dev->_tx);
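Pulling setup() and the list initializations ahead of the queue allocations lets the later failure paths collapse into the single free_all label, which simply calls free_netdev(). A hedged, generic sketch of that unwind pattern (hypothetical names, not the netdev API):

	#include <stdlib.h>

	/* Once the object is initialized far enough that the full destructor
	 * is safe to call, every later allocation failure can share one
	 * cleanup label. */
	struct obj {
		int *bufa;
		int *bufb;
	};

	static void obj_free(struct obj *o)
	{
		free(o->bufa);   /* free(NULL) is a no-op, so partial init is fine */
		free(o->bufb);
		free(o);
	}

	static struct obj *obj_alloc(void)
	{
		struct obj *o = calloc(1, sizeof(*o));

		if (!o)
			return NULL;

		/* From this point on, obj_free() is always safe. */
		o->bufa = calloc(16, sizeof(int));
		if (!o->bufa)
			goto free_all;

		o->bufb = calloc(16, sizeof(int));
		if (!o->bufb)
			goto free_all;

		return o;

	free_all:
		obj_free(o);
		return NULL;
	}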
@@@ -6140,7 -5997,8 +6145,7 @@@ static int dev_cpu_callback(struct noti
   *    @one to the master device with current feature set @all.  Will not
   *    enable anything that is off in @mask. Returns the new feature set.
   */
 -unsigned long netdev_increment_features(unsigned long all, unsigned long one,
 -                                      unsigned long mask)
 +u32 netdev_increment_features(u32 all, u32 one, u32 mask)
  {
        /* If device needs checksumming, downgrade to it. */
        if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
diff --combined net/mac80211/cfg.c
@@@ -1215,9 -1215,6 +1215,9 @@@ static int ieee80211_set_channel(struc
  {
        struct ieee80211_local *local = wiphy_priv(wiphy);
        struct ieee80211_sub_if_data *sdata = NULL;
 +      struct ieee80211_channel *old_oper;
 +      enum nl80211_channel_type old_oper_type;
 +      enum nl80211_channel_type old_vif_oper_type = NL80211_CHAN_NO_HT;
  
        if (netdev)
                sdata = IEEE80211_DEV_TO_SUB_IF(netdev);
                break;
        }
  
 -      local->oper_channel = chan;
 +      if (sdata)
 +              old_vif_oper_type = sdata->vif.bss_conf.channel_type;
 +      old_oper_type = local->_oper_channel_type;
  
        if (!ieee80211_set_channel_type(local, sdata, channel_type))
                return -EBUSY;
  
 -      ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
 -      if (sdata && sdata->vif.type != NL80211_IFTYPE_MONITOR)
 +      old_oper = local->oper_channel;
 +      local->oper_channel = chan;
 +
 +      /* Update driver if changes were actually made. */
 +      if ((old_oper != local->oper_channel) ||
 +          (old_oper_type != local->_oper_channel_type))
 +              ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
 +
 +      if ((sdata && sdata->vif.type != NL80211_IFTYPE_MONITOR) &&
 +          old_vif_oper_type != sdata->vif.bss_conf.channel_type)
                ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_HT);
  
        return 0;
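The rework above snapshots the old channel and channel type, applies the new values, and only calls into the driver when something actually changed. A rough sketch of that change-detection pattern with illustrative names (not the mac80211 API):

	struct chan_state {
		int channel;
		int channel_type;
	};

	static void hw_reconfig(void)
	{
		/* ...push the new channel configuration to the driver... */
	}

	static int set_channel(struct chan_state *s, int chan, int type)
	{
		struct chan_state old = *s;	/* snapshot before the update */

		s->channel = chan;
		s->channel_type = type;

		/* Only bother the driver when something actually changed. */
		if (old.channel != s->channel ||
		    old.channel_type != s->channel_type)
			hw_reconfig();

		return 0;
	}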
@@@ -1287,11 -1274,8 +1287,11 @@@ static int ieee80211_scan(struct wiphy 
        case NL80211_IFTYPE_P2P_GO:
                if (sdata->local->ops->hw_scan)
                        break;
 -              /* FIXME: implement NoA while scanning in software */
 -              return -EOPNOTSUPP;
 +              /*
 +               * FIXME: implement NoA while scanning in software,
 +               * for now fall through to allow scanning only when
 +               * beaconing hasn't been configured yet
 +               */
        case NL80211_IFTYPE_AP:
                if (sdata->u.ap.beacon)
                        return -EOPNOTSUPP;
@@@ -1838,6 -1822,7 +1838,7 @@@ static int ieee80211_mgmt_tx(struct wip
                *cookie ^= 2;
                IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_TX_OFFCHAN;
                local->hw_roc_skb = skb;
+               local->hw_roc_skb_for_status = skb;
                mutex_unlock(&local->mtx);
  
                return 0;
@@@ -1891,6 -1876,7 +1892,7 @@@ static int ieee80211_mgmt_tx_cancel_wai
                if (ret == 0) {
                        kfree_skb(local->hw_roc_skb);
                        local->hw_roc_skb = NULL;
+                       local->hw_roc_skb_for_status = NULL;
                }
  
                mutex_unlock(&local->mtx);
@@@ -225,7 -225,6 +225,7 @@@ struct ieee80211_if_ap 
        struct sk_buff_head ps_bc_buf;
        atomic_t num_sta_ps; /* number of stations in PS mode */
        int dtim_count;
 +      bool dtim_bc_mc;
  };
  
  struct ieee80211_if_wds {
@@@ -655,6 -654,8 +655,6 @@@ struct tpt_led_trigger 
   *    well be on the operating channel
   * @SCAN_HW_SCANNING: The hardware is scanning for us, we have no way to
   *    determine if we are on the operating channel or not
 - * @SCAN_OFF_CHANNEL: We're off our operating channel for scanning,
 - *    gets only set in conjunction with SCAN_SW_SCANNING
   * @SCAN_COMPLETED: Set for our scan work function when the driver reported
   *    that the scan completed.
   * @SCAN_ABORTED: Set for our scan work function when the driver reported
  enum {
        SCAN_SW_SCANNING,
        SCAN_HW_SCANNING,
 -      SCAN_OFF_CHANNEL,
        SCAN_COMPLETED,
        SCAN_ABORTED,
  };
@@@ -951,7 -953,7 +951,7 @@@ struct ieee80211_local 
  
        struct ieee80211_channel *hw_roc_channel;
        struct net_device *hw_roc_dev;
-       struct sk_buff *hw_roc_skb;
+       struct sk_buff *hw_roc_skb, *hw_roc_skb_for_status;
        struct work_struct hw_roc_start, hw_roc_done;
        enum nl80211_channel_type hw_roc_channel_type;
        unsigned int hw_roc_duration;
@@@ -1145,14 -1147,10 +1145,14 @@@ void ieee80211_rx_bss_put(struct ieee80
                          struct ieee80211_bss *bss);
  
  /* off-channel helpers */
 -void ieee80211_offchannel_stop_beaconing(struct ieee80211_local *local);
 -void ieee80211_offchannel_stop_station(struct ieee80211_local *local);
 +bool ieee80211_cfg_on_oper_channel(struct ieee80211_local *local);
 +void ieee80211_offchannel_enable_all_ps(struct ieee80211_local *local,
 +                                      bool tell_ap);
 +void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
 +                                  bool offchannel_ps_enable);
  void ieee80211_offchannel_return(struct ieee80211_local *local,
 -                               bool enable_beaconing);
 +                               bool enable_beaconing,
 +                               bool offchannel_ps_disable);
  void ieee80211_hw_roc_setup(struct ieee80211_local *local);
  
  /* interface handling */
diff --combined net/mac80211/status.c
@@@ -98,10 -98,6 +98,10 @@@ static void ieee80211_handle_filtered_f
         *  (b) always process RX events before TX status events if ordering
         *      can be unknown, for example with different interrupt status
         *      bits.
 +       *  (c) if PS mode transitions are manual (i.e. the flag
 +       *      %IEEE80211_HW_AP_LINK_PS is set), always process PS state
 +       *      changes before calling TX status events if ordering can be
 +       *      unknown.
         */
        if (test_sta_flags(sta, WLAN_STA_PS_STA) &&
            skb_queue_len(&sta->tx_filtered) < STA_MAX_TX_BUFFER) {
@@@ -327,6 -323,7 +327,7 @@@ void ieee80211_tx_status(struct ieee802
  
        if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) {
                struct ieee80211_work *wk;
+               u64 cookie = (unsigned long)skb;
  
                rcu_read_lock();
                list_for_each_entry_rcu(wk, &local->work_list, list) {
                        break;
                }
                rcu_read_unlock();
+               if (local->hw_roc_skb_for_status == skb) {
+                       cookie = local->hw_roc_cookie ^ 2;
+                       local->hw_roc_skb_for_status = NULL;
+               }
                cfg80211_mgmt_tx_status(
-                       skb->dev, (unsigned long) skb, skb->data, skb->len,
+                       skb->dev, cookie, skb->data, skb->len,
                        !!(info->flags & IEEE80211_TX_STAT_ACK), GFP_ATOMIC);
        }
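For the off-channel frame, the status path now reports the remain-on-channel cookie (hw_roc_cookie ^ 2, matching the value handed back at TX time) instead of the default skb-pointer cookie. A hedged sketch of that lookup, with hypothetical names:

	/* Illustrative only: the status path must report the same cookie
	 * value the TX path returned to userspace, so the off-channel frame
	 * is remembered separately. */
	struct roc_ctx {
		void *skb_for_status;		/* frame sent while off channel */
		unsigned long roc_cookie;	/* cookie returned by the TX request */
	};

	static unsigned long tx_status_cookie(struct roc_ctx *c, void *skb)
	{
		unsigned long cookie = (unsigned long)skb;	/* normal case */

		if (c->skb_for_status == skb) {
			cookie = c->roc_cookie ^ 2;	/* matches *cookie ^= 2 at TX time */
			c->skb_for_status = NULL;
		}
		return cookie;
	}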
  
diff --combined net/mac80211/tx.c
@@@ -257,8 -257,7 +257,8 @@@ ieee80211_tx_h_check_assoc(struct ieee8
        if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED))
                return TX_CONTINUE;
  
 -      if (unlikely(test_bit(SCAN_OFF_CHANNEL, &tx->local->scanning)) &&
 +      if (unlikely(test_bit(SCAN_SW_SCANNING, &tx->local->scanning)) &&
 +          test_bit(SDATA_STATE_OFFCHANNEL, &tx->sdata->state) &&
            !ieee80211_is_probe_req(hdr->frame_control) &&
            !ieee80211_is_nullfunc(hdr->frame_control))
                /*
@@@ -1395,8 -1394,7 +1395,8 @@@ static int invoke_tx_handlers(struct ie
        /* handlers after fragment must be aware of tx info fragmentation! */
        CALL_TXH(ieee80211_tx_h_stats);
        CALL_TXH(ieee80211_tx_h_encrypt);
 -      CALL_TXH(ieee80211_tx_h_calculate_duration);
 +      if (!(tx->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL))
 +              CALL_TXH(ieee80211_tx_h_calculate_duration);
  #undef CALL_TXH
  
   txh_done:
@@@ -1549,7 -1547,7 +1549,7 @@@ static int ieee80211_skb_resize(struct 
                skb_orphan(skb);
        }
  
-       if (skb_header_cloned(skb))
+       if (skb_cloned(skb))
                I802_DEBUG_INC(local->tx_expand_skb_head_cloned);
        else if (head_need || tail_need)
                I802_DEBUG_INC(local->tx_expand_skb_head);
@@@ -1752,7 -1750,7 +1752,7 @@@ netdev_tx_t ieee80211_subif_start_xmit(
        __le16 fc;
        struct ieee80211_hdr hdr;
        struct ieee80211s_hdr mesh_hdr __maybe_unused;
 -      struct mesh_path *mppath = NULL;
 +      struct mesh_path __maybe_unused *mppath = NULL;
        const u8 *encaps_data;
        int encaps_len, skip_header_bytes;
        int nh_pos, h_pos;
                        mppath = mpp_path_lookup(skb->data, sdata);
  
                /*
 -               * Do not use address extension, if it is a packet from
 -               * the same interface and the destination is not being
 -               * proxied by any other mest point.
 +               * Use address extension if it is a packet from
 +               * another interface or if we know the destination
 +               * is being proxied by a portal (i.e. portal address
 +               * differs from proxied address)
                 */
                if (compare_ether_addr(sdata->vif.addr,
                                       skb->data + ETH_ALEN) == 0 &&
 -                  (!mppath || !compare_ether_addr(mppath->mpp, skb->data))) {
 +                  !(mppath && compare_ether_addr(mppath->mpp, skb->data))) {
                        hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
                                        skb->data, skb->data + ETH_ALEN);
                        meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr,
                                        sdata, NULL, NULL);
                } else {
 -                      /* packet from other interface */
                        int is_mesh_mcast = 1;
                        const u8 *mesh_da;
  
@@@ -2180,8 -2178,6 +2180,8 @@@ static void ieee80211_beacon_add_tim(st
        if (bss->dtim_count == 0 && !skb_queue_empty(&bss->ps_bc_buf))
                aid0 = 1;
  
 +      bss->dtim_bc_mc = aid0 == 1;
 +
        if (have_bits) {
                /* Find largest even number N1 so that bits numbered 1 through
                 * (N1 x 8) - 1 in the bitmap are 0 and number N2 so that bits
@@@ -2245,7 -2241,7 +2245,7 @@@ struct sk_buff *ieee80211_beacon_get_ti
        if (sdata->vif.type == NL80211_IFTYPE_AP) {
                ap = &sdata->u.ap;
                beacon = rcu_dereference(ap->beacon);
 -              if (ap && beacon) {
 +              if (beacon) {
                        /*
                         * headroom, head length,
                         * tail length and maximum TIM length
                struct ieee80211_mgmt *mgmt;
                u8 *pos;
  
 +#ifdef CONFIG_MAC80211_MESH
 +              if (!sdata->u.mesh.mesh_id_len)
 +                      goto out;
 +#endif
 +
                /* headroom, head length, tail length and maximum TIM length */
                skb = dev_alloc_skb(local->tx_headroom + 400 +
                                sdata->u.mesh.vendor_ie_len);
@@@ -2552,7 -2543,7 +2552,7 @@@ ieee80211_get_buffered_bc(struct ieee80
        if (sdata->vif.type != NL80211_IFTYPE_AP || !beacon || !beacon->head)
                goto out;
  
 -      if (bss->dtim_count != 0)
 +      if (bss->dtim_count != 0 || !bss->dtim_bc_mc)
                goto out; /* send buffered bc/mc only after DTIM beacon */
  
        while (1) {