ath9k: Add support for multiple secondary virtual wiphys
diff --git a/drivers/net/wireless/ath9k/xmit.c b/drivers/net/wireless/ath9k/xmit.c
index 8b332e1..3c48fa5 100644
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
-/*
- * Implementation of transmit path.
- */
-
-#include "core.h"
+#include "ath9k.h"
 
 #define BITS_PER_BYTE           8
 #define OFDM_PLCP_BITS          22
@@ -59,512 +55,45 @@ static u32 bits_per_symbol[][2] = {
 
 #define IS_HT_RATE(_rate)     ((_rate) & 0x80)
 
-/*
- * Insert a chain of ath_buf (descriptors) on a multicast txq
- * but do NOT start tx DMA on this queue.
- * NB: must be called with txq lock held
- */
-
-static void ath_tx_mcastqaddbuf(struct ath_softc *sc,
-                               struct ath_txq *txq,
-                               struct list_head *head)
-{
-       struct ath_hal *ah = sc->sc_ah;
-       struct ath_buf *bf;
-
-       if (list_empty(head))
-               return;
-
-       /*
-        * Insert the frame on the outbound list and
-        * pass it on to the hardware.
-        */
-       bf = list_first_entry(head, struct ath_buf, list);
-
-       /*
-        * The CAB queue is started from the SWBA handler since
-        * frames only go out on DTIM, and to avoid possible races.
-        */
-       ath9k_hw_set_interrupts(ah, 0);
-
-       /*
-        * If there is anything in the mcastq, we want to set
-        * the "more data" bit in the last item in the queue to
-        * indicate that there is "more data". It makes sense to add
-        * it here since you are *always* going to have
-        * more data when adding to this queue, no matter where
-        * you call from.
-        */
-
-       if (txq->axq_depth) {
-               struct ath_buf *lbf;
-               struct ieee80211_hdr *hdr;
-
-               /*
-                * Add the "more data flag" to the last frame
-                */
-
-               lbf = list_entry(txq->axq_q.prev, struct ath_buf, list);
-               hdr = (struct ieee80211_hdr *)
-                       ((struct sk_buff *)(lbf->bf_mpdu))->data;
-               hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
-       }
-
-       /*
-        * Now, concat the frame onto the queue
-        */
-       list_splice_tail_init(head, &txq->axq_q);
-       txq->axq_depth++;
-       txq->axq_totalqueued++;
-       txq->axq_linkbuf = list_entry(txq->axq_q.prev, struct ath_buf, list);
-
-       DPRINTF(sc, ATH_DBG_QUEUE,
-               "%s: txq depth = %d\n", __func__, txq->axq_depth);
-       if (txq->axq_link != NULL) {
-               *txq->axq_link = bf->bf_daddr;
-               DPRINTF(sc, ATH_DBG_XMIT,
-                       "%s: link[%u](%p)=%llx (%p)\n",
-                       __func__,
-                       txq->axq_qnum, txq->axq_link,
-                       ito64(bf->bf_daddr), bf->bf_desc);
-       }
-       txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link);
-       ath9k_hw_set_interrupts(ah, sc->sc_imask);
-}
-
-/*
- * Insert a chain of ath_buf (descriptors) on a txq and
- * assume the descriptors are already chained together by caller.
- * NB: must be called with txq lock held
- */
-
-static void ath_tx_txqaddbuf(struct ath_softc *sc,
-               struct ath_txq *txq, struct list_head *head)
-{
-       struct ath_hal *ah = sc->sc_ah;
-       struct ath_buf *bf;
-       /*
-        * Insert the frame on the outbound list and
-        * pass it on to the hardware.
-        */
-
-       if (list_empty(head))
-               return;
-
-       bf = list_first_entry(head, struct ath_buf, list);
-
-       list_splice_tail_init(head, &txq->axq_q);
-       txq->axq_depth++;
-       txq->axq_totalqueued++;
-       txq->axq_linkbuf = list_entry(txq->axq_q.prev, struct ath_buf, list);
-
-       DPRINTF(sc, ATH_DBG_QUEUE,
-               "%s: txq depth = %d\n", __func__, txq->axq_depth);
-
-       if (txq->axq_link == NULL) {
-               ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
-               DPRINTF(sc, ATH_DBG_XMIT,
-                       "%s: TXDP[%u] = %llx (%p)\n",
-                       __func__, txq->axq_qnum,
-                       ito64(bf->bf_daddr), bf->bf_desc);
-       } else {
-               *txq->axq_link = bf->bf_daddr;
-               DPRINTF(sc, ATH_DBG_XMIT, "%s: link[%u] (%p)=%llx (%p)\n",
-                       __func__,
-                       txq->axq_qnum, txq->axq_link,
-                       ito64(bf->bf_daddr), bf->bf_desc);
-       }
-       txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link);
-       ath9k_hw_txstart(ah, txq->axq_qnum);
-}
-
-/* Get transmit rate index using rate in Kbps */
-
-static int ath_tx_findindex(const struct ath9k_rate_table *rt, int rate)
-{
-       int i;
-       int ndx = 0;
-
-       for (i = 0; i < rt->rateCount; i++) {
-               if (rt->info[i].rateKbps == rate) {
-                       ndx = i;
-                       break;
-               }
-       }
-
-       return ndx;
-}
+static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
+                              struct ath_atx_tid *tid,
+                              struct list_head *bf_head);
+static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
+                               struct list_head *bf_q,
+                               int txok, int sendbar);
+static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
+                            struct list_head *head);
+static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf);
 
-/* Check if it's okay to send out aggregates */
+/*********************/
+/* Aggregation logic */
+/*********************/
 
-static int ath_aggr_query(struct ath_softc *sc,
-       struct ath_node *an, u8 tidno)
+static int ath_aggr_query(struct ath_softc *sc, struct ath_node *an, u8 tidno)
 {
        struct ath_atx_tid *tid;
        tid = ATH_AN_2_TID(an, tidno);
 
-       if (tid->addba_exchangecomplete || tid->addba_exchangeinprogress)
+       if (tid->state & AGGR_ADDBA_COMPLETE ||
+           tid->state & AGGR_ADDBA_PROGRESS)
                return 1;
        else
                return 0;
 }
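
The old addba_exchangecomplete/addba_exchangeinprogress booleans are folded into a single tid->state bitmask here. A minimal, self-contained model of that change (the flag values and struct layout below are invented for illustration, not taken from ath9k.h):

#include <stdio.h>

#define AGGR_ADDBA_COMPLETE 0x01   /* assumed demo values, not the driver's */
#define AGGR_ADDBA_PROGRESS 0x02

struct demo_tid {
        unsigned int state;        /* stands in for ath_atx_tid::state */
};

/* mirrors the check in ath_aggr_query(): aggregation is usable once an
 * ADDBA exchange has completed or is still in flight */
static int demo_aggr_query(const struct demo_tid *tid)
{
        return (tid->state & (AGGR_ADDBA_COMPLETE | AGGR_ADDBA_PROGRESS)) ? 1 : 0;
}

int main(void)
{
        struct demo_tid tid = { .state = AGGR_ADDBA_PROGRESS };
        printf("aggr: %d\n", demo_aggr_query(&tid));   /* 1 */
        tid.state &= ~AGGR_ADDBA_PROGRESS;
        printf("aggr: %d\n", demo_aggr_query(&tid));   /* 0 */
        return 0;
}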
 
-static enum ath9k_pkt_type get_hal_packet_type(struct ieee80211_hdr *hdr)
-{
-       enum ath9k_pkt_type htype;
-       __le16 fc;
-
-       fc = hdr->frame_control;
-
-       /* Calculate Atheros packet type from IEEE80211 packet header */
-
-       if (ieee80211_is_beacon(fc))
-               htype = ATH9K_PKT_TYPE_BEACON;
-       else if (ieee80211_is_probe_resp(fc))
-               htype = ATH9K_PKT_TYPE_PROBE_RESP;
-       else if (ieee80211_is_atim(fc))
-               htype = ATH9K_PKT_TYPE_ATIM;
-       else if (ieee80211_is_pspoll(fc))
-               htype = ATH9K_PKT_TYPE_PSPOLL;
-       else
-               htype = ATH9K_PKT_TYPE_NORMAL;
-
-       return htype;
-}
-
-static void fill_min_rates(struct sk_buff *skb, struct ath_tx_control *txctl)
-{
-       struct ieee80211_hdr *hdr;
-       struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
-       struct ath_tx_info_priv *tx_info_priv;
-       __le16 fc;
-
-       hdr = (struct ieee80211_hdr *)skb->data;
-       fc = hdr->frame_control;
-       tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0];
-
-       if (ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc)) {
-               txctl->use_minrate = 1;
-               txctl->min_rate = tx_info_priv->min_rate;
-       } else if (ieee80211_is_data(fc)) {
-               if (ieee80211_is_nullfunc(fc) ||
-                       /* Port Access Entity (IEEE 802.1X) */
-                       (skb->protocol == cpu_to_be16(0x888E))) {
-                       txctl->use_minrate = 1;
-                       txctl->min_rate = tx_info_priv->min_rate;
-               }
-               if (is_multicast_ether_addr(hdr->addr1))
-                       txctl->mcast_rate = tx_info_priv->min_rate;
-       }
-
-}
-
-/* This function will setup additional txctl information, mostly rate stuff */
-/* FIXME: seqno, ps */
-static int ath_tx_prepare(struct ath_softc *sc,
-                         struct sk_buff *skb,
-                         struct ath_tx_control *txctl)
-{
-       struct ieee80211_hw *hw = sc->hw;
-       struct ieee80211_hdr *hdr;
-       struct ath_rc_series *rcs;
-       struct ath_txq *txq = NULL;
-       const struct ath9k_rate_table *rt;
-       struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
-       struct ath_tx_info_priv *tx_info_priv;
-       int hdrlen;
-       u8 rix, antenna;
-       __le16 fc;
-       u8 *qc;
-
-       memset(txctl, 0, sizeof(struct ath_tx_control));
-
-       txctl->dev = sc;
-       hdr = (struct ieee80211_hdr *)skb->data;
-       hdrlen = ieee80211_get_hdrlen_from_skb(skb);
-       fc = hdr->frame_control;
-
-       rt = sc->sc_currates;
-       BUG_ON(!rt);
-
-       /* Fill misc fields */
-
-       spin_lock_bh(&sc->node_lock);
-       txctl->an = ath_node_get(sc, hdr->addr1);
-       /* create a temp node, if the node is not there already */
-       if (!txctl->an)
-               txctl->an = ath_node_attach(sc, hdr->addr1, 0);
-       spin_unlock_bh(&sc->node_lock);
-
-       if (ieee80211_is_data_qos(fc)) {
-               qc = ieee80211_get_qos_ctl(hdr);
-               txctl->tidno = qc[0] & 0xf;
-       }
-
-       txctl->if_id = 0;
-       txctl->nextfraglen = 0;
-       txctl->frmlen = skb->len + FCS_LEN - (hdrlen & 3);
-       txctl->txpower = MAX_RATE_POWER; /* FIXME */
-
-       /* Fill Key related fields */
-
-       txctl->keytype = ATH9K_KEY_TYPE_CLEAR;
-       txctl->keyix = ATH9K_TXKEYIX_INVALID;
-
-       if (tx_info->control.hw_key) {
-               txctl->keyix = tx_info->control.hw_key->hw_key_idx;
-               txctl->frmlen += tx_info->control.icv_len;
-
-               if (tx_info->control.hw_key->alg == ALG_WEP)
-                       txctl->keytype = ATH9K_KEY_TYPE_WEP;
-               else if (tx_info->control.hw_key->alg == ALG_TKIP)
-                       txctl->keytype = ATH9K_KEY_TYPE_TKIP;
-               else if (tx_info->control.hw_key->alg == ALG_CCMP)
-                       txctl->keytype = ATH9K_KEY_TYPE_AES;
-       }
-
-       /* Fill packet type */
-
-       txctl->atype = get_hal_packet_type(hdr);
-
-       /* Fill qnum */
-
-       txctl->qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc);
-       txq = &sc->sc_txq[txctl->qnum];
-       spin_lock_bh(&txq->axq_lock);
-
-       /* Try to avoid running out of descriptors */
-       if (txq->axq_depth >= (ATH_TXBUF - 20)) {
-               DPRINTF(sc, ATH_DBG_FATAL,
-                       "%s: TX queue: %d is full, depth: %d\n",
-                       __func__,
-                       txctl->qnum,
-                       txq->axq_depth);
-               ieee80211_stop_queue(hw, skb_get_queue_mapping(skb));
-               txq->stopped = 1;
-               spin_unlock_bh(&txq->axq_lock);
-               return -1;
-       }
-
-       spin_unlock_bh(&txq->axq_lock);
-
-       /* Fill rate */
-
-       fill_min_rates(skb, txctl);
-
-       /* Fill flags */
-
-       txctl->flags = ATH9K_TXDESC_CLRDMASK;    /* needed for crypto errors */
-
-       if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
-               txctl->flags |= ATH9K_TXDESC_NOACK;
-       if (tx_info->flags & IEEE80211_TX_CTL_USE_RTS_CTS)
-               txctl->flags |= ATH9K_TXDESC_RTSENA;
-
-       /*
-        * Setup for rate calculations.
-        */
-       tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0];
-       rcs = tx_info_priv->rcs;
-
-       if (ieee80211_is_data(fc) && !txctl->use_minrate) {
-
-               /* Enable HT only for DATA frames and not for EAPOL */
-               txctl->ht = (hw->conf.ht_conf.ht_supported &&
-                           (tx_info->flags & IEEE80211_TX_CTL_AMPDU));
-
-               if (is_multicast_ether_addr(hdr->addr1)) {
-                       rcs[0].rix = (u8)
-                               ath_tx_findindex(rt, txctl->mcast_rate);
-
-                       /*
-                        * mcast packets are not re-tried.
-                        */
-                       rcs[0].tries = 1;
-               }
-               /* For HT capable stations, we save tidno for later use.
-                * We also override seqno set by upper layer with the one
-                * in tx aggregation state.
-                *
-                * First, the fragmentation state is determined.
-                * If fragmentation is on, the sequence number is
-                * not overridden, since it has been
-                * incremented by the fragmentation routine.
-                */
-               if (likely(!(txctl->flags & ATH9K_TXDESC_FRAG_IS_ON)) &&
-                       txctl->ht && sc->sc_txaggr) {
-                       struct ath_atx_tid *tid;
-
-                       tid = ATH_AN_2_TID(txctl->an, txctl->tidno);
-
-                       hdr->seq_ctrl = cpu_to_le16(tid->seq_next <<
-                               IEEE80211_SEQ_SEQ_SHIFT);
-                       txctl->seqno = tid->seq_next;
-                       INCR(tid->seq_next, IEEE80211_SEQ_MAX);
-               }
-       } else {
-               /* for management and control frames,
-                * or for NULL and EAPOL frames */
-               if (txctl->min_rate)
-                       rcs[0].rix = ath_rate_findrateix(sc, txctl->min_rate);
-               else
-                       rcs[0].rix = 0;
-               rcs[0].tries = ATH_MGT_TXMAXTRY;
-       }
-       rix = rcs[0].rix;
-
-       /*
-        * Calculate duration.  This logically belongs in the 802.11
-        * layer but it lacks sufficient information to calculate it.
-        */
-       if ((txctl->flags & ATH9K_TXDESC_NOACK) == 0 && !ieee80211_is_ctl(fc)) {
-               u16 dur;
-               /*
-                * XXX not right with fragmentation.
-                */
-               if (sc->sc_flags & ATH_PREAMBLE_SHORT)
-                       dur = rt->info[rix].spAckDuration;
-               else
-                       dur = rt->info[rix].lpAckDuration;
-
-               if (le16_to_cpu(hdr->frame_control) &
-                               IEEE80211_FCTL_MOREFRAGS) {
-                       dur += dur;  /* Add additional 'SIFS + ACK' */
-
-                       /*
-                       ** Compute size of next fragment in order to compute
-                       ** durations needed to update NAV.
-                       ** The last fragment uses the ACK duration only.
-                       ** Add time for next fragment.
-                       */
-                       dur += ath9k_hw_computetxtime(sc->sc_ah, rt,
-                                       txctl->nextfraglen,
-                                       rix, sc->sc_flags & ATH_PREAMBLE_SHORT);
-               }
-
-               if (ieee80211_has_morefrags(fc) ||
-                    (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)) {
-                       /*
-                       **  Force hardware to use computed duration for next
-                       **  fragment by disabling multi-rate retry, which
-                       **  updates duration based on the multi-rate
-                       **  duration table.
-                       */
-                       rcs[1].tries = rcs[2].tries = rcs[3].tries = 0;
-                       rcs[1].rix = rcs[2].rix = rcs[3].rix = 0;
-                       /* reset tries but keep rate index */
-                       rcs[0].tries = ATH_TXMAXTRY;
-               }
-
-               hdr->duration_id = cpu_to_le16(dur);
-       }
-
-       /*
-        * Determine if a tx interrupt should be generated for
-        * this descriptor.  We take a tx interrupt to reap
-        * descriptors when the h/w hits an EOL condition or
-        * when the descriptor is specifically marked to generate
-        * an interrupt.  We periodically mark descriptors in this
-        * way to ensure timely replenishing of the supply needed
-        * for sending frames.  Deferring interrupts reduces system
-        * load and potentially allows more concurrent work to be
-        * done, but if done too aggressively it can cause senders to
-        * back up.
-        *
-        * NB: use >= to deal with sc_txintrperiod changing
-        *     dynamically through sysctl.
-        */
-       spin_lock_bh(&txq->axq_lock);
-       if ((++txq->axq_intrcnt >= sc->sc_txintrperiod)) {
-               txctl->flags |= ATH9K_TXDESC_INTREQ;
-               txq->axq_intrcnt = 0;
-       }
-       spin_unlock_bh(&txq->axq_lock);
-
-       if (is_multicast_ether_addr(hdr->addr1)) {
-               antenna = sc->sc_mcastantenna + 1;
-               sc->sc_mcastantenna = (sc->sc_mcastantenna + 1) & 0x1;
-       } else
-               antenna = sc->sc_txantenna;
-
-#ifdef USE_LEGACY_HAL
-       txctl->antenna = antenna;
-#endif
-       return 0;
-}
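
The interrupt-mitigation comment deleted above (marking only every sc_txintrperiod-th descriptor with ATH9K_TXDESC_INTREQ) boils down to a small counter policy. A runnable sketch of that policy, with invented values:

#include <stdio.h>

#define DEMO_TXDESC_INTREQ 0x01    /* stand-in for ATH9K_TXDESC_INTREQ */

int main(void)
{
        int txintrperiod = 5;      /* assumed tuning value (sc_txintrperiod) */
        int intrcnt = 0;

        for (int frame = 1; frame <= 12; frame++) {
                int flags = 0;

                /* NB: >= rather than ==, so a period lowered at runtime
                 * still triggers on the very next descriptor */
                if (++intrcnt >= txintrperiod) {
                        flags |= DEMO_TXDESC_INTREQ;
                        intrcnt = 0;
                }
                printf("frame %2d: %s\n", frame,
                       (flags & DEMO_TXDESC_INTREQ) ? "INTREQ" : "-");
        }
        return 0;
}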
-
-/* To complete a chain of buffers associated a frame */
-
-static void ath_tx_complete_buf(struct ath_softc *sc,
-                               struct ath_buf *bf,
-                               struct list_head *bf_q,
-                               int txok, int sendbar)
-{
-       struct sk_buff *skb = bf->bf_mpdu;
-       struct ath_xmit_status tx_status;
-       dma_addr_t *pa;
-
-       /*
-        * Set retry information.
-        * NB: Don't use the information in the descriptor, because the frame
-        * could be software retried.
-        */
-       tx_status.retries = bf->bf_retries;
-       tx_status.flags = 0;
-
-       if (sendbar)
-               tx_status.flags = ATH_TX_BAR;
-
-       if (!txok) {
-               tx_status.flags |= ATH_TX_ERROR;
-
-               if (bf->bf_isxretried)
-                       tx_status.flags |= ATH_TX_XRETRY;
-       }
-       /* Unmap this frame */
-       pa = get_dma_mem_context(bf, bf_dmacontext);
-       pci_unmap_single(sc->pdev,
-                        *pa,
-                        skb->len,
-                        PCI_DMA_TODEVICE);
-       /* complete this frame */
-       ath_tx_complete(sc, skb, &tx_status, bf->bf_node);
-
-       /*
-        * Return the list of ath_buf of this mpdu to free queue
-        */
-       spin_lock_bh(&sc->sc_txbuflock);
-       list_splice_tail_init(bf_q, &sc->sc_txbuf);
-       spin_unlock_bh(&sc->sc_txbuflock);
-}
-
-/*
- * queue up a dest/ac pair for tx scheduling
- * NB: must be called with txq lock held
- */
-
 static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
 {
        struct ath_atx_ac *ac = tid->ac;
 
-       /*
-        * if tid is paused, hold off
-        */
        if (tid->paused)
                return;
 
-       /*
-        * add tid to ac at most once
-        */
        if (tid->sched)
                return;
 
        tid->sched = true;
        list_add_tail(&tid->list, &ac->tid_q);
 
-       /*
-        * add node ac to txq at most once
-        */
        if (ac->sched)
                return;
 
@@ -572,24 +101,18 @@ static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
        list_add_tail(&ac->list, &txq->axq_acq);
 }
 
-/* pause a tid */
-
 static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
 {
-       struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
+       struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
 
        spin_lock_bh(&txq->axq_lock);
-
        tid->paused++;
-
        spin_unlock_bh(&txq->axq_lock);
 }
 
-/* resume a tid and schedule aggregate */
-
-void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
+static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
 {
-       struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
+       struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
 
        ASSERT(tid->paused > 0);
        spin_lock_bh(&txq->axq_lock);
@@ -602,67 +125,41 @@ void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
        if (list_empty(&tid->buf_q))
                goto unlock;
 
-       /*
-        * Add this TID to scheduler and try to send out aggregates
-        */
        ath_tx_queue_tid(txq, tid);
        ath_txq_schedule(sc, txq);
 unlock:
        spin_unlock_bh(&txq->axq_lock);
 }
 
-/* Compute the number of bad frames */
-
-static int ath_tx_num_badfrms(struct ath_softc *sc,
-       struct ath_buf *bf, int txok)
+static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
 {
-       struct ath_node *an = bf->bf_node;
-       int isnodegone = (an->an_flags & ATH_NODE_CLEAN);
-       struct ath_buf *bf_last = bf->bf_lastbf;
-       struct ath_desc *ds = bf_last->bf_desc;
-       u16 seq_st = 0;
-       u32 ba[WME_BA_BMP_SIZE >> 5];
-       int ba_index;
-       int nbad = 0;
-       int isaggr = 0;
-
-       if (isnodegone || ds->ds_txstat.ts_flags == ATH9K_TX_SW_ABORTED)
-               return 0;
+       struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
+       struct ath_buf *bf;
+       struct list_head bf_head;
+       INIT_LIST_HEAD(&bf_head);
 
-       isaggr = bf->bf_isaggr;
-       if (isaggr) {
-               seq_st = ATH_DS_BA_SEQ(ds);
-               memcpy(ba, ATH_DS_BA_BITMAP(ds), WME_BA_BMP_SIZE >> 3);
-       }
+       ASSERT(tid->paused > 0);
+       spin_lock_bh(&txq->axq_lock);
 
-       while (bf) {
-               ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
-               if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
-                       nbad++;
+       tid->paused--;
 
-               bf = bf->bf_next;
+       if (tid->paused > 0) {
+               spin_unlock_bh(&txq->axq_lock);
+               return;
        }
 
-       return nbad;
-}
-
-static void ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)
-{
-       struct sk_buff *skb;
-       struct ieee80211_hdr *hdr;
-
-       bf->bf_isretried = 1;
-       bf->bf_retries++;
+       while (!list_empty(&tid->buf_q)) {
+               bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
+               ASSERT(!bf_isretried(bf));
+               list_move_tail(&bf->list, &bf_head);
+               ath_tx_send_normal(sc, txq, tid, &bf_head);
+       }
 
-       skb = bf->bf_mpdu;
-       hdr = (struct ieee80211_hdr *)skb->data;
-       hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
+       spin_unlock_bh(&txq->axq_lock);
 }
 
-/* Update block ack window */
-
-static void ath_tx_update_baw(struct ath_softc *sc,
-       struct ath_atx_tid *tid, int seqno)
+static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
+                             int seqno)
 {
        int index, cindex;
 
@@ -677,381 +174,142 @@ static void ath_tx_update_baw(struct ath_softc *sc,
        }
 }
 
-/*
- * ath_pkt_dur - compute packet duration (NB: not NAV)
- *
- * rix - rate index
- * pktlen - total bytes (delims + data + fcs + pads + pad delims)
- * width  - 0 for 20 MHz, 1 for 40 MHz
- * half_gi - to use 4us v/s 3.6 us for symbol time
- */
-
-static u32 ath_pkt_duration(struct ath_softc *sc,
-                                 u8 rix,
-                                 struct ath_buf *bf,
-                                 int width,
-                                 int half_gi,
-                                 bool shortPreamble)
+static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
+                            struct ath_buf *bf)
 {
-       const struct ath9k_rate_table *rt = sc->sc_currates;
-       u32 nbits, nsymbits, duration, nsymbols;
-       u8 rc;
-       int streams, pktlen;
+       int index, cindex;
 
-       pktlen = bf->bf_isaggr ? bf->bf_al : bf->bf_frmlen;
-       rc = rt->info[rix].rateCode;
+       if (bf_isretried(bf))
+               return;
 
-       /*
-        * for legacy rates, use old function to compute packet duration
-        */
-       if (!IS_HT_RATE(rc))
-               return ath9k_hw_computetxtime(sc->sc_ah,
-                                            rt,
-                                            pktlen,
-                                            rix,
-                                            shortPreamble);
-       /*
-        * find number of symbols: PLCP + data
-        */
-       nbits = (pktlen << 3) + OFDM_PLCP_BITS;
-       nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
-       nsymbols = (nbits + nsymbits - 1) / nsymbits;
+       index  = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
+       cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
 
-       if (!half_gi)
-               duration = SYMBOL_TIME(nsymbols);
-       else
-               duration = SYMBOL_TIME_HALFGI(nsymbols);
+       ASSERT(tid->tx_buf[cindex] == NULL);
+       tid->tx_buf[cindex] = bf;
 
-       /*
-        * addup duration for legacy/ht training and signal fields
-        */
-       streams = HT_RC_2_STREAMS(rc);
-       duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
-       return duration;
+       if (index >= ((tid->baw_tail - tid->baw_head) &
+               (ATH_TID_MAX_BUFS - 1))) {
+               tid->baw_tail = cindex;
+               INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
+       }
 }
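
ath_tx_addto_baw() above maps a frame's 12-bit sequence number into a slot of a small circular buffer. A self-contained model of the index arithmetic (BA_INDEX mirrors the usual ATH_BA_INDEX definition; the window size is an assumption for the demo):

#include <assert.h>
#include <stdio.h>

#define IEEE80211_SEQ_MAX 4096
#define DEMO_TID_MAX_BUFS 128      /* assumed window size */

/* distance of seqno from the window start, modulo the sequence space
 * (this is what ATH_BA_INDEX(_st, _seq) computes) */
#define BA_INDEX(st, seq) (((seq) - (st)) & (IEEE80211_SEQ_MAX - 1))

int main(void)
{
        int seq_start = 4090;      /* window start, close to the wrap point */
        int baw_head = 17;         /* arbitrary head slot */
        int seqno = 3;             /* frame seqno that wrapped past 4095 */

        int index  = BA_INDEX(seq_start, seqno);
        int cindex = (baw_head + index) & (DEMO_TID_MAX_BUFS - 1);

        printf("index=%d cindex=%d\n", index, cindex);  /* index=9 cindex=26 */
        assert(index < DEMO_TID_MAX_BUFS);
        return 0;
}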
 
-/* Rate module function to set rate related fields in tx descriptor */
+/*
+ * TODO: For frame(s) that are in the retry state, we will reuse the
+ * sequence number(s) without setting the retry bit. The
+ * alternative is to give up on these and BAR the receiver's window
+ * forward.
+ */
+static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
+                         struct ath_atx_tid *tid)
 
-static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
 {
-       struct ath_hal *ah = sc->sc_ah;
-       const struct ath9k_rate_table *rt;
-       struct ath_desc *ds = bf->bf_desc;
-       struct ath_desc *lastds = bf->bf_lastbf->bf_desc;
-       struct ath9k_11n_rate_series series[4];
-       int i, flags, rtsctsena = 0, dynamic_mimops = 0;
-       u32 ctsduration = 0;
-       u8 rix = 0, cix, ctsrate = 0;
-       u32 aggr_limit_with_rts = sc->sc_rtsaggrlimit;
-       struct ath_node *an = (struct ath_node *) bf->bf_node;
+       struct ath_buf *bf;
+       struct list_head bf_head;
+       INIT_LIST_HEAD(&bf_head);
 
-       /*
-        * get the cix for the lowest valid rix.
-        */
-       rt = sc->sc_currates;
-       for (i = 4; i--;) {
-               if (bf->bf_rcs[i].tries) {
-                       rix = bf->bf_rcs[i].rix;
+       for (;;) {
+               if (list_empty(&tid->buf_q))
                        break;
-               }
-       }
-       flags = (bf->bf_flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA));
-       cix = rt->info[rix].controlRate;
-
-       /*
-        * If 802.11g protection is enabled, determine whether
-        * to use RTS/CTS or just CTS.  Note that this is only
-        * done for OFDM/HT unicast frames.
-        */
-       if (sc->sc_protmode != PROT_M_NONE &&
-           (rt->info[rix].phy == PHY_OFDM ||
-            rt->info[rix].phy == PHY_HT) &&
-           (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) {
-               if (sc->sc_protmode == PROT_M_RTSCTS)
-                       flags = ATH9K_TXDESC_RTSENA;
-               else if (sc->sc_protmode == PROT_M_CTSONLY)
-                       flags = ATH9K_TXDESC_CTSENA;
 
-               cix = rt->info[sc->sc_protrix].controlRate;
-               rtsctsena = 1;
-       }
-
-       /* For 11n, the default behavior is to enable RTS for
-        * hw retried frames. We enable the global flag here and
-        * let rate series flags determine which rates will actually
-        * use RTS.
-        */
-       if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) && bf->bf_isdata) {
-               BUG_ON(!an);
-               /*
-                * 802.11g protection not needed, use our default behavior
-                */
-               if (!rtsctsena)
-                       flags = ATH9K_TXDESC_RTSENA;
-               /*
-                * For dynamic MIMO PS, RTS needs to precede the first aggregate
-                * and the second aggregate should not have any protection at all.
-                */
-               if (an->an_smmode == ATH_SM_PWRSAV_DYNAMIC) {
-                       if (!bf->bf_aggrburst) {
-                               flags = ATH9K_TXDESC_RTSENA;
-                               dynamic_mimops = 1;
-                       } else {
-                               flags = 0;
-                       }
-               }
-       }
-
-       /*
-        * Set protection if aggregate protection on
-        */
-       if (sc->sc_config.ath_aggr_prot &&
-           (!bf->bf_isaggr || (bf->bf_isaggr && bf->bf_al < 8192))) {
-               flags = ATH9K_TXDESC_RTSENA;
-               cix = rt->info[sc->sc_protrix].controlRate;
-               rtsctsena = 1;
-       }
-
-       /*
-        *  For AR5416 - RTS cannot be followed by a frame larger than 8K.
-        */
-       if (bf->bf_isaggr && (bf->bf_al > aggr_limit_with_rts)) {
-               /*
-                * Ensure that in the case of SM Dynamic power save
-                * while we are bursting the second aggregate the
-                * RTS is cleared.
-                */
-               flags &= ~(ATH9K_TXDESC_RTSENA);
-       }
-
-       /*
-        * CTS transmit rate is derived from the transmit rate
-        * by looking in the h/w rate table.  We must also factor
-        * in whether or not a short preamble is to be used.
-        */
-       /* NB: cix is set above where RTS/CTS is enabled */
-       BUG_ON(cix == 0xff);
-       ctsrate = rt->info[cix].rateCode |
-               (bf->bf_shpreamble ? rt->info[cix].shortPreamble : 0);
-
-       /*
-        * Setup HAL rate series
-        */
-       memzero(series, sizeof(struct ath9k_11n_rate_series) * 4);
-
-       for (i = 0; i < 4; i++) {
-               if (!bf->bf_rcs[i].tries)
-                       continue;
-
-               rix = bf->bf_rcs[i].rix;
-
-               series[i].Rate = rt->info[rix].rateCode |
-                       (bf->bf_shpreamble ? rt->info[rix].shortPreamble : 0);
-
-               series[i].Tries = bf->bf_rcs[i].tries;
-
-               series[i].RateFlags = (
-                       (bf->bf_rcs[i].flags & ATH_RC_RTSCTS_FLAG) ?
-                               ATH9K_RATESERIES_RTS_CTS : 0) |
-                       ((bf->bf_rcs[i].flags & ATH_RC_CW40_FLAG) ?
-                               ATH9K_RATESERIES_2040 : 0) |
-                       ((bf->bf_rcs[i].flags & ATH_RC_SGI_FLAG) ?
-                               ATH9K_RATESERIES_HALFGI : 0);
-
-               series[i].PktDuration = ath_pkt_duration(
-                       sc, rix, bf,
-                       (bf->bf_rcs[i].flags & ATH_RC_CW40_FLAG) != 0,
-                       (bf->bf_rcs[i].flags & ATH_RC_SGI_FLAG),
-                       bf->bf_shpreamble);
-
-               if ((an->an_smmode == ATH_SM_PWRSAV_STATIC) &&
-                   (bf->bf_rcs[i].flags & ATH_RC_DS_FLAG) == 0) {
-                       /*
-                        * When sending to an HT node that has enabled static
-                        * SM/MIMO power save, send at single stream rates but
-                        * use maximum allowed transmit chains per user,
-                        * hardware, regulatory, or country limits for
-                        * better range.
-                        */
-                       series[i].ChSel = sc->sc_tx_chainmask;
-               } else {
-                       if (bf->bf_ht)
-                               series[i].ChSel =
-                                       ath_chainmask_sel_logic(sc, an);
-                       else
-                               series[i].ChSel = sc->sc_tx_chainmask;
-               }
-
-               if (rtsctsena)
-                       series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
-
-               /*
-                * Set RTS for all rates if node is in dynamic powersave
-                * mode and we are using dual stream rates.
-                */
-               if (dynamic_mimops && (bf->bf_rcs[i].flags & ATH_RC_DS_FLAG))
-                       series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
-       }
-
-       /*
-        * For non-HT devices, calculate RTS/CTS duration in software
-        * and disable multi-rate retry.
-        */
-       if (flags && !(ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)) {
-               /*
-                * Compute the transmit duration based on the frame
-                * size and the size of an ACK frame.  We call into the
-                * HAL to do the computation since it depends on the
-                * characteristics of the actual PHY being used.
-                *
-                * NB: CTS is assumed the same size as an ACK so we can
-                *     use the precalculated ACK durations.
-                */
-               if (flags & ATH9K_TXDESC_RTSENA) {    /* SIFS + CTS */
-                       ctsduration += bf->bf_shpreamble ?
-                               rt->info[cix].spAckDuration :
-                               rt->info[cix].lpAckDuration;
-               }
-
-               ctsduration += series[0].PktDuration;
+               bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
+               list_move_tail(&bf->list, &bf_head);
 
-               if ((bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) { /* SIFS + ACK */
-                       ctsduration += bf->bf_shpreamble ?
-                               rt->info[rix].spAckDuration :
-                               rt->info[rix].lpAckDuration;
-               }
+               if (bf_isretried(bf))
+                       ath_tx_update_baw(sc, tid, bf->bf_seqno);
 
-               /*
-                * Disable multi-rate retry when using RTS/CTS by clearing
-                * series 1, 2 and 3.
-                */
-               memzero(&series[1], sizeof(struct ath9k_11n_rate_series) * 3);
+               spin_unlock(&txq->axq_lock);
+               ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
+               spin_lock(&txq->axq_lock);
        }
 
-       /*
-        * set dur_update_en for l-sig computation except for PS-Poll frames
-        */
-       ath9k_hw_set11n_ratescenario(ah, ds, lastds,
-                                   !bf->bf_ispspoll,
-                                   ctsrate,
-                                   ctsduration,
-                                   series, 4, flags);
-       if (sc->sc_config.ath_aggr_prot && flags)
-               ath9k_hw_set11n_burstduration(ah, ds, 8192);
+       tid->seq_next = tid->seq_start;
+       tid->baw_tail = tid->baw_head;
 }
 
-/*
- * Function to send a normal HT (non-AMPDU) frame
- * NB: must be called with txq lock held
- */
-
-static int ath_tx_send_normal(struct ath_softc *sc,
-                             struct ath_txq *txq,
-                             struct ath_atx_tid *tid,
-                             struct list_head *bf_head)
+static void ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)
 {
-       struct ath_buf *bf;
        struct sk_buff *skb;
-       struct ieee80211_tx_info *tx_info;
-       struct ath_tx_info_priv *tx_info_priv;
-
-       BUG_ON(list_empty(bf_head));
-
-       bf = list_first_entry(bf_head, struct ath_buf, list);
-       bf->bf_isampdu = 0; /* regular HT frame */
-
-       skb = (struct sk_buff *)bf->bf_mpdu;
-       tx_info = IEEE80211_SKB_CB(skb);
-       tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0];
-       memcpy(bf->bf_rcs, tx_info_priv->rcs, 4 * sizeof(tx_info_priv->rcs[0]));
-
-       /* update starting sequence number for subsequent ADDBA request */
-       INCR(tid->seq_start, IEEE80211_SEQ_MAX);
+       struct ieee80211_hdr *hdr;
 
-       /* Queue to h/w without aggregation */
-       bf->bf_nframes = 1;
-       bf->bf_lastbf = bf->bf_lastfrm; /* one single frame */
-       ath_buf_set_rate(sc, bf);
-       ath_tx_txqaddbuf(sc, txq, bf_head);
+       bf->bf_state.bf_type |= BUF_RETRY;
+       bf->bf_retries++;
 
-       return 0;
+       skb = bf->bf_mpdu;
+       hdr = (struct ieee80211_hdr *)skb->data;
+       hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
 }
 
-/* flush tid's software queue and send frames as non-AMPDUs */
-
-static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
+static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
 {
-       struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
-       struct ath_buf *bf;
-       struct list_head bf_head;
-       INIT_LIST_HEAD(&bf_head);
+       struct ath_buf *tbf;
 
-       ASSERT(tid->paused > 0);
-       spin_lock_bh(&txq->axq_lock);
+       spin_lock_bh(&sc->tx.txbuflock);
+       ASSERT(!list_empty((&sc->tx.txbuf)));
+       tbf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
+       list_del(&tbf->list);
+       spin_unlock_bh(&sc->tx.txbuflock);
 
-       tid->paused--;
+       ATH_TXBUF_RESET(tbf);
 
-       if (tid->paused > 0) {
-               spin_unlock_bh(&txq->axq_lock);
-               return;
-       }
+       tbf->bf_mpdu = bf->bf_mpdu;
+       tbf->bf_buf_addr = bf->bf_buf_addr;
+       *(tbf->bf_desc) = *(bf->bf_desc);
+       tbf->bf_state = bf->bf_state;
+       tbf->bf_dmacontext = bf->bf_dmacontext;
 
-       while (!list_empty(&tid->buf_q)) {
-               bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
-               ASSERT(!bf->bf_isretried);
-               list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);
-               ath_tx_send_normal(sc, txq, tid, &bf_head);
-       }
-
-       spin_unlock_bh(&txq->axq_lock);
+       return tbf;
 }
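
ath_clone_txbuf() exists because the completion path keeps the last descriptor of a chain behind as a "holding" descriptor for the DMA engine (marked ATH_BUFSTATUS_STALE); to retry such a frame, its descriptor contents must be copied into a fresh buffer first. A miniature model of that copy, with simplified stand-in types:

#include <stdio.h>

struct demo_desc { unsigned int ds_link, ds_data; };

struct demo_buf {
        struct demo_desc desc;     /* stands in for *bf_desc */
        int stale;                 /* stands in for ATH_BUFSTATUS_STALE */
};

/* copy the held descriptor's contents so the frame can be re-queued */
static void demo_clone(struct demo_buf *tbf, const struct demo_buf *bf)
{
        tbf->desc = bf->desc;
        tbf->stale = 0;
}

int main(void)
{
        struct demo_buf held = { .desc = { 0, 0xabcd }, .stale = 1 };
        struct demo_buf fresh;

        demo_clone(&fresh, &held);
        printf("ds_data=%#x stale=%d\n", fresh.desc.ds_data, fresh.stale);
        return 0;
}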
 
-/* Completion routine of an aggregate */
-
-static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
-                                     struct ath_txq *txq,
-                                     struct ath_buf *bf,
-                                     struct list_head *bf_q,
-                                     int txok)
+static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
+                                struct ath_buf *bf, struct list_head *bf_q,
+                                int txok)
 {
-       struct ath_node *an = bf->bf_node;
-       struct ath_atx_tid *tid = ATH_AN_2_TID(an, bf->bf_tidno);
-       struct ath_buf *bf_last = bf->bf_lastbf;
+       struct ath_node *an = NULL;
+       struct sk_buff *skb;
+       struct ieee80211_sta *sta;
+       struct ieee80211_hdr *hdr;
+       struct ath_atx_tid *tid = NULL;
+       struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
        struct ath_desc *ds = bf_last->bf_desc;
-       struct ath_buf *bf_next, *bf_lastq = NULL;
        struct list_head bf_head, bf_pending;
        u16 seq_st = 0;
        u32 ba[WME_BA_BMP_SIZE >> 5];
        int isaggr, txfail, txpending, sendbar = 0, needreset = 0;
-       int isnodegone = (an->an_flags & ATH_NODE_CLEAN);
 
-       isaggr = bf->bf_isaggr;
-       if (isaggr) {
-               if (txok) {
-                       if (ATH_DS_TX_BA(ds)) {
-                               /*
-                                * extract starting sequence and
-                                * block-ack bitmap
-                                */
-                               seq_st = ATH_DS_BA_SEQ(ds);
-                               memcpy(ba,
-                                       ATH_DS_BA_BITMAP(ds),
-                                       WME_BA_BMP_SIZE >> 3);
-                       } else {
-                               memzero(ba, WME_BA_BMP_SIZE >> 3);
+       skb = (struct sk_buff *)bf->bf_mpdu;
+       hdr = (struct ieee80211_hdr *)skb->data;
 
-                               /*
-                                * AR5416 can become deaf/mute when BA
-                                * issue happens. Chip needs to be reset.
-                                * But AP code may have synchronization issues
-                                * when performing an internal reset in this routine.
-                                * Only enable reset in STA mode for now.
-                                */
-                               if (sc->sc_opmode == ATH9K_M_STA)
-                                       needreset = 1;
-                       }
+       rcu_read_lock();
+
+       sta = ieee80211_find_sta(sc->hw, hdr->addr1);
+       if (!sta) {
+               rcu_read_unlock();
+               return;
+       }
+
+       an = (struct ath_node *)sta->drv_priv;
+       tid = ATH_AN_2_TID(an, bf->bf_tidno);
+
+       isaggr = bf_isaggr(bf);
+       memset(ba, 0, WME_BA_BMP_SIZE >> 3);
+
+       if (isaggr && txok) {
+               if (ATH_DS_TX_BA(ds)) {
+                       seq_st = ATH_DS_BA_SEQ(ds);
+                       memcpy(ba, ATH_DS_BA_BITMAP(ds),
+                              WME_BA_BMP_SIZE >> 3);
                } else {
-                       memzero(ba, WME_BA_BMP_SIZE >> 3);
+                       /*
+                        * AR5416 can become deaf/mute when BA
+                        * issue happens. Chip needs to be reset.
+                        * But AP code may have synchronization issues
+                        * when performing an internal reset in this routine.
+                        * Only enable reset in STA mode for now.
+                        */
+                       if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION)
+                               needreset = 1;
                }
        }
 
@@ -1068,14 +326,13 @@ static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
                } else if (!isaggr && txok) {
                        /* transmit completion */
                } else {
-
-                       if (!tid->cleanup_inprogress && !isnodegone &&
+                       if (!(tid->state & AGGR_CLEANUP) &&
                            ds->ds_txstat.ts_flags != ATH9K_TX_SW_ABORTED) {
                                if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
                                        ath_tx_set_retry(sc, bf);
                                        txpending = 1;
                                } else {
-                                       bf->bf_isxretried = 1;
+                                       bf->bf_state.bf_type |= BUF_XRETRY;
                                        txfail = 1;
                                        sendbar = 1;
                                }
@@ -1087,37 +344,12 @@ static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
                                txfail = 1;
                        }
                }
-               /*
-                * Remove ath_buf's of this sub-frame from aggregate queue.
-                */
-               if (bf_next == NULL) {  /* last subframe in the aggregate */
-                       ASSERT(bf->bf_lastfrm == bf_last);
 
-                       /*
-                        * The last descriptor of the last sub frame could be
-                        * a holding descriptor for h/w. If that's the case,
-                        * bf->bf_lastfrm won't be in the bf_q.
-                        * Make sure we handle bf_q properly here.
-                        */
-
-                       if (!list_empty(bf_q)) {
-                               bf_lastq = list_entry(bf_q->prev,
-                                       struct ath_buf, list);
-                               list_cut_position(&bf_head,
-                                       bf_q, &bf_lastq->list);
-                       } else {
-                               /*
-                                * XXX: if the last subframe only has one
-                                * descriptor, which is also being used as
-                                * a holding descriptor, then the ath_buf
-                                * is not in the bf_q at all.
-                                */
-                               INIT_LIST_HEAD(&bf_head);
-                       }
+               if (bf_next == NULL) {
+                       INIT_LIST_HEAD(&bf_head);
                } else {
                        ASSERT(!list_empty(bf_q));
-                       list_cut_position(&bf_head,
-                               bf_q, &bf->bf_lastfrm->list);
+                       list_move_tail(&bf->list, &bf_head);
                }
 
                if (!txpending) {
@@ -1129,66 +361,22 @@ static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
                        ath_tx_update_baw(sc, tid, bf->bf_seqno);
                        spin_unlock_bh(&txq->axq_lock);
 
-                       /* complete this sub-frame */
                        ath_tx_complete_buf(sc, bf, &bf_head, !txfail, sendbar);
                } else {
-                       /*
-                        * retry the un-acked ones
-                        */
-                       /*
-                        * XXX: if the last descriptor is a holding descriptor,
-                        * then in order to requeue the frame to the software
-                        * queue, we need to allocate a new descriptor and
-                        * copy the content of the holding descriptor to it.
-                        */
+                       /* retry the un-acked ones */
                        if (bf->bf_next == NULL &&
                            bf_last->bf_status & ATH_BUFSTATUS_STALE) {
                                struct ath_buf *tbf;
 
-                               /* allocate new descriptor */
-                               spin_lock_bh(&sc->sc_txbuflock);
-                               ASSERT(!list_empty((&sc->sc_txbuf)));
-                               tbf = list_first_entry(&sc->sc_txbuf,
-                                               struct ath_buf, list);
-                               list_del(&tbf->list);
-                               spin_unlock_bh(&sc->sc_txbuflock);
-
-                               ATH_TXBUF_RESET(tbf);
-
-                               /* copy descriptor content */
-                               tbf->bf_mpdu = bf_last->bf_mpdu;
-                               tbf->bf_node = bf_last->bf_node;
-                               tbf->bf_buf_addr = bf_last->bf_buf_addr;
-                               *(tbf->bf_desc) = *(bf_last->bf_desc);
-
-                               /* link it to the frame */
-                               if (bf_lastq) {
-                                       bf_lastq->bf_desc->ds_link =
-                                               tbf->bf_daddr;
-                                       bf->bf_lastfrm = tbf;
-                                       ath9k_hw_cleartxdesc(sc->sc_ah,
-                                               bf->bf_lastfrm->bf_desc);
-                               } else {
-                                       tbf->bf_state = bf_last->bf_state;
-                                       tbf->bf_lastfrm = tbf;
-                                       ath9k_hw_cleartxdesc(sc->sc_ah,
-                                               tbf->bf_lastfrm->bf_desc);
-
-                                       /* copy the DMA context */
-                                       copy_dma_mem_context(
-                                               get_dma_mem_context(tbf,
-                                                       bf_dmacontext),
-                                               get_dma_mem_context(bf_last,
-                                                       bf_dmacontext));
-                               }
+                               tbf = ath_clone_txbuf(sc, bf_last);
+                               ath9k_hw_cleartxdesc(sc->sc_ah, tbf->bf_desc);
                                list_add_tail(&tbf->list, &bf_head);
                        } else {
                                /*
                                 * Clear descriptor status words for
                                 * software retry
                                 */
-                               ath9k_hw_cleartxdesc(sc->sc_ah,
-                                       bf->bf_lastfrm->bf_desc);
+                               ath9k_hw_cleartxdesc(sc->sc_ah, bf->bf_desc);
                        }
 
                        /*
@@ -1201,383 +389,49 @@ static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
                bf = bf_next;
        }
 
-       /*
-        * node is already gone. no more assocication
-        * with the node. the node might have been freed
-        * any  node acces can result in panic.note tid
-        * is part of the node.
-        */
-       if (isnodegone)
-               return;
-
-       if (tid->cleanup_inprogress) {
-               /* check to see if we're done with cleaning the h/w queue */
-               spin_lock_bh(&txq->axq_lock);
-
+       if (tid->state & AGGR_CLEANUP) {
                if (tid->baw_head == tid->baw_tail) {
-                       tid->addba_exchangecomplete = 0;
+                       tid->state &= ~AGGR_ADDBA_COMPLETE;
                        tid->addba_exchangeattempts = 0;
-                       spin_unlock_bh(&txq->axq_lock);
-
-                       tid->cleanup_inprogress = false;
+                       tid->state &= ~AGGR_CLEANUP;
 
                        /* send buffered frames as singles */
                        ath_tx_flush_tid(sc, tid);
-               } else
-                       spin_unlock_bh(&txq->axq_lock);
-
+               }
+               rcu_read_unlock();
                return;
        }
 
-       /*
-        * prepend un-acked frames to the beginning of the pending frame queue
-        */
+       /* prepend un-acked frames to the beginning of the pending frame queue */
        if (!list_empty(&bf_pending)) {
                spin_lock_bh(&txq->axq_lock);
-               /* Note: we _prepend_, we _do_not_ add to
-                * the end of the queue! */
                list_splice(&bf_pending, &tid->buf_q);
                ath_tx_queue_tid(txq, tid);
                spin_unlock_bh(&txq->axq_lock);
        }
 
-       if (needreset)
-               ath_internal_reset(sc);
-
-       return;
-}
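
As the deleted comment above stresses, the requeue uses list_splice() rather than list_splice_tail(): un-acked frames must land at the front of tid->buf_q so retries go out before newer traffic. A tiny ordering demo (plain arrays instead of struct list_head):

#include <stdio.h>

int main(void)
{
        int pending[] = { 27, 28, 29 };  /* un-acked seqnos to retry */
        int buf_q[]   = { 30, 31, 32 };  /* frames already waiting */
        int merged[6];
        int n = 0;

        /* prepend semantics: retries first... */
        for (int i = 0; i < 3; i++)
                merged[n++] = pending[i];
        /* ...then the frames that were already queued */
        for (int i = 0; i < 3; i++)
                merged[n++] = buf_q[i];

        for (int i = 0; i < 6; i++)
                printf("%d ", merged[i]);  /* 27 28 29 30 31 32 */
        printf("\n");
        return 0;
}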
-
-/* Process completed xmit descriptors from the specified queue */
-
-static int ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
-{
-       struct ath_hal *ah = sc->sc_ah;
-       struct ath_buf *bf, *lastbf, *bf_held = NULL;
-       struct list_head bf_head;
-       struct ath_desc *ds, *tmp_ds;
-       struct sk_buff *skb;
-       struct ieee80211_tx_info *tx_info;
-       struct ath_tx_info_priv *tx_info_priv;
-       int nacked, txok, nbad = 0, isrifs = 0;
-       int status;
-
-       DPRINTF(sc, ATH_DBG_QUEUE,
-               "%s: tx queue %d (%x), link %p\n", __func__,
-               txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
-               txq->axq_link);
-
-       nacked = 0;
-       for (;;) {
-               spin_lock_bh(&txq->axq_lock);
-               txq->axq_intrcnt = 0; /* reset periodic desc intr count */
-               if (list_empty(&txq->axq_q)) {
-                       txq->axq_link = NULL;
-                       txq->axq_linkbuf = NULL;
-                       spin_unlock_bh(&txq->axq_lock);
-                       break;
-               }
-               bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
-
-               /*
-                * There is a race condition that a BH gets scheduled
-                * after sw writes TxE and before hw reloads the last
-                * descriptor to get the newly chained one.
-                * Software must keep the last DONE descriptor as a
-                * holding descriptor - software does so by marking
-                * it with the STALE flag.
-                */
-               bf_held = NULL;
-               if (bf->bf_status & ATH_BUFSTATUS_STALE) {
-                       bf_held = bf;
-                       if (list_is_last(&bf_held->list, &txq->axq_q)) {
-                               /* FIXME:
-                                * The holding descriptor is the last
-                                * descriptor in queue. It's safe to remove
-                                * the last holding descriptor in BH context.
-                                */
-                               spin_unlock_bh(&txq->axq_lock);
-                               break;
-                       } else {
-                               /* Let's work with the next buffer now */
-                               bf = list_entry(bf_held->list.next,
-                                       struct ath_buf, list);
-                       }
-               }
-
-               lastbf = bf->bf_lastbf;
-               ds = lastbf->bf_desc;    /* NB: last descriptor */
-
-               status = ath9k_hw_txprocdesc(ah, ds);
-               if (status == -EINPROGRESS) {
-                       spin_unlock_bh(&txq->axq_lock);
-                       break;
-               }
-               if (bf->bf_desc == txq->axq_lastdsWithCTS)
-                       txq->axq_lastdsWithCTS = NULL;
-               if (ds == txq->axq_gatingds)
-                       txq->axq_gatingds = NULL;
-
-               /*
-                * Remove ath_buf's of the same transmit unit from txq,
-                * however leave the last descriptor back as the holding
-                * descriptor for hw.
-                */
-               lastbf->bf_status |= ATH_BUFSTATUS_STALE;
-               INIT_LIST_HEAD(&bf_head);
-
-               if (!list_is_singular(&lastbf->list))
-                       list_cut_position(&bf_head,
-                               &txq->axq_q, lastbf->list.prev);
-
-               txq->axq_depth--;
-
-               if (bf->bf_isaggr)
-                       txq->axq_aggr_depth--;
-
-               txok = (ds->ds_txstat.ts_status == 0);
-
-               spin_unlock_bh(&txq->axq_lock);
-
-               if (bf_held) {
-                       list_del(&bf_held->list);
-                       spin_lock_bh(&sc->sc_txbuflock);
-                       list_add_tail(&bf_held->list, &sc->sc_txbuf);
-                       spin_unlock_bh(&sc->sc_txbuflock);
-               }
-
-               if (!bf->bf_isampdu) {
-                       /*
-                        * This frame is sent out as a single frame.
-                        * Use hardware retry status for this frame.
-                        */
-                       bf->bf_retries = ds->ds_txstat.ts_longretry;
-                       if (ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY)
-                               bf->bf_isxretried = 1;
-                       nbad = 0;
-               } else {
-                       nbad = ath_tx_num_badfrms(sc, bf, txok);
-               }
-               skb = bf->bf_mpdu;
-               tx_info = IEEE80211_SKB_CB(skb);
-               tx_info_priv = (struct ath_tx_info_priv *)
-                       tx_info->driver_data[0];
-               if (ds->ds_txstat.ts_status & ATH9K_TXERR_FILT)
-                       tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
-               if ((ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) == 0 &&
-                               (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) {
-                       if (ds->ds_txstat.ts_status == 0)
-                               nacked++;
-
-                       if (bf->bf_isdata) {
-                               if (isrifs)
-                                       tmp_ds = bf->bf_rifslast->bf_desc;
-                               else
-                                       tmp_ds = ds;
-                               memcpy(&tx_info_priv->tx,
-                                       &tmp_ds->ds_txstat,
-                                       sizeof(tx_info_priv->tx));
-                               tx_info_priv->n_frames = bf->bf_nframes;
-                               tx_info_priv->n_bad_frames = nbad;
-                       }
-               }
-
-               /*
-                * Complete this transmit unit
-                */
-               if (bf->bf_isampdu)
-                       ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, txok);
-               else
-                       ath_tx_complete_buf(sc, bf, &bf_head, txok, 0);
-
-               /* Wake up mac80211 queue */
+       rcu_read_unlock();
 
-               spin_lock_bh(&txq->axq_lock);
-               if (txq->stopped && ath_txq_depth(sc, txq->axq_qnum) <=
-                               (ATH_TXBUF - 20)) {
-                       int qnum;
-                       qnum = ath_get_mac80211_qnum(txq->axq_qnum, sc);
-                       if (qnum != -1) {
-                               ieee80211_wake_queue(sc->hw, qnum);
-                               txq->stopped = 0;
-                       }
-
-               }
-
-               /*
-                * schedule any pending packets if aggregation is enabled
-                */
-               if (sc->sc_txaggr)
-                       ath_txq_schedule(sc, txq);
-               spin_unlock_bh(&txq->axq_lock);
-       }
-       return nacked;
-}
-
-static void ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq)
-{
-       struct ath_hal *ah = sc->sc_ah;
-
-       (void) ath9k_hw_stoptxdma(ah, txq->axq_qnum);
-       DPRINTF(sc, ATH_DBG_XMIT, "%s: tx queue [%u] %x, link %p\n",
-               __func__, txq->axq_qnum,
-               ath9k_hw_gettxbuf(ah, txq->axq_qnum), txq->axq_link);
-}
-
-/* Drain only the data queues */
-
-static void ath_drain_txdataq(struct ath_softc *sc, bool retry_tx)
-{
-       struct ath_hal *ah = sc->sc_ah;
-       int i;
-       int npend = 0;
-       enum ath9k_ht_macmode ht_macmode = ath_cwm_macmode(sc);
-
-       /* XXX return value */
-       if (!sc->sc_invalid) {
-               for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
-                       if (ATH_TXQ_SETUP(sc, i)) {
-                               ath_tx_stopdma(sc, &sc->sc_txq[i]);
-
-                               /* The TxDMA may not really be stopped.
-                                * Double check the hal tx pending count */
-                               npend += ath9k_hw_numtxpending(ah,
-                                       sc->sc_txq[i].axq_qnum);
-                       }
-               }
-       }
-
-       if (npend) {
-               int status;
-
-               /* TxDMA not stopped, reset the hal */
-               DPRINTF(sc, ATH_DBG_XMIT,
-                       "%s: Unable to stop TxDMA. Reset HAL!\n", __func__);
-
-               spin_lock_bh(&sc->sc_resetlock);
-               if (!ath9k_hw_reset(ah, sc->sc_opmode,
-                       &sc->sc_curchan, ht_macmode,
-                       sc->sc_tx_chainmask, sc->sc_rx_chainmask,
-                       sc->sc_ht_extprotspacing, true, &status)) {
-
-                       DPRINTF(sc, ATH_DBG_FATAL,
-                               "%s: unable to reset hardware; hal status %u\n",
-                               __func__,
-                               status);
-               }
-               spin_unlock_bh(&sc->sc_resetlock);
-       }
-
-       for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
-               if (ATH_TXQ_SETUP(sc, i))
-                       ath_tx_draintxq(sc, &sc->sc_txq[i], retry_tx);
-       }
-}
-
-/* Add a sub-frame to block ack window */
-
-static void ath_tx_addto_baw(struct ath_softc *sc,
-                            struct ath_atx_tid *tid,
-                            struct ath_buf *bf)
-{
-       int index, cindex;
-
-       if (bf->bf_isretried)
-               return;
-
-       index  = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
-       cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
-
-       ASSERT(tid->tx_buf[cindex] == NULL);
-       tid->tx_buf[cindex] = bf;
-
-       if (index >= ((tid->baw_tail - tid->baw_head) &
-               (ATH_TID_MAX_BUFS - 1))) {
-               tid->baw_tail = cindex;
-               INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
-       }
-}
-
-/*
- * Function to send an A-MPDU
- * NB: must be called with txq lock held
- */
-
-static int ath_tx_send_ampdu(struct ath_softc *sc,
-                            struct ath_txq *txq,
-                            struct ath_atx_tid *tid,
-                            struct list_head *bf_head,
-                            struct ath_tx_control *txctl)
-{
-       struct ath_buf *bf;
-       struct sk_buff *skb;
-       struct ieee80211_tx_info *tx_info;
-       struct ath_tx_info_priv *tx_info_priv;
-
-       BUG_ON(list_empty(bf_head));
-
-       bf = list_first_entry(bf_head, struct ath_buf, list);
-       bf->bf_isampdu = 1;
-       bf->bf_seqno = txctl->seqno; /* save seqno and tidno in buffer */
-       bf->bf_tidno = txctl->tidno;
-
-       /*
-        * Do not queue to h/w when any of the following conditions is true:
-        * - there are pending frames in software queue
-        * - the TID is currently paused for ADDBA/BAR request
-        * - seqno is not within block-ack window
-        * - h/w queue depth exceeds low water mark
-        */
-       if (!list_empty(&tid->buf_q) || tid->paused ||
-           !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) ||
-           txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
-               /*
-                * Add this frame to software queue for scheduling later
-                * for aggregation.
-                */
-               list_splice_tail_init(bf_head, &tid->buf_q);
-               ath_tx_queue_tid(txq, tid);
-               return 0;
-       }
-
-       skb = (struct sk_buff *)bf->bf_mpdu;
-       tx_info = IEEE80211_SKB_CB(skb);
-       tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0];
-       memcpy(bf->bf_rcs, tx_info_priv->rcs, 4 * sizeof(tx_info_priv->rcs[0]));
-
-       /* Add sub-frame to BAW */
-       ath_tx_addto_baw(sc, tid, bf);
-
-       /* Queue to h/w without aggregation */
-       bf->bf_nframes = 1;
-       bf->bf_lastbf = bf->bf_lastfrm; /* one single frame */
-       ath_buf_set_rate(sc, bf);
-       ath_tx_txqaddbuf(sc, txq, bf_head);
-       return 0;
+       if (needreset)
+               ath_reset(sc, false);
 }
 
-/*
- * looks up the rate
- * returns aggr limit based on lowest of the rates
- */
-
-static u32 ath_lookup_rate(struct ath_softc *sc,
-                                struct ath_buf *bf)
+static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
+                          struct ath_atx_tid *tid)
 {
-       const struct ath9k_rate_table *rt = sc->sc_currates;
+       struct ath_rate_table *rate_table = sc->cur_rate_table;
        struct sk_buff *skb;
        struct ieee80211_tx_info *tx_info;
+       struct ieee80211_tx_rate *rates;
        struct ath_tx_info_priv *tx_info_priv;
-       u32 max_4ms_framelen, frame_length;
+       u32 max_4ms_framelen, frmlen;
        u16 aggr_limit, legacy = 0, maxampdu;
        int i;
 
-
        skb = (struct sk_buff *)bf->bf_mpdu;
        tx_info = IEEE80211_SKB_CB(skb);
-       tx_info_priv = (struct ath_tx_info_priv *)
-               tx_info->driver_data[0];
-       memcpy(bf->bf_rcs,
-               tx_info_priv->rcs, 4 * sizeof(tx_info_priv->rcs[0]));
+       rates = tx_info->control.rates;
+       tx_info_priv = (struct ath_tx_info_priv *)tx_info->rate_driver_data[0];
 
        /*
         * Find the lowest frame length among the rate series that will have a
@@ -1587,15 +441,14 @@ static u32 ath_lookup_rate(struct ath_softc *sc,
        max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;
 
        for (i = 0; i < 4; i++) {
-               if (bf->bf_rcs[i].tries) {
-                       frame_length = bf->bf_rcs[i].max_4ms_framelen;
-
-                       if (rt->info[bf->bf_rcs[i].rix].phy != PHY_HT) {
+               if (rates[i].count) {
+                       if (!WLAN_RC_PHY_HT(rate_table->info[rates[i].idx].phy)) {
                                legacy = 1;
                                break;
                        }
 
-                       max_4ms_framelen = min(max_4ms_framelen, frame_length);
+                       frmlen = rate_table->info[rates[i].idx].max_4ms_framelen;
+                       max_4ms_framelen = min(max_4ms_framelen, frmlen);
                }
        }
 
@@ -1607,15 +460,14 @@ static u32 ath_lookup_rate(struct ath_softc *sc,
        if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
                return 0;
 
-       aggr_limit = min(max_4ms_framelen,
-               (u32)ATH_AMPDU_LIMIT_DEFAULT);
+       aggr_limit = min(max_4ms_framelen, (u32)ATH_AMPDU_LIMIT_DEFAULT);
 
        /*
         * h/w can accept aggregates up to 16-bit lengths (65535).
         * The IE, however, can hold up to 65536, which shows up here
         * as zero. Ignore 65536 since we are constrained by hw.
         */
-       maxampdu = sc->sc_ht_info.maxampdu;
+       maxampdu = tid->an->maxampdu;
        if (maxampdu)
                aggr_limit = min(aggr_limit, maxampdu);
 
@@ -1623,16 +475,16 @@ static u32 ath_lookup_rate(struct ath_softc *sc,
 }
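
The clamping above is easier to follow in isolation: the limit starts at the largest 4 ms burst allowed, shrinks to the smallest per-rate 4 ms frame length among the series actually in use, and is then capped by the driver default and by the peer's advertised A-MPDU maximum. A minimal sketch, with the rate table and the limit parameters as stand-ins rather than the driver's own definitions:

#include <linux/kernel.h>
#include <linux/types.h>

struct sketch_rate {
        bool used;              /* series has a non-zero try count */
        bool ht;                /* HT PHY; one legacy rate vetoes A-MPDU */
        u32 max_4ms_framelen;   /* bytes transmittable in 4 ms */
};

static u32 sketch_aggr_limit(const struct sketch_rate *rates, int n,
                             u32 hw_max, u32 drv_default, u16 peer_maxampdu)
{
        u32 limit = hw_max;
        int i;

        for (i = 0; i < n; i++) {
                if (!rates[i].used)
                        continue;
                if (!rates[i].ht)
                        return 0;       /* legacy rate present: no aggregation */
                limit = min(limit, rates[i].max_4ms_framelen);
        }

        limit = min(limit, drv_default);
        if (peer_maxampdu)              /* zero encodes the 65536 case */
                limit = min(limit, (u32)peer_maxampdu);

        return limit;
}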
 
 /*
- * returns the number of delimiters to be added to
+ * Returns the number of delimiters to be added to
  * meet the minimum required mpdudensity.
- * caller should make sure that the rate is  HT rate .
+ * Caller should make sure that the rate is an HT rate.
  */
-
-static int ath_compute_num_delims(struct ath_softc *sc,
-                                 struct ath_buf *bf,
-                                 u16 frmlen)
+static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
+                                 struct ath_buf *bf, u16 frmlen)
 {
-       const struct ath9k_rate_table *rt = sc->sc_currates;
+       struct ath_rate_table *rt = sc->cur_rate_table;
+       struct sk_buff *skb = bf->bf_mpdu;
+       struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        u32 nsymbits, nsymbols, mpdudensity;
        u16 minlen;
        u8 rc, flags, rix;
@@ -1656,7 +508,7 @@ static int ath_compute_num_delims(struct ath_softc *sc,
         * required minimum length for subframe. Take into account
         * whether high rate is 20 or 40Mhz and half or full GI.
         */
-       mpdudensity = sc->sc_ht_info.mpdudensity;
+       mpdudensity = tid->an->mpdudensity;
 
        /*
         * If there is no mpdu density restriction, no further calculation
@@ -1665,11 +517,11 @@ static int ath_compute_num_delims(struct ath_softc *sc,
        if (mpdudensity == 0)
                return ndelim;
 
-       rix = bf->bf_rcs[0].rix;
-       flags = bf->bf_rcs[0].flags;
-       rc = rt->info[rix].rateCode;
-       width = (flags & ATH_RC_CW40_FLAG) ? 1 : 0;
-       half_gi = (flags & ATH_RC_SGI_FLAG) ? 1 : 0;
+       rix = tx_info->control.rates[0].idx;
+       flags = tx_info->control.rates[0].flags;
+       rc = rt->info[rix].ratecode;
+       width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
+       half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;
 
        if (half_gi)
                nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(mpdudensity);
@@ -1682,9 +534,7 @@ static int ath_compute_num_delims(struct ath_softc *sc,
        nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
        minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;
 
-       /* Is frame shorter than required minimum length? */
        if (frmlen < minlen) {
-               /* Get the minimum number of delimiters required. */
                mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
                ndelim = max(mindelim, ndelim);
        }
@@ -1692,147 +542,86 @@ static int ath_compute_num_delims(struct ath_softc *sc,
        return ndelim;
 }
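
The arithmetic above reduces to: turn the receiver's MPDU density (microseconds here) into OFDM symbols, turn symbols into a minimum subframe length at the chosen rate, and make up any shortfall with 4-byte delimiters. A sketch of that chain, assuming the full-GI case of 4 us per symbol and ATH_AGGR_DELIM_SZ of 4 bytes:

#include <linux/kernel.h>
#include <linux/types.h>

static int sketch_num_delims(u32 mpdudensity_us, u32 bits_per_sym,
                             u16 frmlen, int ndelim)
{
        u32 nsymbols, minlen;
        int mindelim;

        if (mpdudensity_us == 0)
                return ndelim;          /* no density restriction */

        /* symbols spanning the density interval, at 4 us per symbol */
        nsymbols = mpdudensity_us / 4;

        /* shortest subframe (bytes) that already covers that time */
        minlen = (nsymbols * bits_per_sym) / 8;

        if (frmlen < minlen) {
                /* pad the difference with 4-byte delimiters */
                mindelim = (minlen - frmlen) / 4;
                ndelim = max(mindelim, ndelim);
        }

        return ndelim;
}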
 
-/*
- * For aggregation from software buffer queue.
- * NB: must be called with txq lock held
- */
-
 static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
-                                       struct ath_atx_tid *tid,
-                                       struct list_head *bf_q,
-                                       struct ath_buf **bf_last,
-                                       struct aggr_rifs_param *param,
-                                       int *prev_frames)
+                                            struct ath_atx_tid *tid,
+                                            struct list_head *bf_q)
 {
 #define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
-       struct ath_buf *bf, *tbf, *bf_first, *bf_prev = NULL;
-       struct list_head bf_head;
-       int rl = 0, nframes = 0, ndelim;
+       struct ath_buf *bf, *bf_first, *bf_prev = NULL;
+       int rl = 0, nframes = 0, ndelim, prev_al = 0;
        u16 aggr_limit = 0, al = 0, bpad = 0,
                al_delta, h_baw = tid->baw_size / 2;
        enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
-       int prev_al = 0, is_ds_rate = 0;
-       INIT_LIST_HEAD(&bf_head);
-
-       BUG_ON(list_empty(&tid->buf_q));
 
        bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);
 
        do {
                bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
 
-               /*
-                * do not step over block-ack window
-                */
+               /* do not step over block-ack window */
                if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) {
                        status = ATH_AGGR_BAW_CLOSED;
                        break;
                }
 
                if (!rl) {
-                       aggr_limit = ath_lookup_rate(sc, bf);
+                       aggr_limit = ath_lookup_rate(sc, bf, tid);
                        rl = 1;
-                       /*
-                        * Is rate dual stream
-                        */
-                       is_ds_rate =
-                               (bf->bf_rcs[0].flags & ATH_RC_DS_FLAG) ? 1 : 0;
                }
 
-               /*
-                * do not exceed aggregation limit
-                */
+               /* do not exceed aggregation limit */
                al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen;
 
-               if (nframes && (aggr_limit <
-                       (al + bpad + al_delta + prev_al))) {
+               if (nframes &&
+                   (aggr_limit < (al + bpad + al_delta + prev_al))) {
                        status = ATH_AGGR_LIMITED;
                        break;
                }
 
-               /*
-                * do not exceed subframe limit
-                */
-               if ((nframes + *prev_frames) >=
-                   min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
+               /* do not exceed subframe limit */
+               if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
                        status = ATH_AGGR_LIMITED;
                        break;
                }
+               nframes++;
 
-               /*
-                * add padding for previous frame to aggregation length
-                */
+               /* add padding for previous frame to aggregation length */
                al += bpad + al_delta;
 
                /*
                 * Get the delimiters needed to meet the MPDU
                 * density for this node.
                 */
-               ndelim = ath_compute_num_delims(sc, bf_first, bf->bf_frmlen);
-
+               ndelim = ath_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen);
                bpad = PADBYTES(al_delta) + (ndelim << 2);
 
                bf->bf_next = NULL;
-               bf->bf_lastfrm->bf_desc->ds_link = 0;
+               bf->bf_desc->ds_link = 0;
 
-               /*
-                * this packet is part of an aggregate
-                * - remove all descriptors belonging to this frame from
-                *   software queue
-                * - add it to block ack window
-                * - set up descriptors for aggregation
-                */
-               list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);
+               /* link buffers of this frame to the aggregate */
                ath_tx_addto_baw(sc, tid, bf);
-
-               list_for_each_entry(tbf, &bf_head, list) {
-                       ath9k_hw_set11n_aggr_middle(sc->sc_ah,
-                               tbf->bf_desc, ndelim);
-               }
-
-               /*
-                * link buffers of this frame to the aggregate
-                */
-               list_splice_tail_init(&bf_head, bf_q);
-               nframes++;
-
+               ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
+               list_move_tail(&bf->list, bf_q);
                if (bf_prev) {
                        bf_prev->bf_next = bf;
-                       bf_prev->bf_lastfrm->bf_desc->ds_link = bf->bf_daddr;
+                       bf_prev->bf_desc->ds_link = bf->bf_daddr;
                }
                bf_prev = bf;
-
-#ifdef AGGR_NOSHORT
-               /*
-                * terminate aggregation on a small packet boundary
-                */
-               if (bf->bf_frmlen < ATH_AGGR_MINPLEN) {
-                       status = ATH_AGGR_SHORTPKT;
-                       break;
-               }
-#endif
        } while (!list_empty(&tid->buf_q));
 
        bf_first->bf_al = al;
        bf_first->bf_nframes = nframes;
-       *bf_last = bf_prev;
+
        return status;
 #undef PADBYTES
 }
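
Aggregation stops the moment a subframe would step outside the block-ack window. Sequence numbers live in a 12-bit space, so the window test is modular; a sketch assuming the usual shape of the BAW_WITHIN macro:

#include <linux/types.h>

static bool sketch_baw_within(u16 seq_start, u16 baw_size, u16 seqno)
{
        /* modular distance from the window start, within 0..4095 */
        return ((seqno - seq_start) & 4095) < baw_size;
}

/* e.g. seq_start = 4090, baw_size = 64: seqno 10 wraps around and is
 * still inside the window; seqno 60 is not. */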
 
-/*
- * process pending frames possibly doing a-mpdu aggregation
- * NB: must be called with txq lock held
- */
-
-static void ath_tx_sched_aggr(struct ath_softc *sc,
-       struct ath_txq *txq, struct ath_atx_tid *tid)
+static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
+                             struct ath_atx_tid *tid)
 {
-       struct ath_buf *bf, *tbf, *bf_last, *bf_lastaggr = NULL;
+       struct ath_buf *bf;
        enum ATH_AGGR_STATUS status;
        struct list_head bf_q;
-       struct aggr_rifs_param param = {0, 0, 0, 0, NULL};
-       int prev_frames = 0;
 
        do {
                if (list_empty(&tid->buf_q))
@@ -1840,379 +629,173 @@ static void ath_tx_sched_aggr(struct ath_softc *sc,
 
                INIT_LIST_HEAD(&bf_q);
 
-               status = ath_tx_form_aggr(sc, tid, &bf_q, &bf_lastaggr, &param,
-                                         &prev_frames);
+               status = ath_tx_form_aggr(sc, tid, &bf_q);
 
                /*
-                * no frames picked up to be aggregated; block-ack
-                * window is not open
+                * no frames picked up to be aggregated;
+                * block-ack window is not open.
                 */
                if (list_empty(&bf_q))
                        break;
 
                bf = list_first_entry(&bf_q, struct ath_buf, list);
-               bf_last = list_entry(bf_q.prev, struct ath_buf, list);
-               bf->bf_lastbf = bf_last;
+               bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
 
-               /*
-                * if only one frame, send as non-aggregate
-                */
+               /* if only one frame, send as non-aggregate */
                if (bf->bf_nframes == 1) {
-                       ASSERT(bf->bf_lastfrm == bf_last);
-
-                       bf->bf_isaggr = 0;
-                       /*
-                        * clear aggr bits for every descriptor
-                        * XXX TODO: is there a way to optimize it?
-                        */
-                       list_for_each_entry(tbf, &bf_q, list) {
-                               ath9k_hw_clr11n_aggr(sc->sc_ah, tbf->bf_desc);
-                       }
-
+                       bf->bf_state.bf_type &= ~BUF_AGGR;
+                       ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
                        ath_buf_set_rate(sc, bf);
                        ath_tx_txqaddbuf(sc, txq, &bf_q);
                        continue;
                }
 
-               /*
-                * setup first desc with rate and aggr info
-                */
-               bf->bf_isaggr  = 1;
+               /* setup first desc of aggregate */
+               bf->bf_state.bf_type |= BUF_AGGR;
                ath_buf_set_rate(sc, bf);
                ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);
 
-               /*
-                * anchor last frame of aggregate correctly
-                */
-               ASSERT(bf_lastaggr);
-               ASSERT(bf_lastaggr->bf_lastfrm == bf_last);
-               tbf = bf_lastaggr;
-               ath9k_hw_set11n_aggr_last(sc->sc_ah, tbf->bf_desc);
-
-               /* XXX: We don't enter into this loop, consider removing this */
-               while (!list_empty(&bf_q) && !list_is_last(&tbf->list, &bf_q)) {
-                       tbf = list_entry(tbf->list.next, struct ath_buf, list);
-                       ath9k_hw_set11n_aggr_last(sc->sc_ah, tbf->bf_desc);
-               }
+               /* anchor last desc of aggregate */
+               ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);
 
                txq->axq_aggr_depth++;
-
-               /*
-                * Normal aggregate, queue to hardware
-                */
                ath_tx_txqaddbuf(sc, txq, &bf_q);
 
        } while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
                 status != ATH_AGGR_BAW_CLOSED);
 }
 
-/* Called with txq lock held */
-
-static void ath_tid_drain(struct ath_softc *sc,
-                         struct ath_txq *txq,
-                         struct ath_atx_tid *tid,
-                         bool bh_flag)
+int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
+                     u16 tid, u16 *ssn)
 {
-       struct ath_buf *bf;
-       struct list_head bf_head;
-       INIT_LIST_HEAD(&bf_head);
-
-       for (;;) {
-               if (list_empty(&tid->buf_q))
-                       break;
-               bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
-
-               list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);
-
-               /* update baw for software retried frame */
-               if (bf->bf_isretried)
-                       ath_tx_update_baw(sc, tid, bf->bf_seqno);
-
-               /*
-                * do not indicate packets while holding txq spinlock.
-                * unlock is intentional here
-                */
-               if (likely(bh_flag))
-                       spin_unlock_bh(&txq->axq_lock);
-               else
-                       spin_unlock(&txq->axq_lock);
+       struct ath_atx_tid *txtid;
+       struct ath_node *an;
 
-               /* complete this sub-frame */
-               ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
+       an = (struct ath_node *)sta->drv_priv;
 
-               if (likely(bh_flag))
-                       spin_lock_bh(&txq->axq_lock);
-               else
-                       spin_lock(&txq->axq_lock);
+       if (sc->sc_flags & SC_OP_TXAGGR) {
+               txtid = ATH_AN_2_TID(an, tid);
+               txtid->state |= AGGR_ADDBA_PROGRESS;
+               ath_tx_pause_tid(sc, txtid);
+               *ssn = txtid->seq_start;
        }
 
-       /*
-        * TODO: For frame(s) that are in the retry state, we will reuse the
-        * sequence number(s) without setting the retry bit. The
-        * alternative is to give up on these and BAR the receiver's window
-        * forward.
-        */
-       tid->seq_next = tid->seq_start;
-       tid->baw_tail = tid->baw_head;
-}
-
-/*
- * Drain all pending buffers
- * NB: must be called with txq lock held
- */
-
-static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
-                                         struct ath_txq *txq,
-                                         bool bh_flag)
-{
-       struct ath_atx_ac *ac, *ac_tmp;
-       struct ath_atx_tid *tid, *tid_tmp;
-
-       list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
-               list_del(&ac->list);
-               ac->sched = false;
-               list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
-                       list_del(&tid->list);
-                       tid->sched = false;
-                       ath_tid_drain(sc, txq, tid, bh_flag);
-               }
-       }
+       return 0;
 }
 
-static int ath_tx_start_dma(struct ath_softc *sc,
-                           struct sk_buff *skb,
-                           struct scatterlist *sg,
-                           u32 n_sg,
-                           struct ath_tx_control *txctl)
+int ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
 {
-       struct ath_node *an = txctl->an;
-       struct ath_buf *bf = NULL;
+       struct ath_node *an = (struct ath_node *)sta->drv_priv;
+       struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
+       struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum];
+       struct ath_buf *bf;
        struct list_head bf_head;
-       struct ath_desc *ds;
-       struct ath_hal *ah = sc->sc_ah;
-       struct ath_txq *txq = &sc->sc_txq[txctl->qnum];
-       struct ath_tx_info_priv *tx_info_priv;
-       struct ath_rc_series *rcs;
-       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-       struct ieee80211_tx_info *tx_info =  IEEE80211_SKB_CB(skb);
-       __le16 fc = hdr->frame_control;
-
-       /* For each sglist entry, allocate an ath_buf for DMA */
        INIT_LIST_HEAD(&bf_head);
-       spin_lock_bh(&sc->sc_txbuflock);
-       if (unlikely(list_empty(&sc->sc_txbuf))) {
-               spin_unlock_bh(&sc->sc_txbuflock);
-               return -ENOMEM;
-       }
-
-       bf = list_first_entry(&sc->sc_txbuf, struct ath_buf, list);
-       list_del(&bf->list);
-       spin_unlock_bh(&sc->sc_txbuflock);
 
-       list_add_tail(&bf->list, &bf_head);
-
-       /* set up this buffer */
-       ATH_TXBUF_RESET(bf);
-       bf->bf_frmlen = txctl->frmlen;
-       bf->bf_isdata = ieee80211_is_data(fc);
-       bf->bf_isbar = ieee80211_is_back_req(fc);
-       bf->bf_ispspoll = ieee80211_is_pspoll(fc);
-       bf->bf_flags = txctl->flags;
-       bf->bf_shpreamble = sc->sc_flags & ATH_PREAMBLE_SHORT;
-       bf->bf_keytype = txctl->keytype;
-       tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0];
-       rcs = tx_info_priv->rcs;
-       bf->bf_rcs[0] = rcs[0];
-       bf->bf_rcs[1] = rcs[1];
-       bf->bf_rcs[2] = rcs[2];
-       bf->bf_rcs[3] = rcs[3];
-       bf->bf_node = an;
-       bf->bf_mpdu = skb;
-       bf->bf_buf_addr = sg_dma_address(sg);
-
-       /* setup descriptor */
-       ds = bf->bf_desc;
-       ds->ds_link = 0;
-       ds->ds_data = bf->bf_buf_addr;
+       if (txtid->state & AGGR_CLEANUP)
+               return 0;
 
-       /*
-        * Save the DMA context in the first ath_buf
-        */
-       copy_dma_mem_context(get_dma_mem_context(bf, bf_dmacontext),
-                            get_dma_mem_context(txctl, dmacontext));
+       if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
+               txtid->addba_exchangeattempts = 0;
+               return 0;
+       }
 
-       /*
-        * Formulate first tx descriptor with tx controls.
-        */
-       ath9k_hw_set11n_txdesc(ah,
-                              ds,
-                              bf->bf_frmlen, /* frame length */
-                              txctl->atype, /* Atheros packet type */
-                              min(txctl->txpower, (u16)60), /* txpower */
-                              txctl->keyix,            /* key cache index */
-                              txctl->keytype,          /* key type */
-                              txctl->flags);           /* flags */
-       ath9k_hw_filltxdesc(ah,
-                           ds,
-                           sg_dma_len(sg),     /* segment length */
-                           true,            /* first segment */
-                           (n_sg == 1) ? true : false, /* last segment */
-                           ds);                /* first descriptor */
-
-       bf->bf_lastfrm = bf;
-       bf->bf_ht = txctl->ht;
+       ath_tx_pause_tid(sc, txtid);
 
+       /* drop all software retried frames and mark this TID */
        spin_lock_bh(&txq->axq_lock);
-
-       if (txctl->ht && sc->sc_txaggr) {
-               struct ath_atx_tid *tid = ATH_AN_2_TID(an, txctl->tidno);
-               if (ath_aggr_query(sc, an, txctl->tidno)) {
-                       /*
-                        * Try aggregation if it's a unicast data frame
-                        * and the destination is HT capable.
-                        */
-                       ath_tx_send_ampdu(sc, txq, tid, &bf_head, txctl);
-               } else {
+       while (!list_empty(&txtid->buf_q)) {
+               bf = list_first_entry(&txtid->buf_q, struct ath_buf, list);
+               if (!bf_isretried(bf)) {
                        /*
-                        * Send this frame as regular when ADDBA exchange
-                        * is neither complete nor pending.
+                        * NB: this is based on the assumption that a
+                        * software retried frame will always stay at
+                        * the head of the software queue.
                         */
-                       ath_tx_send_normal(sc, txq, tid, &bf_head);
-               }
-       } else {
-               bf->bf_lastbf = bf;
-               bf->bf_nframes = 1;
-               ath_buf_set_rate(sc, bf);
-
-               if (ieee80211_is_back_req(fc)) {
-                       /* This is required for resuming tid
-                        * during BAR completion */
-                       bf->bf_tidno = txctl->tidno;
+                       break;
                }
-
-               if (is_multicast_ether_addr(hdr->addr1)) {
-                       struct ath_vap *avp = sc->sc_vaps[txctl->if_id];
-
-                       /*
-                        * When servicing one or more stations in power-save
-                        * mode (or) if there is some mcast data waiting on
-                        * mcast queue (to prevent out of order delivery of
-                        * mcast,bcast packets) multicast frames must be
-                        * buffered until after the beacon. We use the private
-                        * mcast queue for that.
-                        */
-                       /* XXX? more bit in 802.11 frame header */
-                       spin_lock_bh(&avp->av_mcastq.axq_lock);
-                       if (txctl->ps || avp->av_mcastq.axq_depth)
-                               ath_tx_mcastqaddbuf(sc,
-                                       &avp->av_mcastq, &bf_head);
-                       else
-                               ath_tx_txqaddbuf(sc, txq, &bf_head);
-                       spin_unlock_bh(&avp->av_mcastq.axq_lock);
-               } else
-                       ath_tx_txqaddbuf(sc, txq, &bf_head);
+               list_move_tail(&bf->list, &bf_head);
+               ath_tx_update_baw(sc, txtid, bf->bf_seqno);
+               ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
        }
        spin_unlock_bh(&txq->axq_lock);
+
+       if (txtid->baw_head != txtid->baw_tail) {
+               txtid->state |= AGGR_CLEANUP;
+       } else {
+               txtid->state &= ~AGGR_ADDBA_COMPLETE;
+               txtid->addba_exchangeattempts = 0;
+               ath_tx_flush_tid(sc, txtid);
+       }
+
        return 0;
 }
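
Whether ath_tx_aggr_stop() can tear the session down right away hinges on the window state left behind by the drain above; a paper sketch of that decision, taking the BAW head/tail indices as the only inputs:

#include <linux/types.h>

static bool sketch_defer_teardown(int baw_head, int baw_tail)
{
        /*
         * head != tail: subframes are still unacknowledged inside the
         * block-ack window, so flag AGGR_CLEANUP and let the completion
         * path finish the teardown. head == tail: drop the ADDBA state
         * and flush the TID immediately.
         */
        return baw_head != baw_tail;
}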
 
-static void xmit_map_sg(struct ath_softc *sc,
-                       struct sk_buff *skb,
-                       dma_addr_t *pa,
-                       struct ath_tx_control *txctl)
+void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
 {
-       struct ath_xmit_status tx_status;
-       struct ath_atx_tid *tid;
-       struct scatterlist sg;
-
-       *pa = pci_map_single(sc->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
-
-       /* setup S/G list */
-       memset(&sg, 0, sizeof(struct scatterlist));
-       sg_dma_address(&sg) = *pa;
-       sg_dma_len(&sg) = skb->len;
-
-       if (ath_tx_start_dma(sc, skb, &sg, 1, txctl) != 0) {
-               /*
-                *  We have to do drop frame here.
-                */
-               pci_unmap_single(sc->pdev, *pa, skb->len, PCI_DMA_TODEVICE);
+       struct ath_atx_tid *txtid;
+       struct ath_node *an;
 
-               tx_status.retries = 0;
-               tx_status.flags = ATH_TX_ERROR;
+       an = (struct ath_node *)sta->drv_priv;
 
-               if (txctl->ht && sc->sc_txaggr) {
-                       /* Reclaim the seqno. */
-                       tid = ATH_AN_2_TID((struct ath_node *)
-                               txctl->an, txctl->tidno);
-                       DECR(tid->seq_next, IEEE80211_SEQ_MAX);
-               }
-               ath_tx_complete(sc, skb, &tx_status, txctl->an);
+       if (sc->sc_flags & SC_OP_TXAGGR) {
+               txtid = ATH_AN_2_TID(an, tid);
+               txtid->baw_size =
+                       IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
+               txtid->state |= AGGR_ADDBA_COMPLETE;
+               txtid->state &= ~AGGR_ADDBA_PROGRESS;
+               ath_tx_resume_tid(sc, txtid);
        }
 }
 
-/* Initialize TX queue and h/w */
-
-int ath_tx_init(struct ath_softc *sc, int nbufs)
+bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno)
 {
-       int error = 0;
+       struct ath_atx_tid *txtid;
 
-       do {
-               spin_lock_init(&sc->sc_txbuflock);
+       if (!(sc->sc_flags & SC_OP_TXAGGR))
+               return false;
 
-               /* Setup tx descriptors */
-               error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf,
-                       "tx", nbufs * ATH_FRAG_PER_MSDU, ATH_TXDESC);
-               if (error != 0) {
-                       DPRINTF(sc, ATH_DBG_FATAL,
-                               "%s: failed to allocate tx descriptors: %d\n",
-                               __func__, error);
-                       break;
-               }
+       txtid = ATH_AN_2_TID(an, tidno);
 
-               /* XXX allocate beacon state together with vap */
-               error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf,
-                                         "beacon", ATH_BCBUF, 1);
-               if (error != 0) {
-                       DPRINTF(sc, ATH_DBG_FATAL,
-                               "%s: failed to allocate "
-                               "beacon descripotrs: %d\n",
-                               __func__, error);
-                       break;
+       if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
+               if (!(txtid->state & AGGR_ADDBA_PROGRESS) &&
+                   (txtid->addba_exchangeattempts < ADDBA_EXCHANGE_ATTEMPTS)) {
+                       txtid->addba_exchangeattempts++;
+                       return true;
                }
+       }
 
-       } while (0);
-
-       if (error != 0)
-               ath_tx_cleanup(sc);
-
-       return error;
+       return false;
 }
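
These entry points are the driver's half of the ADDBA handshake; the mac80211 side is not part of this patch. A sketch of how an ampdu_action handler of this kernel generation might dispatch to them (the callback names and signatures are assumed from contemporary mac80211, not taken from this file):

#include <net/mac80211.h>

static int sketch_ampdu_action(struct ath_softc *sc, struct ieee80211_hw *hw,
                               enum ieee80211_ampdu_mlme_action action,
                               struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
        int ret = 0;

        switch (action) {
        case IEEE80211_AMPDU_TX_START:
                ret = ath_tx_aggr_start(sc, sta, tid, ssn);
                if (!ret)       /* tell mac80211 to send the ADDBA request */
                        ieee80211_start_tx_ba_cb_irqsafe(hw, sta->addr, tid);
                break;
        case IEEE80211_AMPDU_TX_STOP:
                ret = ath_tx_aggr_stop(sc, sta, tid);
                ieee80211_stop_tx_ba_cb_irqsafe(hw, sta->addr, tid);
                break;
        case IEEE80211_AMPDU_TX_RESUME:
                ath_tx_aggr_resume(sc, sta, tid);
                break;
        default:
                ret = -EOPNOTSUPP;
        }

        return ret;
}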
 
-/* Reclaim all tx queue resources */
+/********************/
+/* Queue Management */
+/********************/
 
-int ath_tx_cleanup(struct ath_softc *sc)
+static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
+                                         struct ath_txq *txq)
 {
-       /* cleanup beacon descriptors */
-       if (sc->sc_bdma.dd_desc_len != 0)
-               ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf);
-
-       /* cleanup tx descriptors */
-       if (sc->sc_txdma.dd_desc_len != 0)
-               ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
+       struct ath_atx_ac *ac, *ac_tmp;
+       struct ath_atx_tid *tid, *tid_tmp;
 
-       return 0;
+       list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
+               list_del(&ac->list);
+               ac->sched = false;
+               list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
+                       list_del(&tid->list);
+                       tid->sched = false;
+                       ath_tid_drain(sc, txq, tid);
+               }
+       }
 }
 
-/* Setup a h/w transmit queue */
-
 struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
 {
-       struct ath_hal *ah = sc->sc_ah;
+       struct ath_hw *ah = sc->sc_ah;
        struct ath9k_tx_queue_info qi;
        int qnum;
 
-       memzero(&qi, sizeof(qi));
+       memset(&qi, 0, sizeof(qi));
        qi.tqi_subtype = subtype;
        qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
        qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
@@ -2247,15 +830,15 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
                 */
                return NULL;
        }
-       if (qnum >= ARRAY_SIZE(sc->sc_txq)) {
+       if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
                DPRINTF(sc, ATH_DBG_FATAL,
-                       "%s: hal qnum %u out of range, max %u!\n",
-                       __func__, qnum, (unsigned int)ARRAY_SIZE(sc->sc_txq));
+                       "qnum %u out of range, max %u!\n",
+                       qnum, (unsigned int)ARRAY_SIZE(sc->tx.txq));
                ath9k_hw_releasetxqueue(ah, qnum);
                return NULL;
        }
        if (!ATH_TXQ_SETUP(sc, qnum)) {
-               struct ath_txq *txq = &sc->sc_txq[qnum];
+               struct ath_txq *txq = &sc->tx.txq[qnum];
 
                txq->axq_qnum = qnum;
                txq->axq_link = NULL;
@@ -2265,69 +848,31 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
                txq->axq_depth = 0;
                txq->axq_aggr_depth = 0;
                txq->axq_totalqueued = 0;
-               txq->axq_intrcnt = 0;
                txq->axq_linkbuf = NULL;
-               sc->sc_txqsetup |= 1<<qnum;
+               sc->tx.txqsetup |= 1<<qnum;
        }
-       return &sc->sc_txq[qnum];
-}
-
-/* Reclaim resources for a setup queue */
-
-void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
-{
-       ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
-       sc->sc_txqsetup &= ~(1<<txq->axq_qnum);
+       return &sc->tx.txq[qnum];
 }
 
-/*
- * Setup a hardware data transmit queue for the specified
- * access control.  The hal may not support all requested
- * queues in which case it will return a reference to a
- * previously setup queue.  We record the mapping from ac's
- * to h/w queues for use by ath_tx_start and also track
- * the set of h/w queues being used to optimize work in the
- * transmit interrupt handler and related routines.
- */
-
-int ath_tx_setup(struct ath_softc *sc, int haltype)
-{
-       struct ath_txq *txq;
-
-       if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) {
-               DPRINTF(sc, ATH_DBG_FATAL,
-                       "%s: HAL AC %u out of range, max %zu!\n",
-                       __func__, haltype, ARRAY_SIZE(sc->sc_haltype2q));
-               return 0;
-       }
-       txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
-       if (txq != NULL) {
-               sc->sc_haltype2q[haltype] = txq->axq_qnum;
-               return 1;
-       } else
-               return 0;
-}
-
-int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype)
+static int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype)
 {
        int qnum;
 
        switch (qtype) {
        case ATH9K_TX_QUEUE_DATA:
-               if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) {
+               if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
                        DPRINTF(sc, ATH_DBG_FATAL,
-                               "%s: HAL AC %u out of range, max %zu!\n",
-                               __func__,
-                               haltype, ARRAY_SIZE(sc->sc_haltype2q));
+                               "HAL AC %u out of range, max %zu!\n",
+                               haltype, ARRAY_SIZE(sc->tx.hwq_map));
                        return -1;
                }
-               qnum = sc->sc_haltype2q[haltype];
+               qnum = sc->tx.hwq_map[haltype];
                break;
        case ATH9K_TX_QUEUE_BEACON:
-               qnum = sc->sc_bhalq;
+               qnum = sc->beacon.beaconq;
                break;
        case ATH9K_TX_QUEUE_CAB:
-               qnum = sc->sc_cabq->axq_qnum;
+               qnum = sc->beacon.cabq->axq_qnum;
                break;
        default:
                qnum = -1;
@@ -2335,26 +880,49 @@ int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype)
        return qnum;
 }
 
-/* Update parameters for a transmit queue */
+struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb)
+{
+       struct ath_txq *txq = NULL;
+       int qnum;
+
+       qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc);
+       txq = &sc->tx.txq[qnum];
+
+       spin_lock_bh(&txq->axq_lock);
+
+       if (txq->axq_depth >= (ATH_TXBUF - 20)) {
+               DPRINTF(sc, ATH_DBG_FATAL,
+                       "TX queue: %d is full, depth: %d\n",
+                       qnum, txq->axq_depth);
+               ieee80211_stop_queue(sc->hw, skb_get_queue_mapping(skb));
+               txq->stopped = 1;
+               spin_unlock_bh(&txq->axq_lock);
+               return NULL;
+       }
+
+       spin_unlock_bh(&txq->axq_lock);
+
+       return txq;
+}
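
ath_test_get_txq() stops the mac80211 queue at a high-water mark; the completion side (visible in the old code removed earlier in this diff) wakes it once the depth falls back to the same ATH_TXBUF - 20 mark. The flow control in miniature:

static bool sketch_queue_full(int axq_depth, int num_txbuf)
{
        /* stop the mac80211 queue before the buffer pool runs dry */
        return axq_depth >= num_txbuf - 20;
}

static bool sketch_queue_ready(int axq_depth, int num_txbuf)
{
        /* wake it once completions bring the depth back under the mark */
        return axq_depth <= num_txbuf - 20;
}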
 
 int ath_txq_update(struct ath_softc *sc, int qnum,
                   struct ath9k_tx_queue_info *qinfo)
 {
-       struct ath_hal *ah = sc->sc_ah;
+       struct ath_hw *ah = sc->sc_ah;
        int error = 0;
        struct ath9k_tx_queue_info qi;
 
-       if (qnum == sc->sc_bhalq) {
+       if (qnum == sc->beacon.beaconq) {
                /*
                 * XXX: for beacon queue, we just save the parameter.
                 * It will be picked up by ath_beaconq_config when
                 * it's necessary.
                 */
-               sc->sc_beacon_qi = *qinfo;
+               sc->beacon.beacon_qi = *qinfo;
                return 0;
        }
 
-       ASSERT(sc->sc_txq[qnum].axq_qnum == qnum);
+       ASSERT(sc->tx.txq[qnum].axq_qnum == qnum);
 
        ath9k_hw_get_txq_props(ah, qnum, &qi);
        qi.tqi_aifs = qinfo->tqi_aifs;
@@ -2365,11 +933,10 @@ int ath_txq_update(struct ath_softc *sc, int qnum,
 
        if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
                DPRINTF(sc, ATH_DBG_FATAL,
-                       "%s: unable to update hardware queue %u!\n",
-                       __func__, qnum);
+                       "Unable to update hardware queue %u!\n", qnum);
                error = -EIO;
        } else {
-               ath9k_hw_resettxqueue(ah, qnum); /* push to h/w */
+               ath9k_hw_resettxqueue(ah, qnum);
        }
 
        return error;
@@ -2378,81 +945,37 @@ int ath_txq_update(struct ath_softc *sc, int qnum,
 int ath_cabq_update(struct ath_softc *sc)
 {
        struct ath9k_tx_queue_info qi;
-       int qnum = sc->sc_cabq->axq_qnum;
-       struct ath_beacon_config conf;
+       int qnum = sc->beacon.cabq->axq_qnum;
 
        ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
        /*
         * Ensure the readytime % is within the bounds.
         */
-       if (sc->sc_config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
-               sc->sc_config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
-       else if (sc->sc_config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
-               sc->sc_config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
-
-       ath_get_beaconconfig(sc, ATH_IF_ID_ANY, &conf);
-       qi.tqi_readyTime =
-               (conf.beacon_interval * sc->sc_config.cabqReadytime) / 100;
+       if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
+               sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
+       else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
+               sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
+
+       qi.tqi_readyTime = (sc->hw->conf.beacon_int *
+                           sc->config.cabqReadytime) / 100;
        ath_txq_update(sc, qnum, &qi);
 
        return 0;
 }
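
A worked instance of the computation above, with assumed values: a beacon interval of 100 TU and a cabqReadytime clamped to 10% give a CAB ready time of (100 * 10) / 100 = 10 TU after each beacon. As a standalone helper, with the bounds passed in as stand-ins for the ATH9K_READY_TIME_*_BOUND macros:

#include <linux/kernel.h>
#include <linux/types.h>

static u32 sketch_cabq_readytime(u32 beacon_int_tu, u32 pct, u32 lo, u32 hi)
{
        pct = clamp(pct, lo, hi);       /* keep the percentage in bounds */
        return (beacon_int_tu * pct) / 100;
}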
 
-int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb)
-{
-       struct ath_tx_control txctl;
-       int error = 0;
-
-       error = ath_tx_prepare(sc, skb, &txctl);
-       if (error == 0)
-               /*
-                * Start DMA mapping.
-                * ath_tx_start_dma() will be called either synchronously
-                * or asynchrounsly once DMA is complete.
-                */
-               xmit_map_sg(sc, skb,
-                           get_dma_mem_context(&txctl, dmacontext),
-                           &txctl);
-       else
-               ath_node_put(sc, txctl.an, ATH9K_BH_STATUS_CHANGE);
-
-       /* failed packets will be dropped by the caller */
-       return error;
-}
-
-/* Deferred processing of transmit interrupt */
-
-void ath_tx_tasklet(struct ath_softc *sc)
-{
-       u64 tsf = ath9k_hw_gettsf64(sc->sc_ah);
-       int i, nacked = 0;
-       u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
-
-       ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
-
-       /*
-        * Process each active queue.
-        */
-       for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
-               if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
-                       nacked += ath_tx_processq(sc, &sc->sc_txq[i]);
-       }
-       if (nacked)
-               sc->sc_lastrx = tsf;
-}
-
-void ath_tx_draintxq(struct ath_softc *sc,
-       struct ath_txq *txq, bool retry_tx)
+/*
+ * Drain a given TX queue (could be Beacon or Data)
+ *
+ * This assumes output has been stopped and
+ * we do not need to block ath_tx_tasklet.
+ */
+void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
 {
        struct ath_buf *bf, *lastbf;
        struct list_head bf_head;
 
        INIT_LIST_HEAD(&bf_head);
 
-       /*
-        * NB: this assumes output has been stopped and
-        *     we do not need to block ath_tx_tasklet
-        */
        for (;;) {
                spin_lock_bh(&txq->axq_lock);
 
@@ -2469,9 +992,9 @@ void ath_tx_draintxq(struct ath_softc *sc,
                        list_del(&bf->list);
                        spin_unlock_bh(&txq->axq_lock);
 
-                       spin_lock_bh(&sc->sc_txbuflock);
-                       list_add_tail(&bf->list, &sc->sc_txbuf);
-                       spin_unlock_bh(&sc->sc_txbuflock);
+                       spin_lock_bh(&sc->tx.txbuflock);
+                       list_add_tail(&bf->list, &sc->tx.txbuf);
+                       spin_unlock_bh(&sc->tx.txbuflock);
                        continue;
                }
 
@@ -2486,342 +1009,1110 @@ void ath_tx_draintxq(struct ath_softc *sc,
 
                spin_unlock_bh(&txq->axq_lock);
 
-               if (bf->bf_isampdu)
-                       ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, 0);
+               if (bf_isampdu(bf))
+                       ath_tx_complete_aggr(sc, txq, bf, &bf_head, 0);
                else
                        ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
        }
 
        /* flush any pending frames if aggregation is enabled */
-       if (sc->sc_txaggr) {
+       if (sc->sc_flags & SC_OP_TXAGGR) {
                if (!retry_tx) {
                        spin_lock_bh(&txq->axq_lock);
-                       ath_txq_drain_pending_buffers(sc, txq,
-                               ATH9K_BH_STATUS_CHANGE);
+                       ath_txq_drain_pending_buffers(sc, txq);
                        spin_unlock_bh(&txq->axq_lock);
                }
        }
 }
 
-/* Drain the transmit queues and reclaim resources */
-
-void ath_draintxq(struct ath_softc *sc, bool retry_tx)
+void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
 {
-       /* stop beacon queue. The beacon will be freed when
-        * we go to INIT state */
-       if (!sc->sc_invalid) {
-               (void) ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_bhalq);
-               DPRINTF(sc, ATH_DBG_XMIT, "%s: beacon queue %x\n", __func__,
-                       ath9k_hw_gettxbuf(sc->sc_ah, sc->sc_bhalq));
+       struct ath_hw *ah = sc->sc_ah;
+       struct ath_txq *txq;
+       int i, npend = 0;
+
+       if (sc->sc_flags & SC_OP_INVALID)
+               return;
+
+       /* Stop beacon queue */
+       ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
+
+       /* Stop data queues */
+       for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
+               if (ATH_TXQ_SETUP(sc, i)) {
+                       txq = &sc->tx.txq[i];
+                       ath9k_hw_stoptxdma(ah, txq->axq_qnum);
+                       npend += ath9k_hw_numtxpending(ah, txq->axq_qnum);
+               }
        }
 
-       ath_drain_txdataq(sc, retry_tx);
-}
+       if (npend) {
+               int r;
 
-u32 ath_txq_depth(struct ath_softc *sc, int qnum)
-{
-       return sc->sc_txq[qnum].axq_depth;
+               DPRINTF(sc, ATH_DBG_XMIT, "Unable to stop TxDMA. Reset HAL!\n");
+
+               spin_lock_bh(&sc->sc_resetlock);
+               r = ath9k_hw_reset(ah, sc->sc_ah->curchan, true);
+               if (r)
+                       DPRINTF(sc, ATH_DBG_FATAL,
+                               "Unable to reset hardware; reset status %u\n",
+                               r);
+               spin_unlock_bh(&sc->sc_resetlock);
+       }
+
+       for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
+               if (ATH_TXQ_SETUP(sc, i))
+                       ath_draintxq(sc, &sc->tx.txq[i], retry_tx);
+       }
 }
 
-u32 ath_txq_aggr_depth(struct ath_softc *sc, int qnum)
+void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
 {
-       return sc->sc_txq[qnum].axq_aggr_depth;
+       ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
+       sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
 }
 
-/* Check if an ADDBA is required. A valid node must be passed. */
-enum ATH_AGGR_CHECK ath_tx_aggr_check(struct ath_softc *sc,
-                                     struct ath_node *an,
-                                     u8 tidno)
+void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
 {
-       struct ath_atx_tid *txtid;
-       DECLARE_MAC_BUF(mac);
+       struct ath_atx_ac *ac;
+       struct ath_atx_tid *tid;
 
-       if (!sc->sc_txaggr)
-               return AGGR_NOT_REQUIRED;
+       if (list_empty(&txq->axq_acq))
+               return;
 
-       /* ADDBA exchange must be completed before sending aggregates */
-       txtid = ATH_AN_2_TID(an, tidno);
+       ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
+       list_del(&ac->list);
+       ac->sched = false;
 
-       if (txtid->addba_exchangecomplete)
-               return AGGR_EXCHANGE_DONE;
+       do {
+               if (list_empty(&ac->tid_q))
+                       return;
 
-       if (txtid->cleanup_inprogress)
-               return AGGR_CLEANUP_PROGRESS;
+               tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
+               list_del(&tid->list);
+               tid->sched = false;
 
-       if (txtid->addba_exchangeinprogress)
-               return AGGR_EXCHANGE_PROGRESS;
+               if (tid->paused)
+                       continue;
 
-       if (!txtid->addba_exchangecomplete) {
-               if (!txtid->addba_exchangeinprogress &&
-                   (txtid->addba_exchangeattempts < ADDBA_EXCHANGE_ATTEMPTS)) {
-                       txtid->addba_exchangeattempts++;
-                       return AGGR_REQUIRED;
+               if ((txq->axq_depth % 2) == 0)
+                       ath_tx_sched_aggr(sc, txq, tid);
+
+               /*
+                * add tid to round-robin queue if more frames
+                * are pending for the tid
+                */
+               if (!list_empty(&tid->buf_q))
+                       ath_tx_queue_tid(txq, tid);
+
+               break;
+       } while (!list_empty(&ac->tid_q));
+
+       if (!list_empty(&ac->tid_q)) {
+               if (!ac->sched) {
+                       ac->sched = true;
+                       list_add_tail(&ac->list, &txq->axq_acq);
                }
        }
+}
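
The scheduler above is a two-level round robin: pop the first access category, pop its first TID, service that TID once, then rotate whatever still has backlog to the tail of its list. The shape of it, reduced to the list operations (types are placeholders):

#include <linux/list.h>
#include <linux/types.h>

struct sketch_tid {
        struct list_head list;
        bool backlog;           /* frames still pending on this TID */
};

struct sketch_ac {
        struct list_head list;
        struct list_head tid_q;
};

static void sketch_schedule(struct list_head *acq)
{
        struct sketch_ac *ac;
        struct sketch_tid *tid;

        if (list_empty(acq))
                return;

        /* pop the first access category, then its first TID */
        ac = list_first_entry(acq, struct sketch_ac, list);
        list_del(&ac->list);

        if (!list_empty(&ac->tid_q)) {
                tid = list_first_entry(&ac->tid_q, struct sketch_tid, list);
                list_del(&tid->list);

                /* ...service one aggregate from this TID here... */

                if (tid->backlog)       /* rotate to the tail, not the head */
                        list_add_tail(&tid->list, &ac->tid_q);
        }

        /* requeue the AC while it still has runnable TIDs */
        if (!list_empty(&ac->tid_q))
                list_add_tail(&ac->list, acq);
}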
 
-       return AGGR_NOT_REQUIRED;
+int ath_tx_setup(struct ath_softc *sc, int haltype)
+{
+       struct ath_txq *txq;
+
+       if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
+               DPRINTF(sc, ATH_DBG_FATAL,
+                       "HAL AC %u out of range, max %zu!\n",
+                       haltype, ARRAY_SIZE(sc->tx.hwq_map));
+               return 0;
+       }
+       txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
+       if (txq != NULL) {
+               sc->tx.hwq_map[haltype] = txq->axq_qnum;
+               return 1;
+       } else
+               return 0;
 }
 
-/* Start TX aggregation */
+/***********/
+/* TX, DMA */
+/***********/
 
-int ath_tx_aggr_start(struct ath_softc *sc,
-                     const u8 *addr,
-                     u16 tid,
-                     u16 *ssn)
+/*
+ * Insert a chain of ath_buf (descriptors) on a txq and
+ * assume the descriptors are already chained together by caller.
+ */
+static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
+                            struct list_head *head)
 {
-       struct ath_atx_tid *txtid;
-       struct ath_node *an;
+       struct ath_hw *ah = sc->sc_ah;
+       struct ath_buf *bf;
 
-       spin_lock_bh(&sc->node_lock);
-       an = ath_node_find(sc, (u8 *) addr);
-       spin_unlock_bh(&sc->node_lock);
+       /*
+        * Insert the frame on the outbound list and
+        * pass it on to the hardware.
+        */
 
-       if (!an) {
-               DPRINTF(sc, ATH_DBG_AGGR,
-                       "%s: Node not found to initialize "
-                       "TX aggregation\n", __func__);
-               return -1;
+       if (list_empty(head))
+               return;
+
+       bf = list_first_entry(head, struct ath_buf, list);
+
+       list_splice_tail_init(head, &txq->axq_q);
+       txq->axq_depth++;
+       txq->axq_totalqueued++;
+       txq->axq_linkbuf = list_entry(txq->axq_q.prev, struct ath_buf, list);
+
+       DPRINTF(sc, ATH_DBG_QUEUE,
+               "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
+
+       if (txq->axq_link == NULL) {
+               ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
+               DPRINTF(sc, ATH_DBG_XMIT,
+                       "TXDP[%u] = %llx (%p)\n",
+                       txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
+       } else {
+               *txq->axq_link = bf->bf_daddr;
+               DPRINTF(sc, ATH_DBG_XMIT, "link[%u] (%p)=%llx (%p)\n",
+                       txq->axq_qnum, txq->axq_link,
+                       ito64(bf->bf_daddr), bf->bf_desc);
        }
+       txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link);
+       ath9k_hw_txstart(ah, txq->axq_qnum);
+}
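
The branch above maintains one invariant: the hardware always has an unbroken descriptor chain to follow. A reduced model of it, with the TXDP register write and the link words as plain pointers (the DMA address type is narrowed for brevity):

#include <linux/types.h>

static void sketch_chain(u32 *txdp, u32 **axq_link, u32 head_daddr,
                         u32 *new_tail_link)
{
        if (*axq_link == NULL) {
                /* queue idle: point TXDP straight at the new head */
                *txdp = head_daddr;
        } else {
                /* queue busy: splice the new head after the old tail */
                **axq_link = head_daddr;
        }

        /* the new tail's link word is where the next burst splices in */
        *axq_link = new_tail_link;
}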
 
-       if (sc->sc_txaggr) {
-               txtid = ATH_AN_2_TID(an, tid);
-               txtid->addba_exchangeinprogress = 1;
-               ath_tx_pause_tid(sc, txtid);
+static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
+{
+       struct ath_buf *bf = NULL;
+
+       spin_lock_bh(&sc->tx.txbuflock);
+
+       if (unlikely(list_empty(&sc->tx.txbuf))) {
+               spin_unlock_bh(&sc->tx.txbuflock);
+               return NULL;
        }
 
-       return 0;
+       bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
+       list_del(&bf->list);
+
+       spin_unlock_bh(&sc->tx.txbuflock);
+
+       return bf;
+}
+
+static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
+                             struct list_head *bf_head,
+                             struct ath_tx_control *txctl)
+{
+       struct ath_buf *bf;
+
+       bf = list_first_entry(bf_head, struct ath_buf, list);
+       bf->bf_state.bf_type |= BUF_AMPDU;
+
+       /*
+        * Do not queue to h/w when any of the following conditions is true:
+        * - there are pending frames in software queue
+        * - the TID is currently paused for ADDBA/BAR request
+        * - seqno is not within block-ack window
+        * - h/w queue depth exceeds low water mark
+        */
+       if (!list_empty(&tid->buf_q) || tid->paused ||
+           !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) ||
+           txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
+               /*
+                * Add this frame to software queue for scheduling later
+                * for aggregation.
+                */
+               list_move_tail(&bf->list, &tid->buf_q);
+               ath_tx_queue_tid(txctl->txq, tid);
+               return;
+       }
+
+       /* Add sub-frame to BAW */
+       ath_tx_addto_baw(sc, tid, bf);
+
+       /* Queue to h/w without aggregation */
+       bf->bf_nframes = 1;
+       bf->bf_lastbf = bf;
+       ath_buf_set_rate(sc, bf);
+       ath_tx_txqaddbuf(sc, txctl->txq, bf_head);
+}
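
The four bullet conditions in the comment above collapse into one predicate; when it holds, the frame is parked on the TID's software queue instead of going to hardware. A restatement with the tested fields as arguments (the depth threshold stands in for ATH_AGGR_MIN_QDEPTH):

#include <linux/types.h>

static bool sketch_must_buffer(bool swq_has_frames, bool tid_paused,
                               bool seqno_in_baw, int hwq_depth,
                               int min_qdepth)
{
        return swq_has_frames || tid_paused || !seqno_in_baw ||
               hwq_depth >= min_qdepth;
}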
+
+static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
+                              struct ath_atx_tid *tid,
+                              struct list_head *bf_head)
+{
+       struct ath_buf *bf;
+
+       bf = list_first_entry(bf_head, struct ath_buf, list);
+       bf->bf_state.bf_type &= ~BUF_AMPDU;
+
+       /* update starting sequence number for subsequent ADDBA request */
+       INCR(tid->seq_start, IEEE80211_SEQ_MAX);
+
+       bf->bf_nframes = 1;
+       bf->bf_lastbf = bf;
+       ath_buf_set_rate(sc, bf);
+       ath_tx_txqaddbuf(sc, txq, bf_head);
+}
+
+static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
+{
+       struct ieee80211_hdr *hdr;
+       enum ath9k_pkt_type htype;
+       __le16 fc;
+
+       hdr = (struct ieee80211_hdr *)skb->data;
+       fc = hdr->frame_control;
+
+       if (ieee80211_is_beacon(fc))
+               htype = ATH9K_PKT_TYPE_BEACON;
+       else if (ieee80211_is_probe_resp(fc))
+               htype = ATH9K_PKT_TYPE_PROBE_RESP;
+       else if (ieee80211_is_atim(fc))
+               htype = ATH9K_PKT_TYPE_ATIM;
+       else if (ieee80211_is_pspoll(fc))
+               htype = ATH9K_PKT_TYPE_PSPOLL;
+       else
+               htype = ATH9K_PKT_TYPE_NORMAL;
+
+       return htype;
+}
+
+static bool is_pae(struct sk_buff *skb)
+{
+       struct ieee80211_hdr *hdr;
+       __le16 fc;
+
+       hdr = (struct ieee80211_hdr *)skb->data;
+       fc = hdr->frame_control;
+
+       if (ieee80211_is_data(fc)) {
+               if (ieee80211_is_nullfunc(fc) ||
+                   /* Port Access Entity (IEEE 802.1X) */
+                   (skb->protocol == cpu_to_be16(ETH_P_PAE))) {
+                       return true;
+               }
+       }
+
+       return false;
 }
 
-/* Stop tx aggregation */
+static int get_hw_crypto_keytype(struct sk_buff *skb)
+{
+       struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+
+       if (tx_info->control.hw_key) {
+               if (tx_info->control.hw_key->alg == ALG_WEP)
+                       return ATH9K_KEY_TYPE_WEP;
+               else if (tx_info->control.hw_key->alg == ALG_TKIP)
+                       return ATH9K_KEY_TYPE_TKIP;
+               else if (tx_info->control.hw_key->alg == ALG_CCMP)
+                       return ATH9K_KEY_TYPE_AES;
+       }
+
+       return ATH9K_KEY_TYPE_CLEAR;
+}
 
-int ath_tx_aggr_stop(struct ath_softc *sc,
-                    const u8 *addr,
-                    u16 tid)
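+/*
+ * Save the frame's TID and stamp it with the sequence number tracked in
+ * the TID's tx aggregation state.
+ */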
+static void assign_aggr_tid_seqno(struct sk_buff *skb,
+                                 struct ath_buf *bf)
 {
+       struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_hdr *hdr;
        struct ath_node *an;
+       struct ath_atx_tid *tid;
+       __le16 fc;
+       u8 *qc;
 
-       spin_lock_bh(&sc->node_lock);
-       an = ath_node_find(sc, (u8 *) addr);
-       spin_unlock_bh(&sc->node_lock);
+       if (!tx_info->control.sta)
+               return;
 
-       if (!an) {
-               DPRINTF(sc, ATH_DBG_AGGR,
-                       "%s: TX aggr stop for non-existent node\n", __func__);
-               return -1;
+       an = (struct ath_node *)tx_info->control.sta->drv_priv;
+       hdr = (struct ieee80211_hdr *)skb->data;
+       fc = hdr->frame_control;
+
+       if (ieee80211_is_data_qos(fc)) {
+               qc = ieee80211_get_qos_ctl(hdr);
+               bf->bf_tidno = qc[0] & 0xf;
        }
 
-       ath_tx_aggr_teardown(sc, an, tid);
-       return 0;
+       /*
+        * For HT capable stations, we save the tidno for later use.
+        * We also override the seqno set by the upper layer with the one
+        * in the tx aggregation state.
+        *
+        * If fragmentation is on, the sequence number is not overridden,
+        * since it has already been incremented by the fragmentation
+        * routine.
+        *
+        * FIXME: check if the fragmentation threshold exceeds the
+        * IEEE 802.11 max.
+        */
+       tid = ATH_AN_2_TID(an, bf->bf_tidno);
+       hdr->seq_ctrl = cpu_to_le16(tid->seq_next <<
+                       IEEE80211_SEQ_SEQ_SHIFT);
+       bf->bf_seqno = tid->seq_next;
+       INCR(tid->seq_next, IEEE80211_SEQ_MAX);
+}
+
+static int setup_tx_flags(struct ath_softc *sc, struct sk_buff *skb,
+                         struct ath_txq *txq)
+{
+       struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+       int flags = 0;
+
+       flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
+       flags |= ATH9K_TXDESC_INTREQ;
+
+       if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
+               flags |= ATH9K_TXDESC_NOACK;
+
+       return flags;
 }
 
 /*
- * Performs transmit side cleanup when TID changes from aggregated to
- * unaggregated.
- * - Pause the TID and mark cleanup in progress
- * - Discard all retry frames from the s/w queue.
+ * rix - rate index
+ * pktlen - total bytes (delims + data + fcs + pads + pad delims)
+ * width  - 0 for 20 MHz, 1 for 40 MHz
+ * half_gi - use a 3.6 us symbol time (half GI) instead of 4 us
  */
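+/*
+ * Illustrative example (assuming the usual 802.11n values, e.g.
+ * bits_per_symbol[7][0] == 260 for MCS 7 at 20 MHz): a 1500-byte MPDU
+ * gives nbits = 1500 * 8 + OFDM_PLCP_BITS = 12022, so
+ * nsymbols = DIV_ROUND_UP(12022, 260) = 47, i.e. 47 * 4 us = 188 us,
+ * plus the legacy/HT preamble and signal fields added below.
+ */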
+static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
+                           int width, int half_gi, bool shortPreamble)
+{
+       struct ath_rate_table *rate_table = sc->cur_rate_table;
+       u32 nbits, nsymbits, duration, nsymbols;
+       u8 rc;
+       int streams, pktlen;
+
+       pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;
+       rc = rate_table->info[rix].ratecode;
+
+       /* for legacy rates, use old function to compute packet duration */
+       if (!IS_HT_RATE(rc))
+               return ath9k_hw_computetxtime(sc->sc_ah, rate_table, pktlen,
+                                             rix, shortPreamble);
+
+       /* find number of symbols: PLCP + data */
+       nbits = (pktlen << 3) + OFDM_PLCP_BITS;
+       nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
+       nsymbols = (nbits + nsymbits - 1) / nsymbits;
+
+       if (!half_gi)
+               duration = SYMBOL_TIME(nsymbols);
+       else
+               duration = SYMBOL_TIME_HALFGI(nsymbols);
+
+       /* add up duration for legacy/HT training and signal fields */
+       streams = HT_RC_2_STREAMS(rc);
+       duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
+
+       return duration;
+}
 
-void ath_tx_aggr_teardown(struct ath_softc *sc,
-       struct ath_node *an, u8 tid)
+static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
 {
-       struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
-       struct ath_txq *txq = &sc->sc_txq[txtid->ac->qnum];
-       struct ath_buf *bf;
+       struct ath_rate_table *rt = sc->cur_rate_table;
+       struct ath9k_11n_rate_series series[4];
+       struct sk_buff *skb;
+       struct ieee80211_tx_info *tx_info;
+       struct ieee80211_tx_rate *rates;
+       struct ieee80211_hdr *hdr;
+       int i, flags = 0;
+       u8 rix = 0, ctsrate = 0;
+       bool is_pspoll;
+
+       memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
+
+       skb = (struct sk_buff *)bf->bf_mpdu;
+       tx_info = IEEE80211_SKB_CB(skb);
+       rates = tx_info->control.rates;
+       hdr = (struct ieee80211_hdr *)skb->data;
+       is_pspoll = ieee80211_is_pspoll(hdr->frame_control);
+
+       /*
+        * Whether Short Preamble is needed for the CTS rate is determined
+        * by the BSS's global flag; the rate series instead uses the
+        * per-rate IEEE80211_TX_RC_USE_SHORT_PREAMBLE flag.
+        */
+       if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
+               ctsrate = rt->info[tx_info->control.rts_cts_rate_idx].ratecode |
+                       rt->info[tx_info->control.rts_cts_rate_idx].short_preamble;
+       else
+               ctsrate = rt->info[tx_info->control.rts_cts_rate_idx].ratecode;
+
+       /*
+        * ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive.
+        * Check the first rate in the series to decide whether RTS/CTS
+        * or CTS-to-self has to be used.
+        */
+       if (rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
+               flags = ATH9K_TXDESC_CTSENA;
+       else if (rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
+               flags = ATH9K_TXDESC_RTSENA;
+
+       /* FIXME: Handle aggregation protection */
+       if (sc->config.ath_aggr_prot &&
+           (!bf_isaggr(bf) || (bf_isaggr(bf) && bf->bf_al < 8192))) {
+               flags = ATH9K_TXDESC_RTSENA;
+       }
+
+       /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
+       if (bf_isaggr(bf) && (bf->bf_al > sc->sc_ah->caps.rts_aggr_limit))
+               flags &= ~(ATH9K_TXDESC_RTSENA);
+
+       for (i = 0; i < 4; i++) {
+               if (!rates[i].count || (rates[i].idx < 0))
+                       continue;
+
+               rix = rates[i].idx;
+               series[i].Tries = rates[i].count;
+               series[i].ChSel = sc->tx_chainmask;
+
+               if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+                       series[i].Rate = rt->info[rix].ratecode |
+                               rt->info[rix].short_preamble;
+               else
+                       series[i].Rate = rt->info[rix].ratecode;
+
+               if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)
+                       series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
+               if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+                       series[i].RateFlags |= ATH9K_RATESERIES_2040;
+               if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
+                       series[i].RateFlags |= ATH9K_RATESERIES_HALFGI;
+
+               series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
+                        (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) != 0,
+                        (rates[i].flags & IEEE80211_TX_RC_SHORT_GI),
+                        (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE));
+       }
+
+       /* set dur_update_en for l-sig computation except for PS-Poll frames */
+       ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
+                                    bf->bf_lastbf->bf_desc,
+                                    !is_pspoll, ctsrate,
+                                    0, series, 4, flags);
+
+       if (sc->config.ath_aggr_prot && flags)
+               ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
+}
+
+static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
+                               struct sk_buff *skb,
+                               struct ath_tx_control *txctl)
+{
+       struct ath_wiphy *aphy = hw->priv;
+       struct ath_softc *sc = aphy->sc;
+       struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       struct ath_tx_info_priv *tx_info_priv;
+       int hdrlen;
+       __le16 fc;
+
+       tx_info_priv = kzalloc(sizeof(*tx_info_priv), GFP_ATOMIC);
+       if (unlikely(!tx_info_priv))
+               return -ENOMEM;
+       tx_info->rate_driver_data[0] = tx_info_priv;
+       tx_info_priv->aphy = aphy;
+       hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+       fc = hdr->frame_control;
+
+       ATH_TXBUF_RESET(bf);
+
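+       /*
+        * On-air frame length: skb->len plus the FCS appended by hardware,
+        * minus the pad bytes (hdrlen & 3) inserted between the 802.11
+        * header and the payload for 4-byte alignment.
+        */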
+       bf->bf_frmlen = skb->len + FCS_LEN - (hdrlen & 3);
+
+       if ((conf_is_ht(&sc->hw->conf) && !is_pae(skb) &&
+            (tx_info->flags & IEEE80211_TX_CTL_AMPDU)))
+               bf->bf_state.bf_type |= BUF_HT;
+
+       bf->bf_flags = setup_tx_flags(sc, skb, txctl->txq);
+
+       bf->bf_keytype = get_hw_crypto_keytype(skb);
+       if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
+               bf->bf_frmlen += tx_info->control.hw_key->icv_len;
+               bf->bf_keyix = tx_info->control.hw_key->hw_key_idx;
+       } else {
+               bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
+       }
+
+       if (ieee80211_is_data_qos(fc) && (sc->sc_flags & SC_OP_TXAGGR))
+               assign_aggr_tid_seqno(skb, bf);
+
+       bf->bf_mpdu = skb;
+
+       bf->bf_dmacontext = dma_map_single(sc->dev, skb->data,
+                                          skb->len, DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(sc->dev, bf->bf_dmacontext))) {
+               bf->bf_mpdu = NULL;
+               DPRINTF(sc, ATH_DBG_CONFIG,
+                       "dma_mapping_error() on TX\n");
+               return -ENOMEM;
+       }
+
+       bf->bf_buf_addr = bf->bf_dmacontext;
+       return 0;
+}
+
+/* FIXME: tx power */
+static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
+                            struct ath_tx_control *txctl)
+{
+       struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
+       struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+       struct ath_node *an = NULL;
        struct list_head bf_head;
+       struct ath_desc *ds;
+       struct ath_atx_tid *tid;
+       struct ath_hw *ah = sc->sc_ah;
+       int frm_type;
+
+       frm_type = get_hw_packet_type(skb);
+
        INIT_LIST_HEAD(&bf_head);
+       list_add_tail(&bf->list, &bf_head);
 
-       DPRINTF(sc, ATH_DBG_AGGR, "%s: teardown TX aggregation\n", __func__);
+       ds = bf->bf_desc;
+       ds->ds_link = 0;
+       ds->ds_data = bf->bf_buf_addr;
 
-       if (txtid->cleanup_inprogress) /* cleanup is in progress */
-               return;
+       ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER,
+                              bf->bf_keyix, bf->bf_keytype, bf->bf_flags);
 
-       if (!txtid->addba_exchangecomplete) {
-               txtid->addba_exchangeattempts = 0;
-               return;
-       }
+       ath9k_hw_filltxdesc(ah, ds,
+                           skb->len,   /* segment length */
+                           true,       /* first segment */
+                           true,       /* last segment */
+                           ds);        /* first descriptor */
 
-       /* TID must be paused first */
-       ath_tx_pause_tid(sc, txtid);
+       spin_lock_bh(&txctl->txq->axq_lock);
 
-       /* drop all software retried frames and mark this TID */
-       spin_lock_bh(&txq->axq_lock);
-       while (!list_empty(&txtid->buf_q)) {
-               bf = list_first_entry(&txtid->buf_q, struct ath_buf, list);
-               if (!bf->bf_isretried) {
+       if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) &&
+           tx_info->control.sta) {
+               an = (struct ath_node *)tx_info->control.sta->drv_priv;
+               tid = ATH_AN_2_TID(an, bf->bf_tidno);
+
+               if (ath_aggr_query(sc, an, bf->bf_tidno)) {
                        /*
-                        * NB: it's based on the assumption that
-                        * software retried frame will always stay
-                        * at the head of software queue.
+                        * Try aggregation if it's a unicast data frame
+                        * and the destination is HT capable.
                         */
-                       break;
+                       ath_tx_send_ampdu(sc, tid, &bf_head, txctl);
+               } else {
+                       /*
+                        * Send this frame as a regular frame when the
+                        * ADDBA exchange is neither complete nor pending.
+                        */
+                       ath_tx_send_normal(sc, txctl->txq,
+                                          tid, &bf_head);
                }
-               list_cut_position(&bf_head,
-                       &txtid->buf_q, &bf->bf_lastfrm->list);
-               ath_tx_update_baw(sc, txtid, bf->bf_seqno);
+       } else {
+               bf->bf_lastbf = bf;
+               bf->bf_nframes = 1;
 
-               /* complete this sub-frame */
-               ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
+               ath_buf_set_rate(sc, bf);
+               ath_tx_txqaddbuf(sc, txctl->txq, &bf_head);
        }
 
-       if (txtid->baw_head != txtid->baw_tail) {
-               spin_unlock_bh(&txq->axq_lock);
-               txtid->cleanup_inprogress = true;
-       } else {
-               txtid->addba_exchangecomplete = 0;
-               txtid->addba_exchangeattempts = 0;
+       spin_unlock_bh(&txctl->txq->axq_lock);
+}
+
+/* Upon failure caller should free skb */
+int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
+                struct ath_tx_control *txctl)
+{
+       struct ath_wiphy *aphy = hw->priv;
+       struct ath_softc *sc = aphy->sc;
+       struct ath_buf *bf;
+       int r;
+
+       bf = ath_tx_get_buffer(sc);
+       if (!bf) {
+               DPRINTF(sc, ATH_DBG_XMIT, "TX buffers are full\n");
+               return -1;
+       }
+
+       r = ath_tx_setup_buffer(hw, bf, skb, txctl);
+       if (unlikely(r)) {
+               struct ath_txq *txq = txctl->txq;
+
+               DPRINTF(sc, ATH_DBG_FATAL, "TX mem alloc failure\n");
+
+               /* This TX queue will be resumed in ath_tx_processq(); we
+                * can guarantee that, since we know beforehand that TX
+                * completion still has to run for at least one buffer on
+                * the queue. */
+               spin_lock_bh(&txq->axq_lock);
+               if (sc->tx.txq[txq->axq_qnum].axq_depth > 1) {
+                       ieee80211_stop_queue(sc->hw,
+                               skb_get_queue_mapping(skb));
+                       txq->stopped = 1;
+               }
                spin_unlock_bh(&txq->axq_lock);
-               ath_tx_flush_tid(sc, txtid);
+
+               spin_lock_bh(&sc->tx.txbuflock);
+               list_add_tail(&bf->list, &sc->tx.txbuf);
+               spin_unlock_bh(&sc->tx.txbuflock);
+
+               return r;
        }
-}
 
-/*
- * Tx scheduling logic
- * NB: must be called with txq lock held
- */
+       ath_tx_start_dma(sc, bf, txctl);
 
-void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
+       return 0;
+}
+
+void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
 {
-       struct ath_atx_ac *ac;
-       struct ath_atx_tid *tid;
+       struct ath_wiphy *aphy = hw->priv;
+       struct ath_softc *sc = aphy->sc;
+       int hdrlen, padsize;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct ath_tx_control txctl;
 
-       /* nothing to schedule */
-       if (list_empty(&txq->axq_acq))
-               return;
-       /*
-        * get the first node/ac pair on the queue
-        */
-       ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
-       list_del(&ac->list);
-       ac->sched = false;
+       memset(&txctl, 0, sizeof(struct ath_tx_control));
 
        /*
-        * process a single tid per destination
+        * As a temporary workaround, assign seq# here; this will likely need
+        * to be cleaned up to work better with Beacon transmission and virtual
+        * BSSes.
         */
-       do {
-               /* nothing to schedule */
-               if (list_empty(&ac->tid_q))
+       if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
+               struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
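+               /*
+                * In seq_ctrl, bits 0-3 carry the fragment number and
+                * bits 4-15 the sequence number, so adding 0x10 advances
+                * the sequence number by one.
+                */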
+               if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
+                       sc->tx.seq_no += 0x10;
+               hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
+               hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
+       }
+
+       /* Add the padding after the header if this is not already done */
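+       /*
+        * 802.11 header lengths are even, so hdrlen % 4 is either 0 or 2;
+        * the pad bytes pushed here end up between the header (moved to
+        * the new front) and the payload, keeping the payload 4-byte
+        * aligned.
+        */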
+       hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+       if (hdrlen & 3) {
+               padsize = hdrlen % 4;
+               if (skb_headroom(skb) < padsize) {
+                       DPRINTF(sc, ATH_DBG_XMIT, "TX CABQ padding failed\n");
+                       dev_kfree_skb_any(skb);
                        return;
+               }
+               skb_push(skb, padsize);
+               memmove(skb->data, skb->data + padsize, hdrlen);
+       }
 
-               tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
-               list_del(&tid->list);
-               tid->sched = false;
+       txctl.txq = sc->beacon.cabq;
 
-               if (tid->paused)    /* check next tid to keep h/w busy */
-                       continue;
+       DPRINTF(sc, ATH_DBG_XMIT, "transmitting CABQ packet, skb: %p\n", skb);
 
-               if (!(tid->an->an_smmode == ATH_SM_PWRSAV_DYNAMIC) ||
-                   ((txq->axq_depth % 2) == 0)) {
-                       ath_tx_sched_aggr(sc, txq, tid);
-               }
+       if (ath_tx_start(hw, skb, &txctl) != 0) {
+               DPRINTF(sc, ATH_DBG_XMIT, "CABQ TX failed\n");
+               goto exit;
+       }
 
+       return;
+exit:
+       dev_kfree_skb_any(skb);
+}
+
+/*****************/
+/* TX Completion */
+/*****************/
+
+static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
+                           struct ath_xmit_status *tx_status)
+{
+       struct ieee80211_hw *hw = sc->hw;
+       struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+       struct ath_tx_info_priv *tx_info_priv = ATH_TX_INFO_PRIV(tx_info);
+       int hdrlen, padsize;
+
+       DPRINTF(sc, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
+
+       if (tx_info_priv)
+               hw = tx_info_priv->aphy->hw;
+
+       if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK ||
+           tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED) {
+               kfree(tx_info_priv);
+               tx_info->rate_driver_data[0] = NULL;
+       }
+
+       if (tx_status->flags & ATH_TX_BAR) {
+               tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
+               tx_status->flags &= ~ATH_TX_BAR;
+       }
+
+       if (!(tx_status->flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
+               /* Frame was ACKed */
+               tx_info->flags |= IEEE80211_TX_STAT_ACK;
+       }
+
+       tx_info->status.rates[0].count = tx_status->retries + 1;
+
+       hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+       padsize = hdrlen & 3;
+       if (padsize && hdrlen >= 24) {
                /*
-                * add tid to round-robin queue if more frames
-                * are pending for the tid
+                * Remove MAC header padding before giving the frame back to
+                * mac80211.
                 */
-               if (!list_empty(&tid->buf_q))
-                       ath_tx_queue_tid(txq, tid);
+               memmove(skb->data + padsize, skb->data, hdrlen);
+               skb_pull(skb, padsize);
+       }
 
-               /* only schedule one TID at a time */
-               break;
-       } while (!list_empty(&ac->tid_q));
+       ieee80211_tx_status(hw, skb);
+}
+
+static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
+                               struct list_head *bf_q,
+                               int txok, int sendbar)
+{
+       struct sk_buff *skb = bf->bf_mpdu;
+       struct ath_xmit_status tx_status;
+       unsigned long flags;
 
        /*
-        * schedule AC if more TIDs need processing
+        * Set retry information.
+        * NB: Don't use the information in the descriptor, because the frame
+        * could have been retried in software.
         */
-       if (!list_empty(&ac->tid_q)) {
-               /*
-                * add dest ac to txq if not already added
-                */
-               if (!ac->sched) {
-                       ac->sched = true;
-                       list_add_tail(&ac->list, &txq->axq_acq);
+       tx_status.retries = bf->bf_retries;
+       tx_status.flags = 0;
+
+       if (sendbar)
+               tx_status.flags = ATH_TX_BAR;
+
+       if (!txok) {
+               tx_status.flags |= ATH_TX_ERROR;
+
+               if (bf_isxretried(bf))
+                       tx_status.flags |= ATH_TX_XRETRY;
+       }
+
+       dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE);
+       ath_tx_complete(sc, skb, &tx_status);
+
+       /* Return this MPDU's list of ath_bufs to the free queue */
+       spin_lock_irqsave(&sc->tx.txbuflock, flags);
+       list_splice_tail_init(bf_q, &sc->tx.txbuf);
+       spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
+}
+
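+/*
+ * Count the sub-frames of an aggregate that were not acknowledged in the
+ * block-ack bitmap reported by hardware; if the transmit failed outright
+ * (!txok), every sub-frame counts as bad.
+ */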
+static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
+                             int txok)
+{
+       struct ath_buf *bf_last = bf->bf_lastbf;
+       struct ath_desc *ds = bf_last->bf_desc;
+       u16 seq_st = 0;
+       u32 ba[WME_BA_BMP_SIZE >> 5];
+       int ba_index;
+       int nbad = 0;
+       int isaggr = 0;
+
+       if (ds->ds_txstat.ts_flags == ATH9K_TX_SW_ABORTED)
+               return 0;
+
+       isaggr = bf_isaggr(bf);
+       if (isaggr) {
+               seq_st = ATH_DS_BA_SEQ(ds);
+               memcpy(ba, ATH_DS_BA_BITMAP(ds), WME_BA_BMP_SIZE >> 3);
+       }
+
+       while (bf) {
+               ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
+               if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
+                       nbad++;
+
+               bf = bf->bf_next;
+       }
+
+       return nbad;
+}
+
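+/*
+ * Copy the descriptor's tx status into the mac80211 control block so
+ * that the rate control algorithm can later be updated with the outcome
+ * of this (possibly aggregated) transmit.
+ */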
+static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds, int nbad)
+{
+       struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+       struct ath_tx_info_priv *tx_info_priv = ATH_TX_INFO_PRIV(tx_info);
+
+       tx_info_priv->update_rc = false;
+       if (ds->ds_txstat.ts_status & ATH9K_TXERR_FILT)
+               tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
+
+       if ((ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) == 0 &&
+           (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) {
+               if (ieee80211_is_data(hdr->frame_control)) {
+                       memcpy(&tx_info_priv->tx, &ds->ds_txstat,
+                              sizeof(tx_info_priv->tx));
+                       tx_info_priv->n_frames = bf->bf_nframes;
+                       tx_info_priv->n_bad_frames = nbad;
+                       tx_info_priv->update_rc = true;
                }
        }
 }
 
-/* Initialize per-node transmit state */
+static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq)
+{
+       int qnum;
+
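+       /*
+        * Wake the mac80211 queue only once the pending depth has dropped
+        * below a low-water mark (ATH_TXBUF - 20), so the queue is not
+        * immediately stopped again.
+        */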
+       spin_lock_bh(&txq->axq_lock);
+       if (txq->stopped &&
+           sc->tx.txq[txq->axq_qnum].axq_depth <= (ATH_TXBUF - 20)) {
+               qnum = ath_get_mac80211_qnum(txq->axq_qnum, sc);
+               if (qnum != -1) {
+                       ieee80211_wake_queue(sc->hw, qnum);
+                       txq->stopped = 0;
+               }
+       }
+       spin_unlock_bh(&txq->axq_lock);
+}
 
-void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
+static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
 {
-       if (sc->sc_txaggr) {
-               struct ath_atx_tid *tid;
-               struct ath_atx_ac *ac;
-               int tidno, acno;
+       struct ath_hw *ah = sc->sc_ah;
+       struct ath_buf *bf, *lastbf, *bf_held = NULL;
+       struct list_head bf_head;
+       struct ath_desc *ds;
+       int txok, nbad = 0;
+       int status;
 
-               sc->sc_ht_info.maxampdu = ATH_AMPDU_LIMIT_DEFAULT;
+       DPRINTF(sc, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
+               txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
+               txq->axq_link);
 
-               /*
-                * Init per tid tx state
-                */
-               for (tidno = 0, tid = &an->an_aggr.tx.tid[tidno];
-                               tidno < WME_NUM_TID;
-                               tidno++, tid++) {
-                       tid->an        = an;
-                       tid->tidno     = tidno;
-                       tid->seq_start = tid->seq_next = 0;
-                       tid->baw_size  = WME_MAX_BA;
-                       tid->baw_head  = tid->baw_tail = 0;
-                       tid->sched     = false;
-                       tid->paused = false;
-                       tid->cleanup_inprogress = false;
-                       INIT_LIST_HEAD(&tid->buf_q);
-
-                       acno = TID_TO_WME_AC(tidno);
-                       tid->ac = &an->an_aggr.tx.ac[acno];
-
-                       /* ADDBA state */
-                       tid->addba_exchangecomplete     = 0;
-                       tid->addba_exchangeinprogress   = 0;
-                       tid->addba_exchangeattempts     = 0;
+       for (;;) {
+               spin_lock_bh(&txq->axq_lock);
+               if (list_empty(&txq->axq_q)) {
+                       txq->axq_link = NULL;
+                       txq->axq_linkbuf = NULL;
+                       spin_unlock_bh(&txq->axq_lock);
+                       break;
                }
+               bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
 
                /*
-                * Init per ac tx state
+                * There is a race condition in which a BH gets scheduled
+                * after sw writes TxE and before hw re-loads the last
+                * descriptor to get the newly chained one.
+                * Software must keep the last DONE descriptor as a
+                * holding descriptor - software does so by marking
+                * it with the STALE flag.
                 */
-               for (acno = 0, ac = &an->an_aggr.tx.ac[acno];
-                               acno < WME_NUM_AC; acno++, ac++) {
-                       ac->sched    = false;
-                       INIT_LIST_HEAD(&ac->tid_q);
-
-                       switch (acno) {
-                       case WME_AC_BE:
-                               ac->qnum = ath_tx_get_qnum(sc,
-                                       ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
-                               break;
-                       case WME_AC_BK:
-                               ac->qnum = ath_tx_get_qnum(sc,
-                                       ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BK);
-                               break;
-                       case WME_AC_VI:
-                               ac->qnum = ath_tx_get_qnum(sc,
-                                       ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VI);
-                               break;
-                       case WME_AC_VO:
-                               ac->qnum = ath_tx_get_qnum(sc,
-                                       ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VO);
+               bf_held = NULL;
+               if (bf->bf_status & ATH_BUFSTATUS_STALE) {
+                       bf_held = bf;
+                       if (list_is_last(&bf_held->list, &txq->axq_q)) {
+                               txq->axq_link = NULL;
+                               txq->axq_linkbuf = NULL;
+                               spin_unlock_bh(&txq->axq_lock);
+
+                               /*
+                                * The holding descriptor is the last
+                                * descriptor in the queue; it's safe to
+                                * remove it in BH context.
+                                */
+                               spin_lock_bh(&sc->tx.txbuflock);
+                               list_move_tail(&bf_held->list, &sc->tx.txbuf);
+                               spin_unlock_bh(&sc->tx.txbuflock);
+
                                break;
+                       } else {
+                               bf = list_entry(bf_held->list.next,
+                                               struct ath_buf, list);
                        }
                }
+
+               lastbf = bf->bf_lastbf;
+               ds = lastbf->bf_desc;
+
+               status = ath9k_hw_txprocdesc(ah, ds);
+               if (status == -EINPROGRESS) {
+                       spin_unlock_bh(&txq->axq_lock);
+                       break;
+               }
+               if (bf->bf_desc == txq->axq_lastdsWithCTS)
+                       txq->axq_lastdsWithCTS = NULL;
+               if (ds == txq->axq_gatingds)
+                       txq->axq_gatingds = NULL;
+
+               /*
+                * Remove the ath_bufs of this transmit unit from the txq,
+                * but leave the last descriptor behind as the holding
+                * descriptor for hw.
+                */
+               lastbf->bf_status |= ATH_BUFSTATUS_STALE;
+               INIT_LIST_HEAD(&bf_head);
+               if (!list_is_singular(&lastbf->list))
+                       list_cut_position(&bf_head,
+                               &txq->axq_q, lastbf->list.prev);
+
+               txq->axq_depth--;
+               if (bf_isaggr(bf))
+                       txq->axq_aggr_depth--;
+
+               txok = (ds->ds_txstat.ts_status == 0);
+               spin_unlock_bh(&txq->axq_lock);
+
+               if (bf_held) {
+                       spin_lock_bh(&sc->tx.txbuflock);
+                       list_move_tail(&bf_held->list, &sc->tx.txbuf);
+                       spin_unlock_bh(&sc->tx.txbuflock);
+               }
+
+               if (!bf_isampdu(bf)) {
+                       /*
+                        * This frame was sent out as a single frame;
+                        * use the hardware retry status for it.
+                        */
+                       bf->bf_retries = ds->ds_txstat.ts_longretry;
+                       if (ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY)
+                               bf->bf_state.bf_type |= BUF_XRETRY;
+                       nbad = 0;
+               } else {
+                       nbad = ath_tx_num_badfrms(sc, bf, txok);
+               }
+
+               ath_tx_rc_status(bf, ds, nbad);
+
+               if (bf_isampdu(bf))
+                       ath_tx_complete_aggr(sc, txq, bf, &bf_head, txok);
+               else
+                       ath_tx_complete_buf(sc, bf, &bf_head, txok, 0);
+
+               ath_wake_mac80211_queue(sc, txq);
+
+               spin_lock_bh(&txq->axq_lock);
+               if (sc->sc_flags & SC_OP_TXAGGR)
+                       ath_txq_schedule(sc, txq);
+               spin_unlock_bh(&txq->axq_lock);
+       }
+}
+
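+/*
+ * Service every tx queue that is both set up and flagged in the
+ * interrupt queue mask returned by ath9k_hw_gettxintrtxqs().
+ */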
+void ath_tx_tasklet(struct ath_softc *sc)
+{
+       int i;
+       u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
+
+       ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
+
+       for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
+               if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
+                       ath_tx_processq(sc, &sc->tx.txq[i]);
        }
 }
 
-/* Cleanupthe pending buffers for the node. */
+/*****************/
+/* Init, Cleanup */
+/*****************/
+
+int ath_tx_init(struct ath_softc *sc, int nbufs)
+{
+       int error = 0;
+
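+       /* do/while(0), so the error paths below can break to common cleanup */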
+       do {
+               spin_lock_init(&sc->tx.txbuflock);
+
+               error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
+                       "tx", nbufs, 1);
+               if (error != 0) {
+                       DPRINTF(sc, ATH_DBG_FATAL,
+                               "Failed to allocate tx descriptors: %d\n",
+                               error);
+                       break;
+               }
+
+               error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
+                                         "beacon", ATH_BCBUF, 1);
+               if (error != 0) {
+                       DPRINTF(sc, ATH_DBG_FATAL,
+                               "Failed to allocate beacon descriptors: %d\n",
+                               error);
+                       break;
+               }
+
+       } while (0);
+
+       if (error != 0)
+               ath_tx_cleanup(sc);
+
+       return error;
+}
+
+int ath_tx_cleanup(struct ath_softc *sc)
+{
+       if (sc->beacon.bdma.dd_desc_len != 0)
+               ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
+
+       if (sc->tx.txdma.dd_desc_len != 0)
+               ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
+
+       return 0;
+}
 
-void ath_tx_node_cleanup(struct ath_softc *sc,
-       struct ath_node *an, bool bh_flag)
+void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
+{
+       struct ath_atx_tid *tid;
+       struct ath_atx_ac *ac;
+       int tidno, acno;
+
+       for (tidno = 0, tid = &an->tid[tidno];
+            tidno < WME_NUM_TID;
+            tidno++, tid++) {
+               tid->an        = an;
+               tid->tidno     = tidno;
+               tid->seq_start = tid->seq_next = 0;
+               tid->baw_size  = WME_MAX_BA;
+               tid->baw_head  = tid->baw_tail = 0;
+               tid->sched     = false;
+               tid->paused    = false;
+               tid->state &= ~AGGR_CLEANUP;
+               INIT_LIST_HEAD(&tid->buf_q);
+               acno = TID_TO_WME_AC(tidno);
+               tid->ac = &an->ac[acno];
+               tid->state &= ~AGGR_ADDBA_COMPLETE;
+               tid->state &= ~AGGR_ADDBA_PROGRESS;
+               tid->addba_exchangeattempts = 0;
+       }
+
+       for (acno = 0, ac = &an->ac[acno];
+            acno < WME_NUM_AC; acno++, ac++) {
+               ac->sched    = false;
+               INIT_LIST_HEAD(&ac->tid_q);
+
+               switch (acno) {
+               case WME_AC_BE:
+                       ac->qnum = ath_tx_get_qnum(sc,
+                                  ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
+                       break;
+               case WME_AC_BK:
+                       ac->qnum = ath_tx_get_qnum(sc,
+                                  ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BK);
+                       break;
+               case WME_AC_VI:
+                       ac->qnum = ath_tx_get_qnum(sc,
+                                  ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VI);
+                       break;
+               case WME_AC_VO:
+                       ac->qnum = ath_tx_get_qnum(sc,
+                                  ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VO);
+                       break;
+               }
+       }
+}
+
+void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
 {
        int i;
        struct ath_atx_ac *ac, *ac_tmp;
        struct ath_atx_tid *tid, *tid_tmp;
        struct ath_txq *txq;
+
        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
                if (ATH_TXQ_SETUP(sc, i)) {
-                       txq = &sc->sc_txq[i];
+                       txq = &sc->tx.txq[i];
 
-                       if (likely(bh_flag))
-                               spin_lock_bh(&txq->axq_lock);
-                       else
-                               spin_lock(&txq->axq_lock);
+                       spin_lock(&txq->axq_lock);
 
                        list_for_each_entry_safe(ac,
                                        ac_tmp, &txq->axq_acq, list) {
@@ -2836,36 +2127,14 @@ void ath_tx_node_cleanup(struct ath_softc *sc,
                                                tid_tmp, &ac->tid_q, list) {
                                        list_del(&tid->list);
                                        tid->sched = false;
-                                       ath_tid_drain(sc, txq, tid, bh_flag);
-                                       tid->addba_exchangecomplete = 0;
+                                       ath_tid_drain(sc, txq, tid);
+                                       tid->state &= ~AGGR_ADDBA_COMPLETE;
                                        tid->addba_exchangeattempts = 0;
-                                       tid->cleanup_inprogress = false;
+                                       tid->state &= ~AGGR_CLEANUP;
                                }
                        }
 
-                       if (likely(bh_flag))
-                               spin_unlock_bh(&txq->axq_lock);
-                       else
-                               spin_unlock(&txq->axq_lock);
-               }
-       }
-}
-
-/* Cleanup per node transmit state */
-
-void ath_tx_node_free(struct ath_softc *sc, struct ath_node *an)
-{
-       if (sc->sc_txaggr) {
-               struct ath_atx_tid *tid;
-               int tidno, i;
-
-               /* Init per tid rx state */
-               for (tidno = 0, tid = &an->an_aggr.tx.tid[tidno];
-                       tidno < WME_NUM_TID;
-                    tidno++, tid++) {
-
-                       for (i = 0; i < ATH_TID_MAX_BUFS; i++)
-                               ASSERT(tid->tx_buf[i] == NULL);
+                       spin_unlock(&txq->axq_lock);
                }
        }
 }