#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
+#include <linux/slab.h>
#include <asm/div64.h>
addr = (u32) (dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
addrext = (u32) (dmaaddr & SSB_DMA_TRANSLATION_MASK)
>> SSB_DMA_TRANSLATION_SHIFT;
- addr |= ssb_dma_translation(ring->dev->dev);
+ addr |= ring->dev->dma.translation;
ctl = bufsize & B43_DMA32_DCTL_BYTECNT;
if (slot == ring->nr_slots - 1)
ctl |= B43_DMA32_DCTL_DTABLEEND;
addrhi = (((u64) dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK);
addrext = (((u64) dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK)
>> SSB_DMA_TRANSLATION_SHIFT;
- addrhi |= (ssb_dma_translation(ring->dev->dev) << 1);
+ addrhi |= (ring->dev->dma.translation << 1);
if (slot == ring->nr_slots - 1)
ctl0 |= B43_DMA64_DCTL0_DTABLEEND;
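/*
 * Illustration only: how the descriptor address and "addrext" bits are
 * derived from a DMA address, as in the two fragments above. Standalone
 * userspace sketch; DEMO_TRANSLATION_MASK/_SHIFT are local stand-ins for
 * SSB_DMA_TRANSLATION_MASK/_SHIFT, and the sample values are made up.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_TRANSLATION_MASK  0xC0000000u
#define DEMO_TRANSLATION_SHIFT 30

int main(void)
{
        uint32_t dmaaddr = 0x8F001000u; /* sample bus address */
        uint32_t trans = 0x40000000u;   /* sample core translation base */
        uint32_t addr, addrext;

        addr = dmaaddr & ~DEMO_TRANSLATION_MASK;
        addrext = (dmaaddr & DEMO_TRANSLATION_MASK) >> DEMO_TRANSLATION_SHIFT;
        addr |= trans; /* OR the translation base into the address field */

        printf("addr=0x%08x addrext=%u\n", addr, addrext);
        return 0;
}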
if (start)
dma_addr_t dmaaddr;
if (tx) {
- dmaaddr = ssb_dma_map_single(ring->dev->dev,
- buf, len, DMA_TO_DEVICE);
+ dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
+ buf, len, DMA_TO_DEVICE);
} else {
- dmaaddr = ssb_dma_map_single(ring->dev->dev,
- buf, len, DMA_FROM_DEVICE);
+ dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
+ buf, len, DMA_FROM_DEVICE);
}
return dmaaddr;
dma_addr_t addr, size_t len, int tx)
{
if (tx) {
- ssb_dma_unmap_single(ring->dev->dev,
- addr, len, DMA_TO_DEVICE);
+ dma_unmap_single(ring->dev->dev->dma_dev,
+ addr, len, DMA_TO_DEVICE);
} else {
- ssb_dma_unmap_single(ring->dev->dev,
- addr, len, DMA_FROM_DEVICE);
+ dma_unmap_single(ring->dev->dev->dma_dev,
+ addr, len, DMA_FROM_DEVICE);
}
}
dma_addr_t addr, size_t len)
{
B43_WARN_ON(ring->tx);
- ssb_dma_sync_single_for_cpu(ring->dev->dev,
+ dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
addr, len, DMA_FROM_DEVICE);
}
dma_addr_t addr, size_t len)
{
B43_WARN_ON(ring->tx);
- ssb_dma_sync_single_for_device(ring->dev->dev,
- addr, len, DMA_FROM_DEVICE);
+ dma_sync_single_for_device(ring->dev->dev->dma_dev,
+ addr, len, DMA_FROM_DEVICE);
}
static inline
*/
if (ring->type == B43_DMA_64BIT)
flags |= GFP_DMA;
- ring->descbase = ssb_dma_alloc_consistent(ring->dev->dev,
- B43_DMA_RINGMEMSIZE,
- &(ring->dmabase), flags);
+ ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
+ B43_DMA_RINGMEMSIZE,
+ &(ring->dmabase), flags);
if (!ring->descbase) {
b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
return -ENOMEM;
static void free_ringmemory(struct b43_dmaring *ring)
{
- gfp_t flags = GFP_KERNEL;
-
- if (ring->type == B43_DMA_64BIT)
- flags |= GFP_DMA;
-
- ssb_dma_free_consistent(ring->dev->dev, B43_DMA_RINGMEMSIZE,
- ring->descbase, ring->dmabase, flags);
+ dma_free_coherent(ring->dev->dev->dma_dev, B43_DMA_RINGMEMSIZE,
+ ring->descbase, ring->dmabase);
}
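/*
 * Illustration only: the coherent alloc/free pairing behind the hunk
 * above. dma_free_coherent() takes no gfp flags, which is why the
 * ZONE_DMA bookkeeping disappears from free_ringmemory(). Kernel-context
 * sketch, not a standalone program; the "demo_*" names and the 4096-byte
 * size are made up.
 */
#include <linux/dma-mapping.h>

static void *demo_descbase;
static dma_addr_t demo_dmabase;

static int demo_alloc_ring(struct device *dma_dev, gfp_t flags)
{
        demo_descbase = dma_alloc_coherent(dma_dev, 4096,
                                           &demo_dmabase, flags);
        return demo_descbase ? 0 : -ENOMEM;
}

static void demo_free_ring(struct device *dma_dev)
{
        /* No gfp flags needed on the free side. */
        dma_free_coherent(dma_dev, 4096, demo_descbase, demo_dmabase);
}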
/* Reset the RX DMA channel */
dma_addr_t addr,
size_t buffersize, bool dma_to_device)
{
- if (unlikely(ssb_dma_mapping_error(ring->dev->dev, addr)))
+ if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
return 1;
switch (ring->type) {
return 1;
}
+static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb)
+{
+ unsigned char *f = skb->data + ring->frameoffset;
+
+ return ((f[0] & f[1] & f[2] & f[3] & f[4] & f[5] & f[6] & f[7]) == 0xFF);
+}
+
+static void b43_poison_rx_buffer(struct b43_dmaring *ring, struct sk_buff *skb)
+{
+ struct b43_rxhdr_fw4 *rxhdr;
+ unsigned char *frame;
+
+ /* This poisons the RX buffer to detect DMA failures. */
+
+ rxhdr = (struct b43_rxhdr_fw4 *)(skb->data);
+ rxhdr->frame_len = 0;
+
+ B43_WARN_ON(ring->rx_buffersize < ring->frameoffset + sizeof(struct b43_plcp_hdr6) + 2);
+ frame = skb->data + ring->frameoffset;
+ memset(frame, 0xFF, sizeof(struct b43_plcp_hdr6) + 2 /* padding */);
+}
+
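/*
 * Illustration only: the poison/detect pattern implemented by the two
 * helpers above, as a standalone userspace sketch. The payload is filled
 * with 0xFF; ANDing the first eight bytes still yields 0xFF only if every
 * byte is still 0xFF, which is vanishingly unlikely for a real received
 * frame, so a poisoned buffer means the device never wrote to it.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool is_poisoned(const unsigned char *f)
{
        return (f[0] & f[1] & f[2] & f[3] & f[4] & f[5] & f[6] & f[7]) == 0xFF;
}

int main(void)
{
        unsigned char frame[8];

        memset(frame, 0xFF, sizeof(frame));             /* poison */
        printf("before DMA: %d\n", is_poisoned(frame)); /* 1: untouched */
        frame[3] = 0x42;                                /* simulated device write */
        printf("after DMA:  %d\n", is_poisoned(frame)); /* 0: real data */
        return 0;
}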
static int setup_rx_descbuffer(struct b43_dmaring *ring,
struct b43_dmadesc_generic *desc,
struct b43_dmadesc_meta *meta, gfp_t gfp_flags)
{
- struct b43_rxhdr_fw4 *rxhdr;
dma_addr_t dmaaddr;
struct sk_buff *skb;
skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
if (unlikely(!skb))
return -ENOMEM;
+ b43_poison_rx_buffer(ring, skb);
dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
/* ugh. try to realloc in zone_dma */
skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
if (unlikely(!skb))
return -ENOMEM;
+ b43_poison_rx_buffer(ring, skb);
dmaaddr = map_descbuffer(ring, skb->data,
ring->rx_buffersize, 0);
if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
ring->ops->fill_descriptor(ring, desc, dmaaddr,
ring->rx_buffersize, 0, 0, 0);
- rxhdr = (struct b43_rxhdr_fw4 *)(skb->data);
- rxhdr->frame_len = 0;
-
return 0;
}
int err = 0;
u32 value;
u32 addrext;
- u32 trans = ssb_dma_translation(ring->dev->dev);
+ u32 trans = ring->dev->dma.translation;
if (ring->tx) {
if (ring->type == B43_DMA_64BIT) {
static void free_all_descbuffers(struct b43_dmaring *ring)
{
- struct b43_dmadesc_generic *desc;
struct b43_dmadesc_meta *meta;
int i;
if (!ring->used_slots)
return;
for (i = 0; i < ring->nr_slots; i++) {
- desc = ring->ops->idx2desc(ring, i, &meta);
+ /* get meta - ignore returned value */
+ ring->ops->idx2desc(ring, i, &meta);
- if (!meta->skb) {
+ if (!meta->skb || b43_dma_ptr_is_poisoned(meta->skb)) {
B43_WARN_ON(!ring->tx);
continue;
}
if (tmp & B43_DMA32_TXADDREXT_MASK)
return DMA_BIT_MASK(32);
- return DMA_30BIT_MASK;
+ return DMA_BIT_MASK(30);
}
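/*
 * Illustration only: DMA_BIT_MASK(n) evaluates to a mask with the low n
 * bits set, so the old DMA_30BIT_MASK and the new DMA_BIT_MASK(30) should
 * be the same value, 0x3fffffff. Standalone sketch with a local copy of
 * the macro.
 */
#include <stdio.h>

#define DEMO_DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
        printf("30-bit: 0x%llx\n", (unsigned long long)DEMO_DMA_BIT_MASK(30));
        printf("32-bit: 0x%llx\n", (unsigned long long)DEMO_DMA_BIT_MASK(32));
        printf("64-bit: 0x%llx\n", (unsigned long long)DEMO_DMA_BIT_MASK(64));
        return 0;
}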
static enum b43_dmatype dma_mask_to_engine_type(u64 dmamask)
{
- if (dmamask == DMA_30BIT_MASK)
+ if (dmamask == DMA_BIT_MASK(30))
return B43_DMA_30BIT;
if (dmamask == DMA_BIT_MASK(32))
return B43_DMA_32BIT;
enum b43_dmatype type)
{
struct b43_dmaring *ring;
- int err;
+ int i, err;
dma_addr_t dma_test;
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
GFP_KERNEL);
if (!ring->meta)
goto err_kfree_ring;
+ for (i = 0; i < ring->nr_slots; i++)
+ ring->meta[i].skb = B43_DMA_PTR_POISON;

ring->type = type;
ring->dev = dev;
} else
B43_WARN_ON(1);
}
- spin_lock_init(&ring->lock);
#ifdef CONFIG_B43_DEBUG
ring->last_injected_overflow = jiffies;
#endif
goto err_kfree_meta;
/* test for ability to dma to txhdr_cache */
- dma_test = ssb_dma_map_single(dev->dev,
- ring->txhdr_cache,
- b43_txhdr_size(dev),
- DMA_TO_DEVICE);
+ dma_test = dma_map_single(dev->dev->dma_dev,
+ ring->txhdr_cache,
+ b43_txhdr_size(dev),
+ DMA_TO_DEVICE);
if (b43_dma_mapping_error(ring, dma_test,
b43_txhdr_size(dev), 1)) {
if (!ring->txhdr_cache)
goto err_kfree_meta;
- dma_test = ssb_dma_map_single(dev->dev,
- ring->txhdr_cache,
- b43_txhdr_size(dev),
- DMA_TO_DEVICE);
+ dma_test = dma_map_single(dev->dev->dma_dev,
+ ring->txhdr_cache,
+ b43_txhdr_size(dev),
+ DMA_TO_DEVICE);
if (b43_dma_mapping_error(ring, dma_test,
b43_txhdr_size(dev), 1)) {
}
}
- ssb_dma_unmap_single(dev->dev,
- dma_test, b43_txhdr_size(dev),
- DMA_TO_DEVICE);
+ dma_unmap_single(dev->dev->dma_dev,
+ dma_test, b43_txhdr_size(dev),
+ DMA_TO_DEVICE);
}
err = alloc_ringmemory(ring);
/* Try to set the DMA mask. If it fails, try falling back to a
* lower mask, as we can always also support a lower one. */
while (1) {
- err = ssb_dma_set_mask(dev->dev, mask);
- if (!err)
- break;
+ err = dma_set_mask(dev->dev->dma_dev, mask);
+ if (!err) {
+ err = dma_set_coherent_mask(dev->dev->dma_dev, mask);
+ if (!err)
+ break;
+ }
if (mask == DMA_BIT_MASK(64)) {
mask = DMA_BIT_MASK(32);
fallback = 1;
continue;
}
if (mask == DMA_BIT_MASK(32)) {
- mask = DMA_30BIT_MASK;
+ mask = DMA_BIT_MASK(30);
fallback = 1;
continue;
}
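/*
 * Illustration only: the 64 -> 32 -> 30 bit fallback cascade implemented
 * by the loop above, as a standalone sketch. try_set_mask() is a made-up
 * stand-in for dma_set_mask() plus dma_set_coherent_mask(); here it
 * pretends the platform only supports 32-bit DMA.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

static int try_set_mask(uint64_t mask)
{
        return mask <= DEMO_BIT_MASK(32) ? 0 : -5; /* 0 or -EIO */
}

int main(void)
{
        uint64_t mask = DEMO_BIT_MASK(64);
        int fallback = 0;

        while (try_set_mask(mask)) {
                if (mask == DEMO_BIT_MASK(64)) {
                        mask = DEMO_BIT_MASK(32);
                        fallback = 1;
                        continue;
                }
                if (mask == DEMO_BIT_MASK(32)) {
                        mask = DEMO_BIT_MASK(30);
                        fallback = 1;
                        continue;
                }
                return 1; /* no usable mask at all */
        }
        printf("mask=0x%llx fallback=%d\n",
               (unsigned long long)mask, fallback);
        return 0;
}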
if (err)
return err;
+ switch (dev->dev->bus_type) {
+#ifdef CONFIG_B43_SSB
+ case B43_BUS_SSB:
+ dma->translation = ssb_dma_translation(dev->dev->sdev);
+ break;
+#endif
+ }
+
err = -ENOMEM;
/* setup TX DMA channels. */
dma->tx_ring_AC_BK = b43_setup_dmaring(dev, 0, 1, type);
goto err_destroy_mcast;
/* No support for the TX status DMA ring. */
- B43_WARN_ON(dev->dev->id.revision < 5);
+ B43_WARN_ON(dev->dev->core_rev < 5);
b43dbg(dev->wl, "%u-bit DMA initialized\n",
(unsigned int)type);
case 0x5000:
ring = dma->tx_ring_mcast;
break;
- default:
- B43_WARN_ON(1);
}
*slot = (cookie & 0x0FFF);
- B43_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots));
+ if (unlikely(!ring || *slot < 0 || *slot >= ring->nr_slots)) {
+ b43dbg(dev->wl, "TX-status contains "
+ "invalid cookie: 0x%04X\n", cookie);
+ return NULL;
+ }
return ring;
}
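/*
 * Illustration only: the cookie layout assumed by parse_cookie() above,
 * with a 4-bit ring id in the top nibble (0x5000 selects the multicast
 * ring) and a 12-bit slot index masked by 0x0FFF. Standalone sketch;
 * make_cookie()/split_cookie() are made-up names.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t make_cookie(unsigned int ring_id, unsigned int slot)
{
        return (uint16_t)((ring_id << 12) | (slot & 0x0FFF));
}

static void split_cookie(uint16_t cookie, unsigned int *ring_id,
                         unsigned int *slot)
{
        *ring_id = cookie >> 12;
        *slot = cookie & 0x0FFF;
}

int main(void)
{
        unsigned int ring_id, slot;

        split_cookie(make_cookie(5, 42), &ring_id, &slot);
        printf("ring=0x%X000 slot=%u\n", ring_id, slot); /* ring=0x5000 slot=42 */
        return 0;
}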
{
const struct b43_dma_ops *ops = ring->ops;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct b43_private_tx_info *priv_info = b43_get_priv_tx_info(info);
u8 *header;
int slot, old_top_slot, old_used_slots;
int err;
struct b43_dmadesc_generic *desc;
struct b43_dmadesc_meta *meta;
struct b43_dmadesc_meta *meta_hdr;
- struct sk_buff *bounce_skb;
u16 cookie;
size_t hdrsize = b43_txhdr_size(ring->dev);
header = &(ring->txhdr_cache[(slot / TX_SLOTS_PER_FRAME) * hdrsize]);
cookie = generate_cookie(ring, slot);
err = b43_generate_txhdr(ring->dev, header,
- skb->data, skb->len, info, cookie);
+ skb, info, cookie);
if (unlikely(err)) {
ring->current_slot = old_top_slot;
ring->used_slots = old_used_slots;
meta->skb = skb;
meta->is_last_fragment = 1;
+ priv_info->bouncebuffer = NULL;
meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
/* create a bounce buffer in zone_dma on mapping failure. */
if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
- bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
- if (!bounce_skb) {
+ priv_info->bouncebuffer = kmemdup(skb->data, skb->len,
+ GFP_ATOMIC | GFP_DMA);
+ if (!priv_info->bouncebuffer) {
ring->current_slot = old_top_slot;
ring->used_slots = old_used_slots;
err = -ENOMEM;
goto out_unmap_hdr;
}
- memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
- dev_kfree_skb_any(skb);
- skb = bounce_skb;
- meta->skb = skb;
- meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
+ meta->dmaaddr = map_descbuffer(ring, priv_info->bouncebuffer, skb->len, 1);
if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
+ kfree(priv_info->bouncebuffer);
+ priv_info->bouncebuffer = NULL;
ring->current_slot = old_top_slot;
ring->used_slots = old_used_slots;
err = -EIO;
- goto out_free_bounce;
+ goto out_unmap_hdr;
}
}
ops->poke_tx(ring, next_slot(ring, slot));
return 0;
-out_free_bounce:
- dev_kfree_skb_any(skb);
out_unmap_hdr:
unmap_descbuffer(ring, meta_hdr->dmaaddr,
hdrsize, 1);
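/*
 * Illustration only: the bounce-buffer pattern used in the TX path above,
 * reduced to a standalone sketch. try_map() is a made-up stand-in for
 * dma_map_single() + b43_dma_mapping_error(); malloc()/memcpy() stand in
 * for kmemdup(..., GFP_ATOMIC | GFP_DMA). The original frame is left
 * untouched, so mac80211 can still report status on the real skb.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int try_map(const unsigned char *buf, size_t len, int *fail_once)
{
        /* Pretend the first mapping attempt fails. */
        (void)buf;
        (void)len;
        return (*fail_once)-- > 0 ? -1 : 0;
}

static int map_frame(const unsigned char *data, size_t len,
                     unsigned char **bounce, int *fail_once)
{
        *bounce = NULL;
        if (try_map(data, len, fail_once) == 0)
                return 0;                  /* mapped in place */

        *bounce = malloc(len);             /* kmemdup() in the driver */
        if (!*bounce)
                return -1;                 /* -ENOMEM */
        memcpy(*bounce, data, len);

        if (try_map(*bounce, len, fail_once)) {
                free(*bounce);             /* kfree() on second failure */
                *bounce = NULL;
                return -1;                 /* -EIO */
        }
        return 0;
}

int main(void)
{
        unsigned char frame[64] = { 0 };
        unsigned char *bounce;
        int fail_once = 1;

        if (map_frame(frame, sizeof(frame), &bounce, &fail_once) == 0)
                printf("mapped via %s\n",
                       bounce ? "bounce buffer" : "original buffer");
        free(bounce);
        return 0;
}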
{
struct b43_dmaring *ring;
- if (b43_modparam_qos) {
+ if (dev->qos_enabled) {
/* 0 = highest priority */
switch (queue_prio) {
default:
struct b43_dmaring *ring;
struct ieee80211_hdr *hdr;
int err = 0;
- unsigned long flags;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
hdr = (struct ieee80211_hdr *)skb->data;
dev, skb_get_queue_mapping(skb));
}
- spin_lock_irqsave(&ring->lock, flags);
-
B43_WARN_ON(!ring->tx);
- /* Check if the queue was stopped in mac80211,
- * but we got called nevertheless.
- * That would be a mac80211 bug. */
- B43_WARN_ON(ring->stopped);
- if (unlikely(free_slots(ring) < TX_SLOTS_PER_FRAME)) {
- b43warn(dev->wl, "DMA queue overflow\n");
+ if (unlikely(ring->stopped)) {
+ /* We get here only because of a bug in mac80211.
+ * Because of a race, one packet may be queued after
+ * the queue is stopped, thus we got called when we shouldn't.
+ * For now, just refuse the transmit. */
+ if (b43_debug(dev, B43_DBG_DMAVERBOSE))
+ b43err(dev->wl, "Packet after queue stopped\n");
err = -ENOSPC;
- goto out_unlock;
+ goto out;
+ }
+
+ if (unlikely(WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME))) {
+ /* If we get here, we have a real error with the queue
+ * full, but queues not stopped. */
+ b43err(dev->wl, "DMA queue overflow\n");
+ err = -ENOSPC;
+ goto out;
}
/* Assign the queue number to the ring (if not already done before)
* anymore and must not transmit it unencrypted. */
dev_kfree_skb_any(skb);
err = 0;
- goto out_unlock;
+ goto out;
}
if (unlikely(err)) {
b43err(dev->wl, "DMA tx mapping failure\n");
- goto out_unlock;
+ goto out;
}
- ring->nr_tx_packets++;
if ((free_slots(ring) < TX_SLOTS_PER_FRAME) ||
should_inject_overflow(ring)) {
/* This TX ring is full. */
b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
}
}
-out_unlock:
- spin_unlock_irqrestore(&ring->lock, flags);
+out:
return err;
}
-/* Called with IRQs disabled. */
void b43_dma_handle_txstatus(struct b43_wldev *dev,
const struct b43_txstatus *status)
{
const struct b43_dma_ops *ops;
struct b43_dmaring *ring;
- struct b43_dmadesc_generic *desc;
struct b43_dmadesc_meta *meta;
- int slot;
+ int slot, firstused;
bool frame_succeed;
ring = parse_cookie(dev, status->cookie, &slot);
if (unlikely(!ring))
return;
- spin_lock(&ring->lock); /* IRQs are already disabled. */
- B43_WARN_ON(!ring->tx);
+ B43_WARN_ON(!ring->tx);
+ /* Sanity check: TX packets are processed in-order on one ring.
+ * Check if the slot deduced from the cookie really is the first
+ * used slot. */
+ firstused = ring->current_slot - ring->used_slots + 1;
+ if (firstused < 0)
+ firstused = ring->nr_slots + firstused;
+ if (unlikely(slot != firstused)) {
+ /* This possibly is a firmware bug and will result in
+ * malfunction, memory leaks and/or stall of DMA functionality. */
+ b43dbg(dev->wl, "Out of order TX status report on DMA ring %d. "
+ "Expected %d, but got %d\n",
+ ring->index, firstused, slot);
+ return;
+ }
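/*
 * Illustration only: the "firstused" computation from the sanity check
 * above. Because TX status reports arrive in-order on a ring, the slot
 * from the cookie must equal the oldest in-flight slot, which is
 * current_slot - used_slots + 1 modulo the ring size. Standalone sketch.
 */
#include <stdio.h>

static int first_used_slot(int current_slot, int used_slots, int nr_slots)
{
        int first = current_slot - used_slots + 1;

        if (first < 0)
                first += nr_slots; /* wrap around the ring boundary */
        return first;
}

int main(void)
{
        /* current=1 with 4 frames in flight on a 256-slot ring: oldest is 254. */
        printf("%d\n", first_used_slot(1, 4, 256));
        return 0;
}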
ops = ring->ops;
while (1) {
- B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
- desc = ops->idx2desc(ring, slot, &meta);
+ B43_WARN_ON(slot < 0 || slot >= ring->nr_slots);
+ /* get meta - ignore returned value */
+ ops->idx2desc(ring, slot, &meta);
+
+ if (b43_dma_ptr_is_poisoned(meta->skb)) {
+ b43dbg(dev->wl, "Poisoned TX slot %d (first=%d) "
+ "on ring %d\n",
+ slot, firstused, ring->index);
+ break;
+ }
- if (meta->skb)
- unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len,
- 1);
- else
+ if (meta->skb) {
+ struct b43_private_tx_info *priv_info =
+ b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));
+
+ unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
+ kfree(priv_info->bouncebuffer);
+ priv_info->bouncebuffer = NULL;
+ } else {
unmap_descbuffer(ring, meta->dmaaddr,
b43_txhdr_size(dev), 1);
+ }
if (meta->is_last_fragment) {
struct ieee80211_tx_info *info;
- BUG_ON(!meta->skb);
+ if (unlikely(!meta->skb)) {
+ /* This is a scatter-gather fragment of a frame, so
+ * the skb pointer must not be NULL. */
+ b43dbg(dev->wl, "TX status unexpected NULL skb "
+ "at slot %d (first=%d) on ring %d\n",
+ slot, firstused, ring->index);
+ break;
+ }
info = IEEE80211_SKB_CB(meta->skb);
ring->nr_failed_tx_packets++;
ring->nr_total_packet_tries += status->frame_count;
#endif /* DEBUG */
- ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb);
- /* skb is freed by ieee80211_tx_status_irqsafe() */
- meta->skb = NULL;
+ ieee80211_tx_status(dev->wl->hw, meta->skb);
+ /* skb will be freed by ieee80211_tx_status().
+ * Poison our pointer. */
+ meta->skb = B43_DMA_PTR_POISON;
} else {
/* No need to call free_descriptor_buffer here, as
* this is only the txhdr, which is not allocated.
*/
- B43_WARN_ON(meta->skb);
+ if (unlikely(meta->skb)) {
+ b43dbg(dev->wl, "TX status unexpected non-NULL skb "
+ "at slot %d (first=%d) on ring %d\n",
+ slot, firstused, ring->index);
+ break;
+ }
}
/* Everything unmapped and free'd. So it's not used anymore. */
ring->used_slots--;
- if (meta->is_last_fragment)
+ if (meta->is_last_fragment) {
+ /* This is the last scatter-gather
+ * fragment of the frame. We are done. */
break;
+ }
slot = next_slot(ring, slot);
}
if (ring->stopped) {
b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
}
}
-
- spin_unlock(&ring->lock);
-}
-
-void b43_dma_get_tx_stats(struct b43_wldev *dev,
- struct ieee80211_tx_queue_stats *stats)
-{
- const int nr_queues = dev->wl->hw->queues;
- struct b43_dmaring *ring;
- unsigned long flags;
- int i;
-
- for (i = 0; i < nr_queues; i++) {
- ring = select_ring_by_priority(dev, i);
-
- spin_lock_irqsave(&ring->lock, flags);
- stats[i].len = ring->used_slots / TX_SLOTS_PER_FRAME;
- stats[i].limit = ring->nr_slots / TX_SLOTS_PER_FRAME;
- stats[i].count = ring->nr_tx_packets;
- spin_unlock_irqrestore(&ring->lock, flags);
- }
}
static void dma_rx(struct b43_dmaring *ring, int *slot)
len = le16_to_cpu(rxhdr->frame_len);
} while (len == 0 && i++ < 5);
if (unlikely(len == 0)) {
- /* recycle the descriptor buffer. */
- sync_descbuffer_for_device(ring, meta->dmaaddr,
- ring->rx_buffersize);
- goto drop;
+ dmaaddr = meta->dmaaddr;
+ goto drop_recycle_buffer;
}
}
- if (unlikely(len > ring->rx_buffersize)) {
+ if (unlikely(b43_rx_buffer_is_poisoned(ring, skb))) {
+ /* Something went wrong with the DMA.
+ * The device did not touch the buffer and did not overwrite the poison. */
+ b43dbg(ring->dev->wl, "DMA RX: Dropping poisoned buffer.\n");
+ dmaaddr = meta->dmaaddr;
+ goto drop_recycle_buffer;
+ }
+ if (unlikely(len + ring->frameoffset > ring->rx_buffersize)) {
/* The data did not fit into one descriptor buffer
* and is split over multiple buffers.
* This should never happen, as we try to allocate buffers
while (1) {
desc = ops->idx2desc(ring, *slot, &meta);
/* recycle the descriptor buffer. */
+ b43_poison_rx_buffer(ring, meta->skb);
sync_descbuffer_for_device(ring, meta->dmaaddr,
ring->rx_buffersize);
*slot = next_slot(ring, *slot);
err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
if (unlikely(err)) {
b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n");
- sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
- goto drop;
+ goto drop_recycle_buffer;
}
unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
b43_rx(ring->dev, skb, rxhdr);
drop:
return;
+
+drop_recycle_buffer:
+ /* Poison and recycle the RX buffer. */
+ b43_poison_rx_buffer(ring, skb);
+ sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
}
void b43_dma_rx(struct b43_dmaring *ring)
dma_rx(ring, &slot);
update_max_used_slots(ring, ++used_slots);
}
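+ /* Ensure the descriptor updates are visible to the device
+ * before the hardware RX slot pointer is advanced below. */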
+ wmb();
ops->set_current_rxslot(ring, slot);
ring->current_slot = slot;
}
static void b43_dma_tx_suspend_ring(struct b43_dmaring *ring)
{
- unsigned long flags;
-
- spin_lock_irqsave(&ring->lock, flags);
B43_WARN_ON(!ring->tx);
ring->ops->tx_suspend(ring);
- spin_unlock_irqrestore(&ring->lock, flags);
}
static void b43_dma_tx_resume_ring(struct b43_dmaring *ring)
{
- unsigned long flags;
-
- spin_lock_irqsave(&ring->lock, flags);
B43_WARN_ON(!ring->tx);
ring->ops->tx_resume(ring);
- spin_unlock_irqrestore(&ring->lock, flags);
}
void b43_dma_tx_suspend(struct b43_wldev *dev)
b43_power_saving_ctl_bits(dev, 0);
}
-#ifdef CONFIG_B43_PIO
static void direct_fifo_rx(struct b43_wldev *dev, enum b43_dmatype type,
u16 mmio_base, bool enable)
{
mmio_base = b43_dmacontroller_base(type, engine_index);
direct_fifo_rx(dev, type, mmio_base, enable);
}
-#endif /* CONFIG_B43_PIO */