1 /******************************************************************************
3 Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved.
5 802.11 status code portion of this file from ethereal-0.10.6:
6 Copyright 2000, Axis Communications AB
7 Ethereal - Network traffic analyzer
8 By Gerald Combs <gerald@ethereal.com>
9 Copyright 1998 Gerald Combs
11 This program is free software; you can redistribute it and/or modify it
12 under the terms of version 2 of the GNU General Public License as
13 published by the Free Software Foundation.
15 This program is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
20 You should have received a copy of the GNU General Public License along with
21 this program; if not, write to the Free Software Foundation, Inc., 59
22 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 The full GNU General Public License is included in this distribution in the
28 James P. Ketrenos <ipw2100-admin@linux.intel.com>
29 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 ******************************************************************************/
34 #include <linux/version.h>
43 #ifdef CONFIG_IPW2200_DEBUG
49 #ifdef CONFIG_IPW2200_MONITOR
55 #ifdef CONFIG_IPW2200_PROMISCUOUS
61 #ifdef CONFIG_IPW2200_RADIOTAP
67 #ifdef CONFIG_IPW2200_QOS
73 #define IPW2200_VERSION "1.1.2" VK VD VM VP VR VQ
74 #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver"
75 #define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation"
76 #define DRV_VERSION IPW2200_VERSION
78 #define ETH_P_80211_STATS (ETH_P_80211_RAW + 1)
80 MODULE_DESCRIPTION(DRV_DESCRIPTION);
81 MODULE_VERSION(DRV_VERSION);
82 MODULE_AUTHOR(DRV_COPYRIGHT);
83 MODULE_LICENSE("GPL");
85 static int cmdlog = 0;
86 #ifdef CONFIG_IPW2200_DEBUG
89 static int channel = 0;
92 static u32 ipw_debug_level;
93 static int associate = 1;
94 static int auto_create = 1;
96 static int disable = 0;
97 static int bt_coexist = 0;
98 static int hwcrypto = 0;
99 static int roaming = 1;
100 static const char ipw_modes[] = {
103 static int antenna = CFG_SYS_ANTENNA_BOTH;
105 #ifdef CONFIG_IPW2200_PROMISCUOUS
106 static int rtap_iface = 0; /* def: 0 -- do not create rtap interface */
110 #ifdef CONFIG_IPW2200_QOS
111 static int qos_enable = 0;
112 static int qos_burst_enable = 0;
113 static int qos_no_ack_mask = 0;
114 static int burst_duration_CCK = 0;
115 static int burst_duration_OFDM = 0;
117 static struct ieee80211_qos_parameters def_qos_parameters_OFDM = {
118 {QOS_TX0_CW_MIN_OFDM, QOS_TX1_CW_MIN_OFDM, QOS_TX2_CW_MIN_OFDM,
119 QOS_TX3_CW_MIN_OFDM},
120 {QOS_TX0_CW_MAX_OFDM, QOS_TX1_CW_MAX_OFDM, QOS_TX2_CW_MAX_OFDM,
121 QOS_TX3_CW_MAX_OFDM},
122 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
123 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
124 {QOS_TX0_TXOP_LIMIT_OFDM, QOS_TX1_TXOP_LIMIT_OFDM,
125 QOS_TX2_TXOP_LIMIT_OFDM, QOS_TX3_TXOP_LIMIT_OFDM}
128 static struct ieee80211_qos_parameters def_qos_parameters_CCK = {
129 {QOS_TX0_CW_MIN_CCK, QOS_TX1_CW_MIN_CCK, QOS_TX2_CW_MIN_CCK,
131 {QOS_TX0_CW_MAX_CCK, QOS_TX1_CW_MAX_CCK, QOS_TX2_CW_MAX_CCK,
133 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
134 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
135 {QOS_TX0_TXOP_LIMIT_CCK, QOS_TX1_TXOP_LIMIT_CCK, QOS_TX2_TXOP_LIMIT_CCK,
136 QOS_TX3_TXOP_LIMIT_CCK}
139 static struct ieee80211_qos_parameters def_parameters_OFDM = {
140 {DEF_TX0_CW_MIN_OFDM, DEF_TX1_CW_MIN_OFDM, DEF_TX2_CW_MIN_OFDM,
141 DEF_TX3_CW_MIN_OFDM},
142 {DEF_TX0_CW_MAX_OFDM, DEF_TX1_CW_MAX_OFDM, DEF_TX2_CW_MAX_OFDM,
143 DEF_TX3_CW_MAX_OFDM},
144 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
145 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
146 {DEF_TX0_TXOP_LIMIT_OFDM, DEF_TX1_TXOP_LIMIT_OFDM,
147 DEF_TX2_TXOP_LIMIT_OFDM, DEF_TX3_TXOP_LIMIT_OFDM}
150 static struct ieee80211_qos_parameters def_parameters_CCK = {
151 {DEF_TX0_CW_MIN_CCK, DEF_TX1_CW_MIN_CCK, DEF_TX2_CW_MIN_CCK,
153 {DEF_TX0_CW_MAX_CCK, DEF_TX1_CW_MAX_CCK, DEF_TX2_CW_MAX_CCK,
155 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
156 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
157 {DEF_TX0_TXOP_LIMIT_CCK, DEF_TX1_TXOP_LIMIT_CCK, DEF_TX2_TXOP_LIMIT_CCK,
158 DEF_TX3_TXOP_LIMIT_CCK}
161 static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
163 static int from_priority_to_tx_queue[] = {
164 IPW_TX_QUEUE_1, IPW_TX_QUEUE_2, IPW_TX_QUEUE_2, IPW_TX_QUEUE_1,
165 IPW_TX_QUEUE_3, IPW_TX_QUEUE_3, IPW_TX_QUEUE_4, IPW_TX_QUEUE_4
168 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv);
170 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_qos_parameters
172 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element
174 #endif /* CONFIG_IPW2200_QOS */
176 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev);
177 static void ipw_remove_current_network(struct ipw_priv *priv);
178 static void ipw_rx(struct ipw_priv *priv);
179 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
180 struct clx2_tx_queue *txq, int qindex);
181 static int ipw_queue_reset(struct ipw_priv *priv);
183 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
186 static void ipw_tx_queue_free(struct ipw_priv *);
188 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
189 static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
190 static void ipw_rx_queue_replenish(void *);
191 static int ipw_up(struct ipw_priv *);
192 static void ipw_bg_up(void *);
193 static void ipw_down(struct ipw_priv *);
194 static void ipw_bg_down(void *);
195 static int ipw_config(struct ipw_priv *);
196 static int init_supported_rates(struct ipw_priv *priv,
197 struct ipw_supported_rates *prates);
198 static void ipw_set_hwcrypto_keys(struct ipw_priv *);
199 static void ipw_send_wep_keys(struct ipw_priv *, int);
201 static int snprint_line(char *buf, size_t count,
202 const u8 * data, u32 len, u32 ofs)
207 out = snprintf(buf, count, "%08X", ofs);
209 for (l = 0, i = 0; i < 2; i++) {
210 out += snprintf(buf + out, count - out, " ");
211 for (j = 0; j < 8 && l < len; j++, l++)
212 out += snprintf(buf + out, count - out, "%02X ",
215 out += snprintf(buf + out, count - out, " ");
218 out += snprintf(buf + out, count - out, " ");
219 for (l = 0, i = 0; i < 2; i++) {
220 out += snprintf(buf + out, count - out, " ");
221 for (j = 0; j < 8 && l < len; j++, l++) {
222 c = data[(i * 8 + j)];
223 if (!isascii(c) || !isprint(c))
226 out += snprintf(buf + out, count - out, "%c", c);
230 out += snprintf(buf + out, count - out, " ");
236 static void printk_buf(int level, const u8 * data, u32 len)
240 if (!(ipw_debug_level & level))
244 snprint_line(line, sizeof(line), &data[ofs],
246 printk(KERN_DEBUG "%s\n", line);
248 len -= min(len, 16U);
252 static int snprintk_buf(u8 * output, size_t size, const u8 * data, size_t len)
258 while (size && len) {
259 out = snprint_line(output, size, &data[ofs],
260 min_t(size_t, len, 16U), ofs);
265 len -= min_t(size_t, len, 16U);
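/*
 * Illustrative use of the hex-dump helpers above (a sketch, not an extra
 * call site): printk_buf() dumps a buffer to the kernel log when the given
 * debug level bit is set in ipw_debug_level, e.g.
 *
 *	printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);
 *
 * while snprintk_buf() formats the same dump into a caller-supplied buffer,
 * as done for the sysfs 'cmd_log' attribute further below.
 */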
271 /* alias for 32-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
272 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
273 #define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)
275 /* alias for 8-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
276 static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
277 #define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)
279 /* 8-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
280 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
281 static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
283 IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__,
284 __LINE__, (u32) (b), (u32) (c));
285 _ipw_write_reg8(a, b, c);
288 /* 16-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
289 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
290 static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
292 IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__,
293 __LINE__, (u32) (b), (u32) (c));
294 _ipw_write_reg16(a, b, c);
297 /* 32-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
298 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
299 static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
301 IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__,
302 __LINE__, (u32) (b), (u32) (c));
303 _ipw_write_reg32(a, b, c);
306 /* 8-bit direct write (low 4K) */
307 #define _ipw_write8(ipw, ofs, val) writeb((val), (ipw)->hw_base + (ofs))
309 /* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
310 #define ipw_write8(ipw, ofs, val) do { \
311 IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
312 _ipw_write8(ipw, ofs, val); } while (0)
314 /* 16-bit direct write (low 4K) */
315 #define _ipw_write16(ipw, ofs, val) writew((val), (ipw)->hw_base + (ofs))
317 /* 16-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
318 #define ipw_write16(ipw, ofs, val) do { \
319 IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
320 _ipw_write16(ipw, ofs, val); } while (0)
322 /* 32-bit direct write (low 4K) */
323 #define _ipw_write32(ipw, ofs, val) writel((val), (ipw)->hw_base + (ofs))
325 /* 32-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
326 #define ipw_write32(ipw, ofs, val) do { \
327 IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
328 _ipw_write32(ipw, ofs, val); } while (0)
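/*
 * Summary of the access convention used throughout this file: the first 4K
 * of the device's PCI memory window is accessed directly, e.g.
 *
 *	ipw_write32(priv, IPW_INTA_MASK_R, IPW_INTA_MASK_ALL);
 *
 * while SRAM and registers above 4K go through the indirect
 * IPW_INDIRECT_ADDR / IPW_INDIRECT_DATA register pair, e.g.
 *
 *	u32 led = ipw_read_reg32(priv, IPW_EVENT_REG);
 */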
330 /* 8-bit direct read (low 4K) */
331 #define _ipw_read8(ipw, ofs) readb((ipw)->hw_base + (ofs))
333 /* 8-bit direct read (low 4K), with debug wrapper */
334 static inline u8 __ipw_read8(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
336 IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", f, l, (u32) (ofs));
337 return _ipw_read8(ipw, ofs);
340 /* alias to 8-bit direct read (low 4K of SRAM/regs), with debug wrapper */
341 #define ipw_read8(ipw, ofs) __ipw_read8(__FILE__, __LINE__, ipw, ofs)
343 /* 16-bit direct read (low 4K) */
344 #define _ipw_read16(ipw, ofs) readw((ipw)->hw_base + (ofs))
346 /* 16-bit direct read (low 4K), with debug wrapper */
347 static inline u16 __ipw_read16(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
349 IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", f, l, (u32) (ofs));
350 return _ipw_read16(ipw, ofs);
353 /* alias to 16-bit direct read (low 4K of SRAM/regs), with debug wrapper */
354 #define ipw_read16(ipw, ofs) __ipw_read16(__FILE__, __LINE__, ipw, ofs)
356 /* 32-bit direct read (low 4K) */
357 #define _ipw_read32(ipw, ofs) readl((ipw)->hw_base + (ofs))
359 /* 32-bit direct read (low 4K), with debug wrapper */
360 static inline u32 __ipw_read32(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
362 IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", f, l, (u32) (ofs));
363 return _ipw_read32(ipw, ofs);
366 /* alias to 32-bit direct read (low 4K of SRAM/regs), with debug wrapper */
367 #define ipw_read32(ipw, ofs) __ipw_read32(__FILE__, __LINE__, ipw, ofs)
369 /* multi-byte read (above 4K), with debug wrapper */
370 static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
371 static inline void __ipw_read_indirect(const char *f, int l,
372 struct ipw_priv *a, u32 b, u8 * c, int d)
374 IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %d bytes\n", f, l, (u32) (b),
376 _ipw_read_indirect(a, b, c, d);
379 /* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
380 #define ipw_read_indirect(a, b, c, d) __ipw_read_indirect(__FILE__, __LINE__, a, b, c, d)
382 /* multi-byte write (SRAM/regs above 4K), with debug wrapper */
383 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data,
385 #define ipw_write_indirect(a, b, c, d) do { \
386 IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %d bytes\n", __FILE__, __LINE__, (u32)(b), d); \
387 _ipw_write_indirect(a, b, c, d); } while (0)
389 /* 32-bit indirect write (above 4K) */
390 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value)
392 IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value);
393 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
394 _ipw_write32(priv, IPW_INDIRECT_DATA, value);
397 /* 8-bit indirect write (above 4K) */
398 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
400 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
401 u32 dif_len = reg - aligned_addr;
403 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
404 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
405 _ipw_write8(priv, IPW_INDIRECT_DATA + dif_len, value);
408 /* 16-bit indirect write (above 4K) */
409 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value)
411 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
412 u32 dif_len = (reg - aligned_addr) & (~0x1ul);
414 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
415 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
416 _ipw_write16(priv, IPW_INDIRECT_DATA + dif_len, value);
419 /* 8-bit indirect read (above 4K) */
420 static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
423 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK);
424 IPW_DEBUG_IO(" reg = 0x%8X : \n", reg);
425 word = _ipw_read32(priv, IPW_INDIRECT_DATA);
426 return (word >> ((reg & 0x3) * 8)) & 0xff;
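/*
 * Worked example (illustrative): _ipw_read_reg8(priv, 0x00030006) programs
 * IPW_INDIRECT_ADDR with the dword-aligned address 0x00030004, reads the
 * full dword from IPW_INDIRECT_DATA and returns bits 23:16, since
 * (0x00030006 & 0x3) * 8 == 16.
 */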
429 /* 32-bit indirect read (above 4K) */
430 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
434 IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg);
436 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
437 value = _ipw_read32(priv, IPW_INDIRECT_DATA);
438 IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x \n", reg, value);
442 /* General purpose, no alignment requirement, iterative (multi-byte) read, */
443 /* for area above 1st 4K of SRAM/reg space */
444 static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
447 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
448 u32 dif_len = addr - aligned_addr;
451 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
457 /* Read the first dword (or portion) byte by byte */
458 if (unlikely(dif_len)) {
459 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
460 /* Start reading at aligned_addr + dif_len */
461 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--)
462 *buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i);
466 /* Read all of the middle dwords as dwords, with auto-increment */
467 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
468 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
469 *(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA);
471 /* Read the last dword (or portion) byte by byte */
473 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
474 for (i = 0; num > 0; i++, num--)
475 *buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i);
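/*
 * Example of the three-phase split above (illustrative): reading 10 bytes
 * from an address ending in ...2 first fetches the 2 bytes that complete
 * the leading dword one byte at a time, then reads the next 8 bytes as two
 * auto-incremented dwords, leaving nothing for the trailing byte-by-byte
 * phase.
 */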
479 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
480 /* for area above 1st 4K of SRAM/reg space */
481 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
484 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
485 u32 dif_len = addr - aligned_addr;
488 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
494 /* Write the first dword (or portion) byte by byte */
495 if (unlikely(dif_len)) {
496 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
497 /* Start writing at aligned_addr + dif_len */
498 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++)
499 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
503 /* Write all of the middle dwords as dwords, with auto-increment */
504 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
505 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
506 _ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf);
508 /* Write the last dword (or portion) byte by byte */
510 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
511 for (i = 0; num > 0; i++, num--, buf++)
512 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
516 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
517 /* for 1st 4K of SRAM/regs space */
518 static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
521 memcpy_toio((priv->hw_base + addr), buf, num);
524 /* Set bit(s) in low 4K of SRAM/regs */
525 static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
527 ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);
530 /* Clear bit(s) in low 4K of SRAM/regs */
531 static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
533 ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
536 static inline void ipw_enable_interrupts(struct ipw_priv *priv)
538 if (priv->status & STATUS_INT_ENABLED)
540 priv->status |= STATUS_INT_ENABLED;
541 ipw_write32(priv, IPW_INTA_MASK_R, IPW_INTA_MASK_ALL);
544 static inline void ipw_disable_interrupts(struct ipw_priv *priv)
546 if (!(priv->status & STATUS_INT_ENABLED))
548 priv->status &= ~STATUS_INT_ENABLED;
549 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
552 #ifdef CONFIG_IPW2200_DEBUG
553 static char *ipw_error_desc(u32 val)
556 case IPW_FW_ERROR_OK:
558 case IPW_FW_ERROR_FAIL:
560 case IPW_FW_ERROR_MEMORY_UNDERFLOW:
561 return "MEMORY_UNDERFLOW";
562 case IPW_FW_ERROR_MEMORY_OVERFLOW:
563 return "MEMORY_OVERFLOW";
564 case IPW_FW_ERROR_BAD_PARAM:
566 case IPW_FW_ERROR_BAD_CHECKSUM:
567 return "BAD_CHECKSUM";
568 case IPW_FW_ERROR_NMI_INTERRUPT:
569 return "NMI_INTERRUPT";
570 case IPW_FW_ERROR_BAD_DATABASE:
571 return "BAD_DATABASE";
572 case IPW_FW_ERROR_ALLOC_FAIL:
574 case IPW_FW_ERROR_DMA_UNDERRUN:
575 return "DMA_UNDERRUN";
576 case IPW_FW_ERROR_DMA_STATUS:
578 case IPW_FW_ERROR_DINO_ERROR:
580 case IPW_FW_ERROR_EEPROM_ERROR:
581 return "EEPROM_ERROR";
582 case IPW_FW_ERROR_SYSASSERT:
584 case IPW_FW_ERROR_FATAL_ERROR:
585 return "FATAL_ERROR";
587 return "UNKNOWN_ERROR";
591 static void ipw_dump_error_log(struct ipw_priv *priv,
592 struct ipw_fw_error *error)
597 IPW_ERROR("Error allocating and capturing error log. "
598 "Nothing to dump.\n");
602 IPW_ERROR("Start IPW Error Log Dump:\n");
603 IPW_ERROR("Status: 0x%08X, Config: %08X\n",
604 error->status, error->config);
606 for (i = 0; i < error->elem_len; i++)
607 IPW_ERROR("%s %i 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
608 ipw_error_desc(error->elem[i].desc),
610 error->elem[i].blink1,
611 error->elem[i].blink2,
612 error->elem[i].link1,
613 error->elem[i].link2, error->elem[i].data);
614 for (i = 0; i < error->log_len; i++)
615 IPW_ERROR("%i\t0x%08x\t%i\n",
617 error->log[i].data, error->log[i].event);
621 static inline int ipw_is_init(struct ipw_priv *priv)
623 return (priv->status & STATUS_INIT) ? 1 : 0;
626 static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len)
628 u32 addr, field_info, field_len, field_count, total_len;
630 IPW_DEBUG_ORD("ordinal = %i\n", ord);
632 if (!priv || !val || !len) {
633 IPW_DEBUG_ORD("Invalid argument\n");
637 /* verify device ordinal tables have been initialized */
638 if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
639 IPW_DEBUG_ORD("Access ordinals before initialization\n");
643 switch (IPW_ORD_TABLE_ID_MASK & ord) {
644 case IPW_ORD_TABLE_0_MASK:
646 * TABLE 0: Direct access to a table of 32 bit values
648 * This is a very simple table with the data directly
649 * read from the table
652 /* remove the table id from the ordinal */
653 ord &= IPW_ORD_TABLE_VALUE_MASK;
656 if (ord > priv->table0_len) {
657 IPW_DEBUG_ORD("ordinal value (%i) longer then "
658 "max (%i)\n", ord, priv->table0_len);
662 /* verify we have enough room to store the value */
663 if (*len < sizeof(u32)) {
664 IPW_DEBUG_ORD("ordinal buffer length too small, "
665 "need %zd\n", sizeof(u32));
669 IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
670 ord, priv->table0_addr + (ord << 2));
674 *((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord);
677 case IPW_ORD_TABLE_1_MASK:
679 * TABLE 1: Indirect access to a table of 32 bit values
681 * This is a fairly large table of u32 values each
682 * representing starting addr for the data (which is
686 /* remove the table id from the ordinal */
687 ord &= IPW_ORD_TABLE_VALUE_MASK;
690 if (ord > priv->table1_len) {
691 IPW_DEBUG_ORD("ordinal value too long\n");
695 /* verify we have enough room to store the value */
696 if (*len < sizeof(u32)) {
697 IPW_DEBUG_ORD("ordinal buffer length too small, "
698 "need %zd\n", sizeof(u32));
703 ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
707 case IPW_ORD_TABLE_2_MASK:
709 * TABLE 2: Indirect access to a table of variable sized values
711 * This table consists of six values, each containing
712 * - dword containing the starting offset of the data
713 * - dword containing the length in the first 16 bits
714 * and the count in the second 16 bits
717 /* remove the table id from the ordinal */
718 ord &= IPW_ORD_TABLE_VALUE_MASK;
721 if (ord > priv->table2_len) {
722 IPW_DEBUG_ORD("ordinal value too long\n");
726 /* get the address of statistic */
727 addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));
729 /* get the second DW of statistics ;
730 * two 16-bit words - first is length, second is count */
733 priv->table2_addr + (ord << 3) +
736 /* get each entry length */
737 field_len = *((u16 *) & field_info);
739 /* get number of entries */
740 field_count = *(((u16 *) & field_info) + 1);
742 /* abort if not enough memory */
743 total_len = field_len * field_count;
744 if (total_len > *len) {
753 IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
754 "field_info = 0x%08x\n",
755 addr, total_len, field_info);
756 ipw_read_indirect(priv, addr, val, total_len);
760 IPW_DEBUG_ORD("Invalid ordinal!\n");
768 static void ipw_init_ordinals(struct ipw_priv *priv)
770 priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
771 priv->table0_len = ipw_read32(priv, priv->table0_addr);
773 IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
774 priv->table0_addr, priv->table0_len);
776 priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
777 priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);
779 IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
780 priv->table1_addr, priv->table1_len);
782 priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
783 priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
784 priv->table2_len &= 0x0000ffff; /* use first two bytes */
786 IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
787 priv->table2_addr, priv->table2_len);
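/*
 * Typical caller of ipw_get_ordinal() (sketch, mirroring the sysfs handlers
 * further below) for fetching a single 32-bit statistic:
 *
 *	u32 len = sizeof(u32), tmp = 0;
 *	if (ipw_get_ordinal(priv, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
 *		return 0;
 */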
791 static u32 ipw_register_toggle(u32 reg)
793 reg &= ~IPW_START_STANDBY;
794 if (reg & IPW_GATE_ODMA)
795 reg &= ~IPW_GATE_ODMA;
796 if (reg & IPW_GATE_IDMA)
797 reg &= ~IPW_GATE_IDMA;
798 if (reg & IPW_GATE_ADMA)
799 reg &= ~IPW_GATE_ADMA;
805 * - On radio ON, turn on any LEDs that need to be on during start
806 * - On initialization, start unassociated blink
807 * - On association, disable unassociated blink
808 * - On disassociation, start unassociated blink
809 * - On radio OFF, turn off any LEDs started during radio on
812 #define LD_TIME_LINK_ON msecs_to_jiffies(300)
813 #define LD_TIME_LINK_OFF msecs_to_jiffies(2700)
814 #define LD_TIME_ACT_ON msecs_to_jiffies(250)
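/*
 * The unassociated blink is built from the two delayed works below:
 * ipw_led_link_on() schedules led_link_off after LD_TIME_LINK_ON and
 * ipw_led_link_off() re-schedules led_link_on after LD_TIME_LINK_OFF,
 * giving roughly a 300ms-on / 2700ms-off blink until association.
 */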
816 static void ipw_led_link_on(struct ipw_priv *priv)
821 /* If configured to not use LEDs, or nic_type is 1,
822 * then we don't toggle a LINK led */
823 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
826 spin_lock_irqsave(&priv->lock, flags);
828 if (!(priv->status & STATUS_RF_KILL_MASK) &&
829 !(priv->status & STATUS_LED_LINK_ON)) {
830 IPW_DEBUG_LED("Link LED On\n");
831 led = ipw_read_reg32(priv, IPW_EVENT_REG);
832 led |= priv->led_association_on;
834 led = ipw_register_toggle(led);
836 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
837 ipw_write_reg32(priv, IPW_EVENT_REG, led);
839 priv->status |= STATUS_LED_LINK_ON;
841 /* If we aren't associated, schedule turning the LED off */
842 if (!(priv->status & STATUS_ASSOCIATED))
843 queue_delayed_work(priv->workqueue,
848 spin_unlock_irqrestore(&priv->lock, flags);
851 static void ipw_bg_led_link_on(void *data)
853 struct ipw_priv *priv = data;
854 mutex_lock(&priv->mutex);
855 ipw_led_link_on(data);
856 mutex_unlock(&priv->mutex);
859 static void ipw_led_link_off(struct ipw_priv *priv)
864 /* If configured not to use LEDs, or nic type is 1,
865 * then we don't toggle the LINK led. */
866 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
869 spin_lock_irqsave(&priv->lock, flags);
871 if (priv->status & STATUS_LED_LINK_ON) {
872 led = ipw_read_reg32(priv, IPW_EVENT_REG);
873 led &= priv->led_association_off;
874 led = ipw_register_toggle(led);
876 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
877 ipw_write_reg32(priv, IPW_EVENT_REG, led);
879 IPW_DEBUG_LED("Link LED Off\n");
881 priv->status &= ~STATUS_LED_LINK_ON;
883 /* If we aren't associated and the radio is on, schedule
884 * turning the LED on (blink while unassociated) */
885 if (!(priv->status & STATUS_RF_KILL_MASK) &&
886 !(priv->status & STATUS_ASSOCIATED))
887 queue_delayed_work(priv->workqueue, &priv->led_link_on,
892 spin_unlock_irqrestore(&priv->lock, flags);
895 static void ipw_bg_led_link_off(void *data)
897 struct ipw_priv *priv = data;
898 mutex_lock(&priv->mutex);
899 ipw_led_link_off(data);
900 mutex_unlock(&priv->mutex);
903 static void __ipw_led_activity_on(struct ipw_priv *priv)
907 if (priv->config & CFG_NO_LED)
910 if (priv->status & STATUS_RF_KILL_MASK)
913 if (!(priv->status & STATUS_LED_ACT_ON)) {
914 led = ipw_read_reg32(priv, IPW_EVENT_REG);
915 led |= priv->led_activity_on;
917 led = ipw_register_toggle(led);
919 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
920 ipw_write_reg32(priv, IPW_EVENT_REG, led);
922 IPW_DEBUG_LED("Activity LED On\n");
924 priv->status |= STATUS_LED_ACT_ON;
926 cancel_delayed_work(&priv->led_act_off);
927 queue_delayed_work(priv->workqueue, &priv->led_act_off,
930 /* Reschedule LED off for full time period */
931 cancel_delayed_work(&priv->led_act_off);
932 queue_delayed_work(priv->workqueue, &priv->led_act_off,
938 void ipw_led_activity_on(struct ipw_priv *priv)
941 spin_lock_irqsave(&priv->lock, flags);
942 __ipw_led_activity_on(priv);
943 spin_unlock_irqrestore(&priv->lock, flags);
947 static void ipw_led_activity_off(struct ipw_priv *priv)
952 if (priv->config & CFG_NO_LED)
955 spin_lock_irqsave(&priv->lock, flags);
957 if (priv->status & STATUS_LED_ACT_ON) {
958 led = ipw_read_reg32(priv, IPW_EVENT_REG);
959 led &= priv->led_activity_off;
961 led = ipw_register_toggle(led);
963 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
964 ipw_write_reg32(priv, IPW_EVENT_REG, led);
966 IPW_DEBUG_LED("Activity LED Off\n");
968 priv->status &= ~STATUS_LED_ACT_ON;
971 spin_unlock_irqrestore(&priv->lock, flags);
974 static void ipw_bg_led_activity_off(void *data)
976 struct ipw_priv *priv = data;
977 mutex_lock(&priv->mutex);
978 ipw_led_activity_off(data);
979 mutex_unlock(&priv->mutex);
982 static void ipw_led_band_on(struct ipw_priv *priv)
987 /* Only nic type 1 supports mode LEDs */
988 if (priv->config & CFG_NO_LED ||
989 priv->nic_type != EEPROM_NIC_TYPE_1 || !priv->assoc_network)
992 spin_lock_irqsave(&priv->lock, flags);
994 led = ipw_read_reg32(priv, IPW_EVENT_REG);
995 if (priv->assoc_network->mode == IEEE_A) {
996 led |= priv->led_ofdm_on;
997 led &= priv->led_association_off;
998 IPW_DEBUG_LED("Mode LED On: 802.11a\n");
999 } else if (priv->assoc_network->mode == IEEE_G) {
1000 led |= priv->led_ofdm_on;
1001 led |= priv->led_association_on;
1002 IPW_DEBUG_LED("Mode LED On: 802.11g\n");
1004 led &= priv->led_ofdm_off;
1005 led |= priv->led_association_on;
1006 IPW_DEBUG_LED("Mode LED On: 802.11b\n");
1009 led = ipw_register_toggle(led);
1011 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1012 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1014 spin_unlock_irqrestore(&priv->lock, flags);
1017 static void ipw_led_band_off(struct ipw_priv *priv)
1019 unsigned long flags;
1022 /* Only nic type 1 supports mode LEDs */
1023 if (priv->config & CFG_NO_LED || priv->nic_type != EEPROM_NIC_TYPE_1)
1026 spin_lock_irqsave(&priv->lock, flags);
1028 led = ipw_read_reg32(priv, IPW_EVENT_REG);
1029 led &= priv->led_ofdm_off;
1030 led &= priv->led_association_off;
1032 led = ipw_register_toggle(led);
1034 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1035 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1037 spin_unlock_irqrestore(&priv->lock, flags);
1040 static void ipw_led_radio_on(struct ipw_priv *priv)
1042 ipw_led_link_on(priv);
1045 static void ipw_led_radio_off(struct ipw_priv *priv)
1047 ipw_led_activity_off(priv);
1048 ipw_led_link_off(priv);
1051 static void ipw_led_link_up(struct ipw_priv *priv)
1053 /* Set the Link Led on for all nic types */
1054 ipw_led_link_on(priv);
1057 static void ipw_led_link_down(struct ipw_priv *priv)
1059 ipw_led_activity_off(priv);
1060 ipw_led_link_off(priv);
1062 if (priv->status & STATUS_RF_KILL_MASK)
1063 ipw_led_radio_off(priv);
1066 static void ipw_led_init(struct ipw_priv *priv)
1068 priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE];
1070 /* Set the default PINs for the link and activity leds */
1071 priv->led_activity_on = IPW_ACTIVITY_LED;
1072 priv->led_activity_off = ~(IPW_ACTIVITY_LED);
1074 priv->led_association_on = IPW_ASSOCIATED_LED;
1075 priv->led_association_off = ~(IPW_ASSOCIATED_LED);
1077 /* Set the default PINs for the OFDM leds */
1078 priv->led_ofdm_on = IPW_OFDM_LED;
1079 priv->led_ofdm_off = ~(IPW_OFDM_LED);
1081 switch (priv->nic_type) {
1082 case EEPROM_NIC_TYPE_1:
1083 /* In this NIC type, the LEDs are reversed.... */
1084 priv->led_activity_on = IPW_ASSOCIATED_LED;
1085 priv->led_activity_off = ~(IPW_ASSOCIATED_LED);
1086 priv->led_association_on = IPW_ACTIVITY_LED;
1087 priv->led_association_off = ~(IPW_ACTIVITY_LED);
1089 if (!(priv->config & CFG_NO_LED))
1090 ipw_led_band_on(priv);
1092 /* And we don't blink link LEDs for this nic, so
1093 * just return here */
1096 case EEPROM_NIC_TYPE_3:
1097 case EEPROM_NIC_TYPE_2:
1098 case EEPROM_NIC_TYPE_4:
1099 case EEPROM_NIC_TYPE_0:
1103 IPW_DEBUG_INFO("Unknown NIC type from EEPROM: %d\n",
1105 priv->nic_type = EEPROM_NIC_TYPE_0;
1109 if (!(priv->config & CFG_NO_LED)) {
1110 if (priv->status & STATUS_ASSOCIATED)
1111 ipw_led_link_on(priv);
1113 ipw_led_link_off(priv);
1117 static void ipw_led_shutdown(struct ipw_priv *priv)
1119 ipw_led_activity_off(priv);
1120 ipw_led_link_off(priv);
1121 ipw_led_band_off(priv);
1122 cancel_delayed_work(&priv->led_link_on);
1123 cancel_delayed_work(&priv->led_link_off);
1124 cancel_delayed_work(&priv->led_act_off);
1128 * The following adds a new attribute to the sysfs representation
1129 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/)
1130 * used for controlling the debug level.
1132 * See the level definitions in ipw for details.
1134 static ssize_t show_debug_level(struct device_driver *d, char *buf)
1136 return sprintf(buf, "0x%08X\n", ipw_debug_level);
1139 static ssize_t store_debug_level(struct device_driver *d, const char *buf,
1142 char *p = (char *)buf;
1145 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1147 if (p[0] == 'x' || p[0] == 'X')
1149 val = simple_strtoul(p, &p, 16);
1151 val = simple_strtoul(p, &p, 10);
1153 printk(KERN_INFO DRV_NAME
1154 ": %s is not in hex or decimal form.\n", buf);
1156 ipw_debug_level = val;
1158 return strnlen(buf, count);
1161 static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
1162 show_debug_level, store_debug_level);
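/*
 * Example from user space (illustrative; the path follows the driver
 * directory described above and the value may be hex or decimal):
 *
 *	echo 0x1 > /sys/bus/pci/drivers/ipw/debug_level
 *	cat /sys/bus/pci/drivers/ipw/debug_level
 */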
1164 static inline u32 ipw_get_event_log_len(struct ipw_priv *priv)
1166 /* length = 1st dword in log */
1167 return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG));
1170 static void ipw_capture_event_log(struct ipw_priv *priv,
1171 u32 log_len, struct ipw_event *log)
1176 base = ipw_read32(priv, IPW_EVENT_LOG);
1177 ipw_read_indirect(priv, base + sizeof(base) + sizeof(u32),
1178 (u8 *) log, sizeof(*log) * log_len);
1182 static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv)
1184 struct ipw_fw_error *error;
1185 u32 log_len = ipw_get_event_log_len(priv);
1186 u32 base = ipw_read32(priv, IPW_ERROR_LOG);
1187 u32 elem_len = ipw_read_reg32(priv, base);
1189 error = kmalloc(sizeof(*error) +
1190 sizeof(*error->elem) * elem_len +
1191 sizeof(*error->log) * log_len, GFP_ATOMIC);
1193 IPW_ERROR("Memory allocation for firmware error log "
1197 error->jiffies = jiffies;
1198 error->status = priv->status;
1199 error->config = priv->config;
1200 error->elem_len = elem_len;
1201 error->log_len = log_len;
1202 error->elem = (struct ipw_error_elem *)error->payload;
1203 error->log = (struct ipw_event *)(error->elem + elem_len);
1205 ipw_capture_event_log(priv, log_len, error->log);
1208 ipw_read_indirect(priv, base + sizeof(base), (u8 *) error->elem,
1209 sizeof(*error->elem) * elem_len);
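/*
 * Layout as read by the capture code above: both the event log (at the SRAM
 * address held in IPW_EVENT_LOG) and the error log (at the address held in
 * IPW_ERROR_LOG) begin with a 32-bit element count; the error elements
 * follow the count directly, while the event entries are read starting one
 * extra dword past it.
 */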
1214 static void ipw_free_error_log(struct ipw_fw_error *error)
1220 static ssize_t show_event_log(struct device *d,
1221 struct device_attribute *attr, char *buf)
1223 struct ipw_priv *priv = dev_get_drvdata(d);
1224 u32 log_len = ipw_get_event_log_len(priv);
1225 struct ipw_event log[log_len];
1228 ipw_capture_event_log(priv, log_len, log);
1230 len += snprintf(buf + len, PAGE_SIZE - len, "%08X", log_len);
1231 for (i = 0; i < log_len; i++)
1232 len += snprintf(buf + len, PAGE_SIZE - len,
1234 log[i].time, log[i].event, log[i].data);
1235 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1239 static DEVICE_ATTR(event_log, S_IRUGO, show_event_log, NULL);
1241 static ssize_t show_error(struct device *d,
1242 struct device_attribute *attr, char *buf)
1244 struct ipw_priv *priv = dev_get_drvdata(d);
1248 len += snprintf(buf + len, PAGE_SIZE - len,
1249 "%08lX%08X%08X%08X",
1250 priv->error->jiffies,
1251 priv->error->status,
1252 priv->error->config, priv->error->elem_len);
1253 for (i = 0; i < priv->error->elem_len; i++)
1254 len += snprintf(buf + len, PAGE_SIZE - len,
1255 "\n%08X%08X%08X%08X%08X%08X%08X",
1256 priv->error->elem[i].time,
1257 priv->error->elem[i].desc,
1258 priv->error->elem[i].blink1,
1259 priv->error->elem[i].blink2,
1260 priv->error->elem[i].link1,
1261 priv->error->elem[i].link2,
1262 priv->error->elem[i].data);
1264 len += snprintf(buf + len, PAGE_SIZE - len,
1265 "\n%08X", priv->error->log_len);
1266 for (i = 0; i < priv->error->log_len; i++)
1267 len += snprintf(buf + len, PAGE_SIZE - len,
1269 priv->error->log[i].time,
1270 priv->error->log[i].event,
1271 priv->error->log[i].data);
1272 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1276 static ssize_t clear_error(struct device *d,
1277 struct device_attribute *attr,
1278 const char *buf, size_t count)
1280 struct ipw_priv *priv = dev_get_drvdata(d);
1282 ipw_free_error_log(priv->error);
1288 static DEVICE_ATTR(error, S_IRUGO | S_IWUSR, show_error, clear_error);
1290 static ssize_t show_cmd_log(struct device *d,
1291 struct device_attribute *attr, char *buf)
1293 struct ipw_priv *priv = dev_get_drvdata(d);
1297 for (i = (priv->cmdlog_pos + 1) % priv->cmdlog_len;
1298 (i != priv->cmdlog_pos) && (PAGE_SIZE - len);
1299 i = (i + 1) % priv->cmdlog_len) {
1301 snprintf(buf + len, PAGE_SIZE - len,
1302 "\n%08lX%08X%08X%08X\n", priv->cmdlog[i].jiffies,
1303 priv->cmdlog[i].retcode, priv->cmdlog[i].cmd.cmd,
1304 priv->cmdlog[i].cmd.len);
1306 snprintk_buf(buf + len, PAGE_SIZE - len,
1307 (u8 *) priv->cmdlog[i].cmd.param,
1308 priv->cmdlog[i].cmd.len);
1309 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1311 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1315 static DEVICE_ATTR(cmd_log, S_IRUGO, show_cmd_log, NULL);
1317 #ifdef CONFIG_IPW2200_PROMISCUOUS
1318 static void ipw_prom_free(struct ipw_priv *priv);
1319 static int ipw_prom_alloc(struct ipw_priv *priv);
1320 static ssize_t store_rtap_iface(struct device *d,
1321 struct device_attribute *attr,
1322 const char *buf, size_t count)
1324 struct ipw_priv *priv = dev_get_drvdata(d);
1335 if (netif_running(priv->prom_net_dev)) {
1336 IPW_WARNING("Interface is up. Cannot unregister.\n");
1340 ipw_prom_free(priv);
1348 rc = ipw_prom_alloc(priv);
1358 IPW_ERROR("Failed to register promiscuous network "
1359 "device (error %d).\n", rc);
1365 static ssize_t show_rtap_iface(struct device *d,
1366 struct device_attribute *attr,
1369 struct ipw_priv *priv = dev_get_drvdata(d);
1371 return sprintf(buf, "%s", priv->prom_net_dev->name);
1380 static DEVICE_ATTR(rtap_iface, S_IWUSR | S_IRUSR, show_rtap_iface,
1383 static ssize_t store_rtap_filter(struct device *d,
1384 struct device_attribute *attr,
1385 const char *buf, size_t count)
1387 struct ipw_priv *priv = dev_get_drvdata(d);
1389 if (!priv->prom_priv) {
1390 IPW_ERROR("Attempting to set filter without "
1391 "rtap_iface enabled.\n");
1395 priv->prom_priv->filter = simple_strtol(buf, NULL, 0);
1397 IPW_DEBUG_INFO("Setting rtap filter to " BIT_FMT16 "\n",
1398 BIT_ARG16(priv->prom_priv->filter));
1403 static ssize_t show_rtap_filter(struct device *d,
1404 struct device_attribute *attr,
1407 struct ipw_priv *priv = dev_get_drvdata(d);
1408 return sprintf(buf, "0x%04X",
1409 priv->prom_priv ? priv->prom_priv->filter : 0);
1412 static DEVICE_ATTR(rtap_filter, S_IWUSR | S_IRUSR, show_rtap_filter,
1416 static ssize_t show_scan_age(struct device *d, struct device_attribute *attr,
1419 struct ipw_priv *priv = dev_get_drvdata(d);
1420 return sprintf(buf, "%d\n", priv->ieee->scan_age);
1423 static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
1424 const char *buf, size_t count)
1426 struct ipw_priv *priv = dev_get_drvdata(d);
1427 #ifdef CONFIG_IPW2200_DEBUG
1428 struct net_device *dev = priv->net_dev;
1430 char buffer[] = "00000000";
1432 (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1;
1436 IPW_DEBUG_INFO("enter\n");
1438 strncpy(buffer, buf, len);
1441 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1443 if (p[0] == 'x' || p[0] == 'X')
1445 val = simple_strtoul(p, &p, 16);
1447 val = simple_strtoul(p, &p, 10);
1449 IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name);
1451 priv->ieee->scan_age = val;
1452 IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age);
1455 IPW_DEBUG_INFO("exit\n");
1459 static DEVICE_ATTR(scan_age, S_IWUSR | S_IRUGO, show_scan_age, store_scan_age);
1461 static ssize_t show_led(struct device *d, struct device_attribute *attr,
1464 struct ipw_priv *priv = dev_get_drvdata(d);
1465 return sprintf(buf, "%d\n", (priv->config & CFG_NO_LED) ? 0 : 1);
1468 static ssize_t store_led(struct device *d, struct device_attribute *attr,
1469 const char *buf, size_t count)
1471 struct ipw_priv *priv = dev_get_drvdata(d);
1473 IPW_DEBUG_INFO("enter\n");
1479 IPW_DEBUG_LED("Disabling LED control.\n");
1480 priv->config |= CFG_NO_LED;
1481 ipw_led_shutdown(priv);
1483 IPW_DEBUG_LED("Enabling LED control.\n");
1484 priv->config &= ~CFG_NO_LED;
1488 IPW_DEBUG_INFO("exit\n");
1492 static DEVICE_ATTR(led, S_IWUSR | S_IRUGO, show_led, store_led);
1494 static ssize_t show_status(struct device *d,
1495 struct device_attribute *attr, char *buf)
1497 struct ipw_priv *p = d->driver_data;
1498 return sprintf(buf, "0x%08x\n", (int)p->status);
1501 static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
1503 static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
1506 struct ipw_priv *p = d->driver_data;
1507 return sprintf(buf, "0x%08x\n", (int)p->config);
1510 static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);
1512 static ssize_t show_nic_type(struct device *d,
1513 struct device_attribute *attr, char *buf)
1515 struct ipw_priv *priv = d->driver_data;
1516 return sprintf(buf, "TYPE: %d\n", priv->nic_type);
1519 static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL);
1521 static ssize_t show_ucode_version(struct device *d,
1522 struct device_attribute *attr, char *buf)
1524 u32 len = sizeof(u32), tmp = 0;
1525 struct ipw_priv *p = d->driver_data;
1527 if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
1530 return sprintf(buf, "0x%08x\n", tmp);
1533 static DEVICE_ATTR(ucode_version, S_IWUSR | S_IRUGO, show_ucode_version, NULL);
1535 static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
1538 u32 len = sizeof(u32), tmp = 0;
1539 struct ipw_priv *p = d->driver_data;
1541 if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
1544 return sprintf(buf, "0x%08x\n", tmp);
1547 static DEVICE_ATTR(rtc, S_IWUSR | S_IRUGO, show_rtc, NULL);
1550 * Add a device attribute to view/control the delay between eeprom
1553 static ssize_t show_eeprom_delay(struct device *d,
1554 struct device_attribute *attr, char *buf)
1556 int n = ((struct ipw_priv *)d->driver_data)->eeprom_delay;
1557 return sprintf(buf, "%i\n", n);
1559 static ssize_t store_eeprom_delay(struct device *d,
1560 struct device_attribute *attr,
1561 const char *buf, size_t count)
1563 struct ipw_priv *p = d->driver_data;
1564 sscanf(buf, "%i", &p->eeprom_delay);
1565 return strnlen(buf, count);
1568 static DEVICE_ATTR(eeprom_delay, S_IWUSR | S_IRUGO,
1569 show_eeprom_delay, store_eeprom_delay);
1571 static ssize_t show_command_event_reg(struct device *d,
1572 struct device_attribute *attr, char *buf)
1575 struct ipw_priv *p = d->driver_data;
1577 reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT);
1578 return sprintf(buf, "0x%08x\n", reg);
1580 static ssize_t store_command_event_reg(struct device *d,
1581 struct device_attribute *attr,
1582 const char *buf, size_t count)
1585 struct ipw_priv *p = d->driver_data;
1587 sscanf(buf, "%x", &reg);
1588 ipw_write_reg32(p, IPW_INTERNAL_CMD_EVENT, reg);
1589 return strnlen(buf, count);
1592 static DEVICE_ATTR(command_event_reg, S_IWUSR | S_IRUGO,
1593 show_command_event_reg, store_command_event_reg);
1595 static ssize_t show_mem_gpio_reg(struct device *d,
1596 struct device_attribute *attr, char *buf)
1599 struct ipw_priv *p = d->driver_data;
1601 reg = ipw_read_reg32(p, 0x301100);
1602 return sprintf(buf, "0x%08x\n", reg);
1604 static ssize_t store_mem_gpio_reg(struct device *d,
1605 struct device_attribute *attr,
1606 const char *buf, size_t count)
1609 struct ipw_priv *p = d->driver_data;
1611 sscanf(buf, "%x", &reg);
1612 ipw_write_reg32(p, 0x301100, reg);
1613 return strnlen(buf, count);
1616 static DEVICE_ATTR(mem_gpio_reg, S_IWUSR | S_IRUGO,
1617 show_mem_gpio_reg, store_mem_gpio_reg);
1619 static ssize_t show_indirect_dword(struct device *d,
1620 struct device_attribute *attr, char *buf)
1623 struct ipw_priv *priv = d->driver_data;
1625 if (priv->status & STATUS_INDIRECT_DWORD)
1626 reg = ipw_read_reg32(priv, priv->indirect_dword);
1630 return sprintf(buf, "0x%08x\n", reg);
1632 static ssize_t store_indirect_dword(struct device *d,
1633 struct device_attribute *attr,
1634 const char *buf, size_t count)
1636 struct ipw_priv *priv = d->driver_data;
1638 sscanf(buf, "%x", &priv->indirect_dword);
1639 priv->status |= STATUS_INDIRECT_DWORD;
1640 return strnlen(buf, count);
1643 static DEVICE_ATTR(indirect_dword, S_IWUSR | S_IRUGO,
1644 show_indirect_dword, store_indirect_dword);
1646 static ssize_t show_indirect_byte(struct device *d,
1647 struct device_attribute *attr, char *buf)
1650 struct ipw_priv *priv = d->driver_data;
1652 if (priv->status & STATUS_INDIRECT_BYTE)
1653 reg = ipw_read_reg8(priv, priv->indirect_byte);
1657 return sprintf(buf, "0x%02x\n", reg);
1659 static ssize_t store_indirect_byte(struct device *d,
1660 struct device_attribute *attr,
1661 const char *buf, size_t count)
1663 struct ipw_priv *priv = d->driver_data;
1665 sscanf(buf, "%x", &priv->indirect_byte);
1666 priv->status |= STATUS_INDIRECT_BYTE;
1667 return strnlen(buf, count);
1670 static DEVICE_ATTR(indirect_byte, S_IWUSR | S_IRUGO,
1671 show_indirect_byte, store_indirect_byte);
1673 static ssize_t show_direct_dword(struct device *d,
1674 struct device_attribute *attr, char *buf)
1677 struct ipw_priv *priv = d->driver_data;
1679 if (priv->status & STATUS_DIRECT_DWORD)
1680 reg = ipw_read32(priv, priv->direct_dword);
1684 return sprintf(buf, "0x%08x\n", reg);
1686 static ssize_t store_direct_dword(struct device *d,
1687 struct device_attribute *attr,
1688 const char *buf, size_t count)
1690 struct ipw_priv *priv = d->driver_data;
1692 sscanf(buf, "%x", &priv->direct_dword);
1693 priv->status |= STATUS_DIRECT_DWORD;
1694 return strnlen(buf, count);
1697 static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO,
1698 show_direct_dword, store_direct_dword);
1700 static int rf_kill_active(struct ipw_priv *priv)
1702 if (0 == (ipw_read32(priv, 0x30) & 0x10000))
1703 priv->status |= STATUS_RF_KILL_HW;
1705 priv->status &= ~STATUS_RF_KILL_HW;
1707 return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
1710 static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
1713 /* 0 - RF kill not enabled
1714 1 - SW based RF kill active (sysfs)
1715 2 - HW based RF kill active
1716 3 - Both HW and SW based RF kill active */
1717 struct ipw_priv *priv = d->driver_data;
1718 int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
1719 (rf_kill_active(priv) ? 0x2 : 0x0);
1720 return sprintf(buf, "%i\n", val);
1723 static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
1725 if ((disable_radio ? 1 : 0) ==
1726 ((priv->status & STATUS_RF_KILL_SW) ? 1 : 0))
1729 IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n",
1730 disable_radio ? "OFF" : "ON");
1732 if (disable_radio) {
1733 priv->status |= STATUS_RF_KILL_SW;
1735 if (priv->workqueue)
1736 cancel_delayed_work(&priv->request_scan);
1737 queue_work(priv->workqueue, &priv->down);
1739 priv->status &= ~STATUS_RF_KILL_SW;
1740 if (rf_kill_active(priv)) {
1741 IPW_DEBUG_RF_KILL("Can not turn radio back on - "
1742 "disabled by HW switch\n");
1743 /* Make sure the RF_KILL check timer is running */
1744 cancel_delayed_work(&priv->rf_kill);
1745 queue_delayed_work(priv->workqueue, &priv->rf_kill,
1748 queue_work(priv->workqueue, &priv->up);
1754 static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
1755 const char *buf, size_t count)
1757 struct ipw_priv *priv = d->driver_data;
1759 ipw_radio_kill_sw(priv, buf[0] == '1');
1764 static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
1766 static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr,
1769 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1770 int pos = 0, len = 0;
1771 if (priv->config & CFG_SPEED_SCAN) {
1772 while (priv->speed_scan[pos] != 0)
1773 len += sprintf(&buf[len], "%d ",
1774 priv->speed_scan[pos++]);
1775 return len + sprintf(&buf[len], "\n");
1778 return sprintf(buf, "0\n");
1781 static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr,
1782 const char *buf, size_t count)
1784 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1785 int channel, pos = 0;
1786 const char *p = buf;
1788 /* list of space separated channels to scan, optionally ending with 0 */
1789 while ((channel = simple_strtol(p, NULL, 0))) {
1790 if (pos == MAX_SPEED_SCAN - 1) {
1791 priv->speed_scan[pos] = 0;
1795 if (ieee80211_is_valid_channel(priv->ieee, channel))
1796 priv->speed_scan[pos++] = channel;
1798 IPW_WARNING("Skipping invalid channel request: %d\n",
1803 while (*p == ' ' || *p == '\t')
1808 priv->config &= ~CFG_SPEED_SCAN;
1810 priv->speed_scan_pos = 0;
1811 priv->config |= CFG_SPEED_SCAN;
1817 static DEVICE_ATTR(speed_scan, S_IWUSR | S_IRUGO, show_speed_scan,
1820 static ssize_t show_net_stats(struct device *d, struct device_attribute *attr,
1823 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1824 return sprintf(buf, "%c\n", (priv->config & CFG_NET_STATS) ? '1' : '0');
1827 static ssize_t store_net_stats(struct device *d, struct device_attribute *attr,
1828 const char *buf, size_t count)
1830 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1832 priv->config |= CFG_NET_STATS;
1834 priv->config &= ~CFG_NET_STATS;
1839 static DEVICE_ATTR(net_stats, S_IWUSR | S_IRUGO,
1840 show_net_stats, store_net_stats);
1842 static void notify_wx_assoc_event(struct ipw_priv *priv)
1844 union iwreq_data wrqu;
1845 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
1846 if (priv->status & STATUS_ASSOCIATED)
1847 memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
1849 memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
1850 wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
1853 static void ipw_irq_tasklet(struct ipw_priv *priv)
1855 u32 inta, inta_mask, handled = 0;
1856 unsigned long flags;
1859 spin_lock_irqsave(&priv->lock, flags);
1861 inta = ipw_read32(priv, IPW_INTA_RW);
1862 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
1863 inta &= (IPW_INTA_MASK_ALL & inta_mask);
1865 /* Add any cached INTA values that need to be handled */
1866 inta |= priv->isr_inta;
1868 /* handle all the justifications for the interrupt */
1869 if (inta & IPW_INTA_BIT_RX_TRANSFER) {
1871 handled |= IPW_INTA_BIT_RX_TRANSFER;
1874 if (inta & IPW_INTA_BIT_TX_CMD_QUEUE) {
1875 IPW_DEBUG_HC("Command completed.\n");
1876 rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
1877 priv->status &= ~STATUS_HCMD_ACTIVE;
1878 wake_up_interruptible(&priv->wait_command_queue);
1879 handled |= IPW_INTA_BIT_TX_CMD_QUEUE;
1882 if (inta & IPW_INTA_BIT_TX_QUEUE_1) {
1883 IPW_DEBUG_TX("TX_QUEUE_1\n");
1884 rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
1885 handled |= IPW_INTA_BIT_TX_QUEUE_1;
1888 if (inta & IPW_INTA_BIT_TX_QUEUE_2) {
1889 IPW_DEBUG_TX("TX_QUEUE_2\n");
1890 rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
1891 handled |= IPW_INTA_BIT_TX_QUEUE_2;
1894 if (inta & IPW_INTA_BIT_TX_QUEUE_3) {
1895 IPW_DEBUG_TX("TX_QUEUE_3\n");
1896 rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
1897 handled |= IPW_INTA_BIT_TX_QUEUE_3;
1900 if (inta & IPW_INTA_BIT_TX_QUEUE_4) {
1901 IPW_DEBUG_TX("TX_QUEUE_4\n");
1902 rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
1903 handled |= IPW_INTA_BIT_TX_QUEUE_4;
1906 if (inta & IPW_INTA_BIT_STATUS_CHANGE) {
1907 IPW_WARNING("STATUS_CHANGE\n");
1908 handled |= IPW_INTA_BIT_STATUS_CHANGE;
1911 if (inta & IPW_INTA_BIT_BEACON_PERIOD_EXPIRED) {
1912 IPW_WARNING("TX_PERIOD_EXPIRED\n");
1913 handled |= IPW_INTA_BIT_BEACON_PERIOD_EXPIRED;
1916 if (inta & IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) {
1917 IPW_WARNING("HOST_CMD_DONE\n");
1918 handled |= IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE;
1921 if (inta & IPW_INTA_BIT_FW_INITIALIZATION_DONE) {
1922 IPW_WARNING("FW_INITIALIZATION_DONE\n");
1923 handled |= IPW_INTA_BIT_FW_INITIALIZATION_DONE;
1926 if (inta & IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) {
1927 IPW_WARNING("PHY_OFF_DONE\n");
1928 handled |= IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE;
1931 if (inta & IPW_INTA_BIT_RF_KILL_DONE) {
1932 IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
1933 priv->status |= STATUS_RF_KILL_HW;
1934 wake_up_interruptible(&priv->wait_command_queue);
1935 priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
1936 cancel_delayed_work(&priv->request_scan);
1937 schedule_work(&priv->link_down);
1938 queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ);
1939 handled |= IPW_INTA_BIT_RF_KILL_DONE;
1942 if (inta & IPW_INTA_BIT_FATAL_ERROR) {
1943 IPW_WARNING("Firmware error detected. Restarting.\n");
1945 IPW_DEBUG_FW("Sysfs 'error' log already exists.\n");
1946 #ifdef CONFIG_IPW2200_DEBUG
1947 if (ipw_debug_level & IPW_DL_FW_ERRORS) {
1948 struct ipw_fw_error *error =
1949 ipw_alloc_error_log(priv);
1950 ipw_dump_error_log(priv, error);
1952 ipw_free_error_log(error);
1956 priv->error = ipw_alloc_error_log(priv);
1958 IPW_DEBUG_FW("Sysfs 'error' log captured.\n");
1960 IPW_DEBUG_FW("Error allocating sysfs 'error' "
1962 #ifdef CONFIG_IPW2200_DEBUG
1963 if (ipw_debug_level & IPW_DL_FW_ERRORS)
1964 ipw_dump_error_log(priv, priv->error);
1968 /* XXX: If hardware encryption is for WPA/WPA2,
1969 * we have to notify the supplicant. */
1970 if (priv->ieee->sec.encrypt) {
1971 priv->status &= ~STATUS_ASSOCIATED;
1972 notify_wx_assoc_event(priv);
1975 /* Keep the restart process from trying to send host
1976 * commands by clearing the INIT status bit */
1977 priv->status &= ~STATUS_INIT;
1979 /* Cancel currently queued command. */
1980 priv->status &= ~STATUS_HCMD_ACTIVE;
1981 wake_up_interruptible(&priv->wait_command_queue);
1983 queue_work(priv->workqueue, &priv->adapter_restart);
1984 handled |= IPW_INTA_BIT_FATAL_ERROR;
1987 if (inta & IPW_INTA_BIT_PARITY_ERROR) {
1988 IPW_ERROR("Parity error\n");
1989 handled |= IPW_INTA_BIT_PARITY_ERROR;
1992 if (handled != inta) {
1993 IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
1996 /* enable all interrupts */
1997 ipw_enable_interrupts(priv);
1999 spin_unlock_irqrestore(&priv->lock, flags);
2002 #define IPW_CMD(x) case IPW_CMD_ ## x : return #x
2003 static char *get_cmd_string(u8 cmd)
2006 IPW_CMD(HOST_COMPLETE);
2007 IPW_CMD(POWER_DOWN);
2008 IPW_CMD(SYSTEM_CONFIG);
2009 IPW_CMD(MULTICAST_ADDRESS);
2011 IPW_CMD(ADAPTER_ADDRESS);
2013 IPW_CMD(RTS_THRESHOLD);
2014 IPW_CMD(FRAG_THRESHOLD);
2015 IPW_CMD(POWER_MODE);
2017 IPW_CMD(TGI_TX_KEY);
2018 IPW_CMD(SCAN_REQUEST);
2019 IPW_CMD(SCAN_REQUEST_EXT);
2021 IPW_CMD(SUPPORTED_RATES);
2022 IPW_CMD(SCAN_ABORT);
2024 IPW_CMD(QOS_PARAMETERS);
2025 IPW_CMD(DINO_CONFIG);
2026 IPW_CMD(RSN_CAPABILITIES);
2028 IPW_CMD(CARD_DISABLE);
2029 IPW_CMD(SEED_NUMBER);
2031 IPW_CMD(COUNTRY_INFO);
2032 IPW_CMD(AIRONET_INFO);
2033 IPW_CMD(AP_TX_POWER);
2035 IPW_CMD(CCX_VER_INFO);
2036 IPW_CMD(SET_CALIBRATION);
2037 IPW_CMD(SENSITIVITY_CALIB);
2038 IPW_CMD(RETRY_LIMIT);
2039 IPW_CMD(IPW_PRE_POWER_DOWN);
2040 IPW_CMD(VAP_BEACON_TEMPLATE);
2041 IPW_CMD(VAP_DTIM_PERIOD);
2042 IPW_CMD(EXT_SUPPORTED_RATES);
2043 IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT);
2044 IPW_CMD(VAP_QUIET_INTERVALS);
2045 IPW_CMD(VAP_CHANNEL_SWITCH);
2046 IPW_CMD(VAP_MANDATORY_CHANNELS);
2047 IPW_CMD(VAP_CELL_PWR_LIMIT);
2048 IPW_CMD(VAP_CF_PARAM_SET);
2049 IPW_CMD(VAP_SET_BEACONING_STATE);
2050 IPW_CMD(MEASUREMENT);
2051 IPW_CMD(POWER_CAPABILITY);
2052 IPW_CMD(SUPPORTED_CHANNELS);
2053 IPW_CMD(TPC_REPORT);
2055 IPW_CMD(PRODUCTION_COMMAND);
2061 #define HOST_COMPLETE_TIMEOUT HZ
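/*
 * Host command flow implemented below (summary): __ipw_send_cmd() marks
 * STATUS_HCMD_ACTIVE, queues the command via ipw_queue_tx_hcmd() and then
 * sleeps on wait_command_queue for up to HOST_COMPLETE_TIMEOUT; the
 * IPW_INTA_BIT_TX_CMD_QUEUE interrupt handler above clears the flag and
 * wakes the waiter once the firmware has consumed the command.
 */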
2063 static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
2066 unsigned long flags;
2068 spin_lock_irqsave(&priv->lock, flags);
2069 if (priv->status & STATUS_HCMD_ACTIVE) {
2070 IPW_ERROR("Failed to send %s: Already sending a command.\n",
2071 get_cmd_string(cmd->cmd));
2072 spin_unlock_irqrestore(&priv->lock, flags);
2076 priv->status |= STATUS_HCMD_ACTIVE;
2079 priv->cmdlog[priv->cmdlog_pos].jiffies = jiffies;
2080 priv->cmdlog[priv->cmdlog_pos].cmd.cmd = cmd->cmd;
2081 priv->cmdlog[priv->cmdlog_pos].cmd.len = cmd->len;
2082 memcpy(priv->cmdlog[priv->cmdlog_pos].cmd.param, cmd->param,
2084 priv->cmdlog[priv->cmdlog_pos].retcode = -1;
2087 IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n",
2088 get_cmd_string(cmd->cmd), cmd->cmd, cmd->len,
2091 #ifndef DEBUG_CMD_WEP_KEY
2092 if (cmd->cmd == IPW_CMD_WEP_KEY)
2093 IPW_DEBUG_HC("WEP_KEY command masked out for secure.\n");
2096 printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);
2098 rc = ipw_queue_tx_hcmd(priv, cmd->cmd, cmd->param, cmd->len, 0);
2100 priv->status &= ~STATUS_HCMD_ACTIVE;
2101 IPW_ERROR("Failed to send %s: Reason %d\n",
2102 get_cmd_string(cmd->cmd), rc);
2103 spin_unlock_irqrestore(&priv->lock, flags);
2106 spin_unlock_irqrestore(&priv->lock, flags);
2108 rc = wait_event_interruptible_timeout(priv->wait_command_queue,
2110 status & STATUS_HCMD_ACTIVE),
2111 HOST_COMPLETE_TIMEOUT);
2113 spin_lock_irqsave(&priv->lock, flags);
2114 if (priv->status & STATUS_HCMD_ACTIVE) {
2115 IPW_ERROR("Failed to send %s: Command timed out.\n",
2116 get_cmd_string(cmd->cmd));
2117 priv->status &= ~STATUS_HCMD_ACTIVE;
2118 spin_unlock_irqrestore(&priv->lock, flags);
2122 spin_unlock_irqrestore(&priv->lock, flags);
2126 if (priv->status & STATUS_RF_KILL_HW) {
2127 IPW_ERROR("Failed to send %s: Aborted due to RF kill switch.\n",
2128 get_cmd_string(cmd->cmd));
2135 priv->cmdlog[priv->cmdlog_pos++].retcode = rc;
2136 priv->cmdlog_pos %= priv->cmdlog_len;
2141 static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command)
2143 struct host_cmd cmd = {
2147 return __ipw_send_cmd(priv, &cmd);
2150 static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 command, u8 len,
2153 struct host_cmd cmd = {
2159 return __ipw_send_cmd(priv, &cmd);
2162 static int ipw_send_host_complete(struct ipw_priv *priv)
2165 IPW_ERROR("Invalid args\n");
2169 return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE);
2172 static int ipw_send_system_config(struct ipw_priv *priv)
2174 return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG,
2175 sizeof(priv->sys_config),
2179 static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
2181 if (!priv || !ssid) {
2182 IPW_ERROR("Invalid args\n");
2186 return ipw_send_cmd_pdu(priv, IPW_CMD_SSID, min(len, IW_ESSID_MAX_SIZE),
2190 static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
2192 if (!priv || !mac) {
2193 IPW_ERROR("Invalid args\n");
2197 IPW_DEBUG_INFO("%s: Setting MAC to " MAC_FMT "\n",
2198 priv->net_dev->name, MAC_ARG(mac));
2200 return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac);
2204 * NOTE: This must be executed from our workqueue as it results in udelay
2205 * being called which may corrupt the keyboard if executed on the default workqueue.
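 *
 * For illustration (this call already appears in ipw_scan_check() further
 * below), the restart is therefore queued on the driver's private workqueue
 * rather than invoked directly:
 *
 *	queue_work(priv->workqueue, &priv->adapter_restart);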
2208 static void ipw_adapter_restart(void *adapter)
2210 struct ipw_priv *priv = adapter;
2212 if (priv->status & STATUS_RF_KILL_MASK)
2217 if (priv->assoc_network &&
2218 (priv->assoc_network->capability & WLAN_CAPABILITY_IBSS))
2219 ipw_remove_current_network(priv);
2222 IPW_ERROR("Failed to up device\n");
2227 static void ipw_bg_adapter_restart(void *data)
2229 struct ipw_priv *priv = data;
2230 mutex_lock(&priv->mutex);
2231 ipw_adapter_restart(data);
2232 mutex_unlock(&priv->mutex);
2235 #define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
2237 static void ipw_scan_check(void *data)
2239 struct ipw_priv *priv = data;
2240 if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
2241 IPW_DEBUG_SCAN("Scan completion watchdog resetting "
2242 "adapter after (%dms).\n",
2243 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2244 queue_work(priv->workqueue, &priv->adapter_restart);
2248 static void ipw_bg_scan_check(void *data)
2250 struct ipw_priv *priv = data;
2251 mutex_lock(&priv->mutex);
2252 ipw_scan_check(data);
2253 mutex_unlock(&priv->mutex);
2256 static int ipw_send_scan_request_ext(struct ipw_priv *priv,
2257 struct ipw_scan_request_ext *request)
2259 return ipw_send_cmd_pdu(priv, IPW_CMD_SCAN_REQUEST_EXT,
2260 sizeof(*request), request);
2263 static int ipw_send_scan_abort(struct ipw_priv *priv)
2266 IPW_ERROR("Invalid args\n");
2270 return ipw_send_cmd_simple(priv, IPW_CMD_SCAN_ABORT);
2273 static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
2275 struct ipw_sensitivity_calib calib = {
2276 .beacon_rssi_raw = sens,
2279 return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib),
2283 static int ipw_send_associate(struct ipw_priv *priv,
2284 struct ipw_associate *associate)
2286 struct ipw_associate tmp_associate;
2288 if (!priv || !associate) {
2289 IPW_ERROR("Invalid args\n");
2293 memcpy(&tmp_associate, associate, sizeof(*associate));
2294 tmp_associate.policy_support =
2295 cpu_to_le16(tmp_associate.policy_support);
2296 tmp_associate.assoc_tsf_msw = cpu_to_le32(tmp_associate.assoc_tsf_msw);
2297 tmp_associate.assoc_tsf_lsw = cpu_to_le32(tmp_associate.assoc_tsf_lsw);
2298 tmp_associate.capability = cpu_to_le16(tmp_associate.capability);
2299 tmp_associate.listen_interval =
2300 cpu_to_le16(tmp_associate.listen_interval);
2301 tmp_associate.beacon_interval =
2302 cpu_to_le16(tmp_associate.beacon_interval);
2303 tmp_associate.atim_window = cpu_to_le16(tmp_associate.atim_window);
2305 return ipw_send_cmd_pdu(priv, IPW_CMD_ASSOCIATE, sizeof(tmp_associate),
2309 static int ipw_send_supported_rates(struct ipw_priv *priv,
2310 struct ipw_supported_rates *rates)
2312 if (!priv || !rates) {
2313 IPW_ERROR("Invalid args\n");
2317 return ipw_send_cmd_pdu(priv, IPW_CMD_SUPPORTED_RATES, sizeof(*rates),
2321 static int ipw_set_random_seed(struct ipw_priv *priv)
2326 IPW_ERROR("Invalid args\n");
2330 get_random_bytes(&val, sizeof(val));
2332 return ipw_send_cmd_pdu(priv, IPW_CMD_SEED_NUMBER, sizeof(val), &val);
2335 static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
2338 IPW_ERROR("Invalid args\n");
2342 return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(phy_off),
2346 static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
2348 if (!priv || !power) {
2349 IPW_ERROR("Invalid args\n");
2353 return ipw_send_cmd_pdu(priv, IPW_CMD_TX_POWER, sizeof(*power), power);
2356 static int ipw_set_tx_power(struct ipw_priv *priv)
2358 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
2359 struct ipw_tx_power tx_power;
2363 memset(&tx_power, 0, sizeof(tx_power));
2365 /* configure device for 'G' band */
2366 tx_power.ieee_mode = IPW_G_MODE;
2367 tx_power.num_channels = geo->bg_channels;
2368 for (i = 0; i < geo->bg_channels; i++) {
2369 max_power = geo->bg[i].max_power;
2370 tx_power.channels_tx_power[i].channel_number =
2372 tx_power.channels_tx_power[i].tx_power = max_power ?
2373 min(max_power, priv->tx_power) : priv->tx_power;
2375 if (ipw_send_tx_power(priv, &tx_power))
2378 /* configure device to also handle 'B' band */
2379 tx_power.ieee_mode = IPW_B_MODE;
2380 if (ipw_send_tx_power(priv, &tx_power))
2383 /* configure device to also handle 'A' band */
2384 if (priv->ieee->abg_true) {
2385 tx_power.ieee_mode = IPW_A_MODE;
2386 tx_power.num_channels = geo->a_channels;
2387 for (i = 0; i < tx_power.num_channels; i++) {
2388 max_power = geo->a[i].max_power;
2389 tx_power.channels_tx_power[i].channel_number =
2391 tx_power.channels_tx_power[i].tx_power = max_power ?
2392 min(max_power, priv->tx_power) : priv->tx_power;
2394 if (ipw_send_tx_power(priv, &tx_power))
2400 static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
2402 struct ipw_rts_threshold rts_threshold = {
2403 .rts_threshold = rts,
2407 IPW_ERROR("Invalid args\n");
2411 return ipw_send_cmd_pdu(priv, IPW_CMD_RTS_THRESHOLD,
2412 sizeof(rts_threshold), &rts_threshold);
2415 static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
2417 struct ipw_frag_threshold frag_threshold = {
2418 .frag_threshold = frag,
2422 IPW_ERROR("Invalid args\n");
2426 return ipw_send_cmd_pdu(priv, IPW_CMD_FRAG_THRESHOLD,
2427 sizeof(frag_threshold), &frag_threshold);
2430 static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
2435 IPW_ERROR("Invalid args\n");
2439 /* If on battery, set to 3, if on AC set to CAM, otherwise use the requested power index */
2442 case IPW_POWER_BATTERY:
2443 param = IPW_POWER_INDEX_3;
2446 param = IPW_POWER_MODE_CAM;
2453 return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param),
2457 static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit)
2459 struct ipw_retry_limit retry_limit = {
2460 .short_retry_limit = slimit,
2461 .long_retry_limit = llimit
2465 IPW_ERROR("Invalid args\n");
2469 return ipw_send_cmd_pdu(priv, IPW_CMD_RETRY_LIMIT, sizeof(retry_limit),
2474 * The IPW device contains a Microwire compatible EEPROM that stores
2475 * various data like the MAC address. Usually the firmware has exclusive
2476 * access to the eeprom, but during device initialization (before the
2477 * device driver has sent the HostComplete command to the firmware) the
2478 * device driver has read access to the EEPROM by way of indirect addressing
2479 * through a couple of memory mapped registers.
2481 * The following is a simplified implementation for pulling data out of
2482 * the eeprom, along with some helper functions to find information in
2483 * the per device private data's copy of the eeprom.
2485 * NOTE: To better understand how these functions work (i.e. what is a chip
2486 * select and why do we have to keep driving the eeprom clock?), read
2487 * just about any data sheet for a Microwire compatible EEPROM.
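
/*
 * Illustrative sketch of one complete read transaction as composed from the
 * helpers below (eeprom_read_u16() is the actual implementation):
 *
 *	eeprom_cs(priv);                          assert chip select
 *	eeprom_op(priv, EEPROM_CMD_READ, addr);   start bit, opcode, 8 address bits
 *	...pulse EEPROM_BIT_SK 16 times, sampling EEPROM_BIT_DO after each pulse...
 *	eeprom_disable_cs(priv);                  release chip select
 */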
2490 /* write a 32 bit value into the indirect accessor register */
2491 static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
2493 ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);
2495 /* the eeprom requires some time to complete the operation */
2496 udelay(p->eeprom_delay);
2501 /* perform a chip select operation */
2502 static void eeprom_cs(struct ipw_priv *priv)
2504 eeprom_write_reg(priv, 0);
2505 eeprom_write_reg(priv, EEPROM_BIT_CS);
2506 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2507 eeprom_write_reg(priv, EEPROM_BIT_CS);
2510 /* release the chip select */
2511 static void eeprom_disable_cs(struct ipw_priv *priv)
2513 eeprom_write_reg(priv, EEPROM_BIT_CS);
2514 eeprom_write_reg(priv, 0);
2515 eeprom_write_reg(priv, EEPROM_BIT_SK);
2518 /* push a single bit down to the eeprom */
2519 static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit)
2521 int d = (bit ? EEPROM_BIT_DI : 0);
2522 eeprom_write_reg(p, EEPROM_BIT_CS | d);
2523 eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK);
2526 /* push an opcode followed by an address down to the eeprom */
2527 static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr)
2532 eeprom_write_bit(priv, 1);
2533 eeprom_write_bit(priv, op & 2);
2534 eeprom_write_bit(priv, op & 1);
2535 for (i = 7; i >= 0; i--) {
2536 eeprom_write_bit(priv, addr & (1 << i));
2540 /* pull 16 bits off the eeprom, one bit at a time */
2541 static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
2546 /* Send READ Opcode */
2547 eeprom_op(priv, EEPROM_CMD_READ, addr);
2549 /* Send dummy bit */
2550 eeprom_write_reg(priv, EEPROM_BIT_CS);
2552 /* Read the 16-bit word off the eeprom one bit at a time */
2553 for (i = 0; i < 16; i++) {
2555 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2556 eeprom_write_reg(priv, EEPROM_BIT_CS);
2557 data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS);
2558 r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0);
2561 /* Send another dummy bit */
2562 eeprom_write_reg(priv, 0);
2563 eeprom_disable_cs(priv);
2568 /* helper function for pulling the mac address out of the private */
2569 /* data's copy of the eeprom data */
2570 static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
2572 memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], 6);
2576 * Either the device driver (i.e. the host) or the firmware can
2577 * load eeprom data into the designated region in SRAM. If neither
2578 * happens then the FW will shutdown with a fatal error.
2580 * In order to signal the FW to load the EEPROM, the EEPROM_LOAD_DISABLE
2581 * bit in the region of shared SRAM needs to be non-zero.
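
/*
 * In short, the convention used by ipw_eeprom_init_sram() below is:
 *
 *	ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);	host already wrote the data
 *	ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);	firmware should load it itself
 */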
2583 static void ipw_eeprom_init_sram(struct ipw_priv *priv)
2586 u16 *eeprom = (u16 *) priv->eeprom;
2588 IPW_DEBUG_TRACE(">>\n");
2590 /* read entire contents of eeprom into private buffer */
2591 for (i = 0; i < 128; i++)
2592 eeprom[i] = le16_to_cpu(eeprom_read_u16(priv, (u8) i));
2595 If the data looks correct, then copy it to our private
2596 copy. Otherwise let the firmware know to perform the operation on its own.
2599 if (priv->eeprom[EEPROM_VERSION] != 0) {
2600 IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
2602 /* write the eeprom data to sram */
2603 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
2604 ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]);
2606 /* Do not load eeprom data on fatal error or suspend */
2607 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
2609 IPW_DEBUG_INFO("Enabling FW initialization of SRAM\n");
2611 /* Load eeprom data on fatal error or suspend */
2612 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
2615 IPW_DEBUG_TRACE("<<\n");
2618 static void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
2623 _ipw_write32(priv, IPW_AUTOINC_ADDR, start);
2625 _ipw_write32(priv, IPW_AUTOINC_DATA, 0);
2628 static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
2630 ipw_zero_memory(priv, IPW_SHARED_SRAM_DMA_CONTROL,
2631 CB_NUMBER_OF_ELEMENTS_SMALL *
2632 sizeof(struct command_block));
2635 static int ipw_fw_dma_enable(struct ipw_priv *priv)
2636 { /* start dma engine but no transfers yet */
2638 IPW_DEBUG_FW(">> : \n");
2641 ipw_fw_dma_reset_command_blocks(priv);
2643 /* Write CB base address */
2644 ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL);
2646 IPW_DEBUG_FW("<< : \n");
2650 static void ipw_fw_dma_abort(struct ipw_priv *priv)
2654 IPW_DEBUG_FW(">> :\n");
2656 /* set the Stop and Abort bit */
2657 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
2658 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2659 priv->sram_desc.last_cb_index = 0;
2661 IPW_DEBUG_FW("<< \n");
2664 static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
2665 struct command_block *cb)
2668 IPW_SHARED_SRAM_DMA_CONTROL +
2669 (sizeof(struct command_block) * index);
2670 IPW_DEBUG_FW(">> :\n");
2672 ipw_write_indirect(priv, address, (u8 *) cb,
2673 (int)sizeof(struct command_block));
2675 IPW_DEBUG_FW("<< :\n");
2680 static int ipw_fw_dma_kick(struct ipw_priv *priv)
2685 IPW_DEBUG_FW(">> :\n");
2687 for (index = 0; index < priv->sram_desc.last_cb_index; index++)
2688 ipw_fw_dma_write_command_block(priv, index,
2689 &priv->sram_desc.cb_list[index]);
2691 /* Enable the DMA in the CSR register */
2692 ipw_clear_bit(priv, IPW_RESET_REG,
2693 IPW_RESET_REG_MASTER_DISABLED |
2694 IPW_RESET_REG_STOP_MASTER);
2696 /* Set the Start bit. */
2697 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
2698 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2700 IPW_DEBUG_FW("<< :\n");
2704 static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
2707 u32 register_value = 0;
2708 u32 cb_fields_address = 0;
2710 IPW_DEBUG_FW(">> :\n");
2711 address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2712 IPW_DEBUG_FW_INFO("Current CB is 0x%x \n", address);
2714 /* Read the DMA Control register */
2715 register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL);
2716 IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x \n", register_value);
2718 /* Print the CB values */
2719 cb_fields_address = address;
2720 register_value = ipw_read_reg32(priv, cb_fields_address);
2721 IPW_DEBUG_FW_INFO("Current CB ControlField is 0x%x \n", register_value);
2723 cb_fields_address += sizeof(u32);
2724 register_value = ipw_read_reg32(priv, cb_fields_address);
2725 IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x \n", register_value);
2727 cb_fields_address += sizeof(u32);
2728 register_value = ipw_read_reg32(priv, cb_fields_address);
2729 IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x \n",
2732 cb_fields_address += sizeof(u32);
2733 register_value = ipw_read_reg32(priv, cb_fields_address);
2734 IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x \n", register_value);
2736 IPW_DEBUG_FW("<< :\n");
2739 static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
2741 u32 current_cb_address = 0;
2742 u32 current_cb_index = 0;
2744 IPW_DEBUG_FW(">> :\n");
2745 current_cb_address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2747 current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) /
2748 sizeof(struct command_block);
2750 IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X \n",
2751 current_cb_index, current_cb_address);
2753 IPW_DEBUG_FW("<< :\n");
2754 return current_cb_index;
2758 static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
2762 int interrupt_enabled, int is_last)
2765 u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
2766 CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
2768 struct command_block *cb;
2769 u32 last_cb_element = 0;
2771 IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
2772 src_address, dest_address, length);
2774 if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL)
2777 last_cb_element = priv->sram_desc.last_cb_index;
2778 cb = &priv->sram_desc.cb_list[last_cb_element];
2779 priv->sram_desc.last_cb_index++;
2781 /* Calculate the new CB control word */
2782 if (interrupt_enabled)
2783 control |= CB_INT_ENABLED;
2786 control |= CB_LAST_VALID;
2790 /* Calculate the CB Element's checksum value */
2791 cb->status = control ^ src_address ^ dest_address;
2793 /* Copy the Source and Destination addresses */
2794 cb->dest_addr = dest_address;
2795 cb->source_addr = src_address;
2797 /* Copy the Control Word last */
2798 cb->control = control;
2803 static int ipw_fw_dma_add_buffer(struct ipw_priv *priv,
2804 u32 src_phys, u32 dest_address, u32 length)
2806 u32 bytes_left = length;
2808 u32 dest_offset = 0;
2810 IPW_DEBUG_FW(">> \n");
2811 IPW_DEBUG_FW_INFO("src_phys=0x%x dest_address=0x%x length=0x%x\n",
2812 src_phys, dest_address, length);
2813 while (bytes_left > CB_MAX_LENGTH) {
2814 status = ipw_fw_dma_add_command_block(priv,
2815 src_phys + src_offset,
2818 CB_MAX_LENGTH, 0, 0);
2820 IPW_DEBUG_FW_INFO(": Failed\n");
2823 IPW_DEBUG_FW_INFO(": Added new cb\n");
2825 src_offset += CB_MAX_LENGTH;
2826 dest_offset += CB_MAX_LENGTH;
2827 bytes_left -= CB_MAX_LENGTH;
2830 /* add the buffer tail */
2831 if (bytes_left > 0) {
2833 ipw_fw_dma_add_command_block(priv, src_phys + src_offset,
2834 dest_address + dest_offset,
2837 IPW_DEBUG_FW_INFO(": Failed on the buffer tail\n");
2841 (": Adding new cb - the buffer tail\n");
2844 IPW_DEBUG_FW("<< \n");
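
/*
 * Worked example for the chunking loop above (the CB_MAX_LENGTH value used
 * here is hypothetical; the real constant comes from the driver headers):
 * with CB_MAX_LENGTH = 0x1000, a 0x2800-byte buffer becomes two full
 * 0x1000-byte command blocks plus one 0x800-byte tail block.
 */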
2848 static int ipw_fw_dma_wait(struct ipw_priv *priv)
2850 u32 current_index = 0, previous_index;
2853 IPW_DEBUG_FW(">> : \n");
2855 current_index = ipw_fw_dma_command_block_index(priv);
2856 IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n",
2857 (int)priv->sram_desc.last_cb_index);
2859 while (current_index < priv->sram_desc.last_cb_index) {
2861 previous_index = current_index;
2862 current_index = ipw_fw_dma_command_block_index(priv);
2864 if (previous_index < current_index) {
2868 if (++watchdog > 400) {
2869 IPW_DEBUG_FW_INFO("Timeout\n");
2870 ipw_fw_dma_dump_command_block(priv);
2871 ipw_fw_dma_abort(priv);
2876 ipw_fw_dma_abort(priv);
2878 /* Disable the DMA in the CSR register */
2879 ipw_set_bit(priv, IPW_RESET_REG,
2880 IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER);
2882 IPW_DEBUG_FW("<< dmaWaitSync \n");
2886 static void ipw_remove_current_network(struct ipw_priv *priv)
2888 struct list_head *element, *safe;
2889 struct ieee80211_network *network = NULL;
2890 unsigned long flags;
2892 spin_lock_irqsave(&priv->ieee->lock, flags);
2893 list_for_each_safe(element, safe, &priv->ieee->network_list) {
2894 network = list_entry(element, struct ieee80211_network, list);
2895 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
2897 list_add_tail(&network->list,
2898 &priv->ieee->network_free_list);
2901 spin_unlock_irqrestore(&priv->ieee->lock, flags);
2905 * Check that card is still alive.
2906 * Reads debug register from domain0.
2907 * If card is present, a pre-defined value should be found there.
2911 * @return 1 if card is present, 0 otherwise
2913 static inline int ipw_alive(struct ipw_priv *priv)
2915 return ipw_read32(priv, 0x90) == 0xd55555d5;
2918 /* timeout in msec, attempted in 10-msec quanta */
2919 static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
2925 if ((ipw_read32(priv, addr) & mask) == mask)
2929 } while (i < timeout);
2934 /* These functions load the firmware and micro code for the operation of
2935 * the ipw hardware. They assume the buffer has all the bits for the
2936 * image and that the caller handles the memory allocation and clean up.
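 *
 * The overall sequence, as driven by ipw_load() later in this file, is:
 * reset the NIC, DMA the boot image and start the NIC, wait for
 * FW_INITIALIZATION_DONE, push the microcode through DINO, DMA the runtime
 * firmware, then restart the NIC and wait for FW_INITIALIZATION_DONE again.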
2939 static int ipw_stop_master(struct ipw_priv *priv)
2943 IPW_DEBUG_TRACE(">> \n");
2944 /* stop master. typical delay - 0 */
2945 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
2947 /* timeout is in msec, polled in 10-msec quanta */
2948 rc = ipw_poll_bit(priv, IPW_RESET_REG,
2949 IPW_RESET_REG_MASTER_DISABLED, 100);
2951 IPW_ERROR("wait for stop master failed after 100ms\n");
2955 IPW_DEBUG_INFO("stop master %dms\n", rc);
2960 static void ipw_arc_release(struct ipw_priv *priv)
2962 IPW_DEBUG_TRACE(">> \n");
2965 ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
2967 /* no one knows timing, for safety add some delay */
2976 static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
2978 int rc = 0, i, addr;
2982 image = (u16 *) data;
2984 IPW_DEBUG_TRACE(">> \n");
2986 rc = ipw_stop_master(priv);
2991 // spin_lock_irqsave(&priv->lock, flags);
2993 for (addr = IPW_SHARED_LOWER_BOUND;
2994 addr < IPW_REGISTER_DOMAIN1_END; addr += 4) {
2995 ipw_write32(priv, addr, 0);
2998 /* no ucode (yet) */
2999 memset(&priv->dino_alive, 0, sizeof(priv->dino_alive));
3000 /* destroy DMA queues */
3001 /* reset sequence */
3003 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_ON);
3004 ipw_arc_release(priv);
3005 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_OFF);
3009 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, IPW_BASEBAND_POWER_DOWN);
3012 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, 0);
3015 /* enable ucode store */
3016 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0x0);
3017 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_CS);
3023 * Do NOT set the indirect address register once and then
3024 * store data to the indirect data register in the loop.
3025 * It seems very reasonable, but in this case DINO does not
3026 * accept the ucode. It is essential to set the address each time.
3028 /* load new ipw uCode */
3029 for (i = 0; i < len / 2; i++)
3030 ipw_write_reg16(priv, IPW_BASEBAND_CONTROL_STORE,
3031 cpu_to_le16(image[i]));
3034 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3035 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM);
3037 /* this is where the igx / win driver deviates from the VAP driver. */
3039 /* wait for alive response */
3040 for (i = 0; i < 100; i++) {
3041 /* poll for incoming data */
3042 cr = ipw_read_reg8(priv, IPW_BASEBAND_CONTROL_STATUS);
3043 if (cr & DINO_RXFIFO_DATA)
3048 if (cr & DINO_RXFIFO_DATA) {
3049 /* alive_command_response size is NOT a multiple of 4 */
3050 u32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4];
3052 for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
3053 response_buffer[i] =
3054 le32_to_cpu(ipw_read_reg32(priv,
3055 IPW_BASEBAND_RX_FIFO_READ));
3056 memcpy(&priv->dino_alive, response_buffer,
3057 sizeof(priv->dino_alive));
3058 if (priv->dino_alive.alive_command == 1
3059 && priv->dino_alive.ucode_valid == 1) {
3062 ("Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
3063 "of %02d/%02d/%02d %02d:%02d\n",
3064 priv->dino_alive.software_revision,
3065 priv->dino_alive.software_revision,
3066 priv->dino_alive.device_identifier,
3067 priv->dino_alive.device_identifier,
3068 priv->dino_alive.time_stamp[0],
3069 priv->dino_alive.time_stamp[1],
3070 priv->dino_alive.time_stamp[2],
3071 priv->dino_alive.time_stamp[3],
3072 priv->dino_alive.time_stamp[4]);
3074 IPW_DEBUG_INFO("Microcode is not alive\n");
3078 IPW_DEBUG_INFO("No alive response from DINO\n");
3082 /* disable DINO, otherwise for some reason
3083 the firmware has problems getting an alive response. */
3084 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3086 // spin_unlock_irqrestore(&priv->lock, flags);
3091 static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
3095 struct fw_chunk *chunk;
3096 dma_addr_t shared_phys;
3099 IPW_DEBUG_TRACE(">> : \n");
3100 shared_virt = pci_alloc_consistent(priv->pci_dev, len, &shared_phys);
3105 memmove(shared_virt, data, len);
3108 rc = ipw_fw_dma_enable(priv);
3110 if (priv->sram_desc.last_cb_index > 0) {
3111 /* the DMA is already ready; this would be a bug. */
3117 chunk = (struct fw_chunk *)(data + offset);
3118 offset += sizeof(struct fw_chunk);
3119 /* build DMA packet and queue up for sending */
3120 /* dma to chunk->address, the chunk->length bytes from data + offset */
3123 rc = ipw_fw_dma_add_buffer(priv, shared_phys + offset,
3124 le32_to_cpu(chunk->address),
3125 le32_to_cpu(chunk->length));
3127 IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
3131 offset += le32_to_cpu(chunk->length);
3132 } while (offset < len);
3134 /* Run the DMA and wait for the answer */
3135 rc = ipw_fw_dma_kick(priv);
3137 IPW_ERROR("dmaKick Failed\n");
3141 rc = ipw_fw_dma_wait(priv);
3143 IPW_ERROR("dmaWaitSync Failed\n");
3147 pci_free_consistent(priv->pci_dev, len, shared_virt, shared_phys);
3152 static int ipw_stop_nic(struct ipw_priv *priv)
3157 ipw_write32(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3159 rc = ipw_poll_bit(priv, IPW_RESET_REG,
3160 IPW_RESET_REG_MASTER_DISABLED, 500);
3162 IPW_ERROR("wait for reg master disabled failed after 500ms\n");
3166 ipw_set_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3171 static void ipw_start_nic(struct ipw_priv *priv)
3173 IPW_DEBUG_TRACE(">>\n");
3175 /* prvHwStartNic release ARC */
3176 ipw_clear_bit(priv, IPW_RESET_REG,
3177 IPW_RESET_REG_MASTER_DISABLED |
3178 IPW_RESET_REG_STOP_MASTER |
3179 CBD_RESET_REG_PRINCETON_RESET);
3181 /* enable power management */
3182 ipw_set_bit(priv, IPW_GP_CNTRL_RW,
3183 IPW_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
3185 IPW_DEBUG_TRACE("<<\n");
3188 static int ipw_init_nic(struct ipw_priv *priv)
3192 IPW_DEBUG_TRACE(">>\n");
3195 /* set "initialization complete" bit to move adapter to D0 state */
3196 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3198 /* low-level PLL activation */
3199 ipw_write32(priv, IPW_READ_INT_REGISTER,
3200 IPW_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
3202 /* wait for clock stabilization */
3203 rc = ipw_poll_bit(priv, IPW_GP_CNTRL_RW,
3204 IPW_GP_CNTRL_BIT_CLOCK_READY, 250);
3206 IPW_DEBUG_INFO("FAILED wait for clock stabilization\n");
3208 /* assert SW reset */
3209 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_SW_RESET);
3213 /* set "initialization complete" bit to move adapter to D0 state */
3214 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3216 IPW_DEBUG_TRACE("<<\n");
3220 /* Call this function from process context, it will sleep in request_firmware.
3221 * Probe is an ok place to call this from.
3223 static int ipw_reset_nic(struct ipw_priv *priv)
3226 unsigned long flags;
3228 IPW_DEBUG_TRACE(">>\n");
3230 rc = ipw_init_nic(priv);
3232 spin_lock_irqsave(&priv->lock, flags);
3233 /* Clear the 'host command active' bit... */
3234 priv->status &= ~STATUS_HCMD_ACTIVE;
3235 wake_up_interruptible(&priv->wait_command_queue);
3236 priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
3237 wake_up_interruptible(&priv->wait_state);
3238 spin_unlock_irqrestore(&priv->lock, flags);
3240 IPW_DEBUG_TRACE("<<\n");
3253 static int ipw_get_fw(struct ipw_priv *priv,
3254 const struct firmware **raw, const char *name)
3259 /* ask firmware_class module to get the boot firmware off disk */
3260 rc = request_firmware(raw, name, &priv->pci_dev->dev);
3262 IPW_ERROR("%s request_firmware failed: Reason %d\n", name, rc);
3266 if ((*raw)->size < sizeof(*fw)) {
3267 IPW_ERROR("%s is too small (%zd)\n", name, (*raw)->size);
3271 fw = (void *)(*raw)->data;
3273 if ((*raw)->size < sizeof(*fw) + le32_to_cpu(fw->boot_size) +
3274 le32_to_cpu(fw->ucode_size) + le32_to_cpu(fw->fw_size)) {
3275 IPW_ERROR("%s is too small or corrupt (%zd)\n",
3276 name, (*raw)->size);
3280 IPW_DEBUG_INFO("Read firmware '%s' image v%d.%d (%zd bytes)\n",
3282 le32_to_cpu(fw->ver) >> 16,
3283 le32_to_cpu(fw->ver) & 0xff,
3284 (*raw)->size - sizeof(*fw));
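
/*
 * On-disk firmware image layout validated above and unpacked in ipw_load():
 *
 *	header pointed to by 'fw'	ver, boot_size, ucode_size, fw_size
 *	boot image			boot_size bytes
 *	microcode image			ucode_size bytes
 *	runtime firmware		fw_size bytes
 */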
3288 #define IPW_RX_BUF_SIZE (3000)
3290 static void ipw_rx_queue_reset(struct ipw_priv *priv,
3291 struct ipw_rx_queue *rxq)
3293 unsigned long flags;
3296 spin_lock_irqsave(&rxq->lock, flags);
3298 INIT_LIST_HEAD(&rxq->rx_free);
3299 INIT_LIST_HEAD(&rxq->rx_used);
3301 /* Fill the rx_used queue with _all_ of the Rx buffers */
3302 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
3303 /* In the reset function, these buffers may have been allocated
3304 * to an SKB, so we need to unmap and free potential storage */
3305 if (rxq->pool[i].skb != NULL) {
3306 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
3307 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
3308 dev_kfree_skb(rxq->pool[i].skb);
3309 rxq->pool[i].skb = NULL;
3311 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3314 /* Set us so that we have processed and used all buffers, but have
3315 * not restocked the Rx queue with fresh buffers */
3316 rxq->read = rxq->write = 0;
3317 rxq->processed = RX_QUEUE_SIZE - 1;
3318 rxq->free_count = 0;
3319 spin_unlock_irqrestore(&rxq->lock, flags);
3323 static int fw_loaded = 0;
3324 static const struct firmware *raw = NULL;
3326 static void free_firmware(void)
3329 release_firmware(raw);
3335 #define free_firmware() do {} while (0)
3338 static int ipw_load(struct ipw_priv *priv)
3341 const struct firmware *raw = NULL;
3344 u8 *boot_img, *ucode_img, *fw_img;
3346 int rc = 0, retries = 3;
3348 switch (priv->ieee->iw_mode) {
3350 name = "ipw2200-ibss.fw";
3352 #ifdef CONFIG_IPW2200_MONITOR
3353 case IW_MODE_MONITOR:
3354 name = "ipw2200-sniffer.fw";
3358 name = "ipw2200-bss.fw";
3370 rc = ipw_get_fw(priv, &raw, name);
3377 fw = (void *)raw->data;
3378 boot_img = &fw->data[0];
3379 ucode_img = &fw->data[le32_to_cpu(fw->boot_size)];
3380 fw_img = &fw->data[le32_to_cpu(fw->boot_size) +
3381 le32_to_cpu(fw->ucode_size)];
3387 priv->rxq = ipw_rx_queue_alloc(priv);
3389 ipw_rx_queue_reset(priv, priv->rxq);
3391 IPW_ERROR("Unable to initialize Rx queue\n");
3396 /* Ensure interrupts are disabled */
3397 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3398 priv->status &= ~STATUS_INT_ENABLED;
3400 /* ack pending interrupts */
3401 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3405 rc = ipw_reset_nic(priv);
3407 IPW_ERROR("Unable to reset NIC\n");
3411 ipw_zero_memory(priv, IPW_NIC_SRAM_LOWER_BOUND,
3412 IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND);
3414 /* DMA the initial boot firmware into the device */
3415 rc = ipw_load_firmware(priv, boot_img, le32_to_cpu(fw->boot_size));
3417 IPW_ERROR("Unable to load boot firmware: %d\n", rc);
3421 /* kick start the device */
3422 ipw_start_nic(priv);
3424 /* wait for the device to finish its initial startup sequence */
3425 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3426 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3428 IPW_ERROR("device failed to boot initial fw image\n");
3431 IPW_DEBUG_INFO("initial device response after %dms\n", rc);
3433 /* ack fw init done interrupt */
3434 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3436 /* DMA the ucode into the device */
3437 rc = ipw_load_ucode(priv, ucode_img, le32_to_cpu(fw->ucode_size));
3439 IPW_ERROR("Unable to load ucode: %d\n", rc);
3446 /* DMA bss firmware into the device */
3447 rc = ipw_load_firmware(priv, fw_img, le32_to_cpu(fw->fw_size));
3449 IPW_ERROR("Unable to load firmware: %d\n", rc);
3456 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
3458 rc = ipw_queue_reset(priv);
3460 IPW_ERROR("Unable to initialize queues\n");
3464 /* Ensure interrupts are disabled */
3465 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3466 /* ack pending interrupts */
3467 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3469 /* kick start the device */
3470 ipw_start_nic(priv);
3472 if (ipw_read32(priv, IPW_INTA_RW) & IPW_INTA_BIT_PARITY_ERROR) {
3474 IPW_WARNING("Parity error. Retrying init.\n");
3479 IPW_ERROR("TODO: Handle parity error -- schedule restart?\n");
3484 /* wait for the device */
3485 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3486 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3488 IPW_ERROR("device failed to start within 500ms\n");
3491 IPW_DEBUG_INFO("device response after %dms\n", rc);
3493 /* ack fw init done interrupt */
3494 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3496 /* read eeprom data and initialize the eeprom region of sram */
3497 priv->eeprom_delay = 1;
3498 ipw_eeprom_init_sram(priv);
3500 /* enable interrupts */
3501 ipw_enable_interrupts(priv);
3503 /* Ensure our queue has valid packets */
3504 ipw_rx_queue_replenish(priv);
3506 ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read);
3508 /* ack pending interrupts */
3509 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3512 release_firmware(raw);
3518 ipw_rx_queue_free(priv, priv->rxq);
3521 ipw_tx_queue_free(priv);
3523 release_firmware(raw);
3535 * Theory of operation
3537 * A queue is a circular buffer with 'Read' and 'Write' pointers.
3538 * Two empty entries are always kept in the buffer to protect from overflow.
3540 * For the Tx queue, there are low mark and high mark limits. If, after queuing
3541 * the packet for Tx, free space becomes < low mark, the Tx queue is stopped. When
3542 * reclaiming packets (on the 'tx done' IRQ), if free space becomes > high mark, the Tx queue is resumed.
3545 * The IPW operates with six queues, one receive queue in the device's
3546 * sram, one transmit queue for sending commands to the device firmware,
3547 * and four transmit queues for data.
3549 * The four transmit queues allow for performing quality of service (qos)
3550 * transmissions as per the 802.11 protocol. Currently Linux does not
3551 * provide a mechanism to the user for utilizing prioritized queues, so
3552 * we only utilize the first data transmit queue (queue1).
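 *
 * Worked example for ipw_queue_space() below (illustrative numbers): with
 * n_bd = 16, last_used = 3 and first_empty = 13, the raw distance 3 - 13 = -10
 * wraps modulo n_bd to 6; subtracting the 2 reserved entries leaves 4 usable
 * slots.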
3556 * Driver allocates buffers of this size for Rx
3559 static inline int ipw_queue_space(const struct clx2_queue *q)
3561 int s = q->last_used - q->first_empty;
3564 s -= 2; /* keep some reserve to not confuse empty and full situations */
3570 static inline int ipw_queue_inc_wrap(int index, int n_bd)
3572 return (++index == n_bd) ? 0 : index;
3576 * Initialize common DMA queue structure
3578 * @param q queue to init
3579 * @param count Number of BD's to allocate. Should be power of 2
3580 * @param read_register Address for 'read' register
3581 * (not offset within BAR, full address)
3582 * @param write_register Address for 'write' register
3583 * (not offset within BAR, full address)
3584 * @param base_register Address for 'base' register
3585 * (not offset within BAR, full address)
3586 * @param size Address for 'size' register
3587 * (not offset within BAR, full address)
3589 static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
3590 int count, u32 read, u32 write, u32 base, u32 size)
3594 q->low_mark = q->n_bd / 4;
3595 if (q->low_mark < 4)
3598 q->high_mark = q->n_bd / 8;
3599 if (q->high_mark < 2)
3602 q->first_empty = q->last_used = 0;
3606 ipw_write32(priv, base, q->dma_addr);
3607 ipw_write32(priv, size, count);
3608 ipw_write32(priv, read, 0);
3609 ipw_write32(priv, write, 0);
3611 _ipw_read32(priv, 0x90);
3614 static int ipw_queue_tx_init(struct ipw_priv *priv,
3615 struct clx2_tx_queue *q,
3616 int count, u32 read, u32 write, u32 base, u32 size)
3618 struct pci_dev *dev = priv->pci_dev;
3620 q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL);
3622 IPW_ERROR("kmalloc for auxiliary BD structures failed\n");
3627 pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
3629 IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
3630 sizeof(q->bd[0]) * count);
3636 ipw_queue_init(priv, &q->q, count, read, write, base, size);
3641 * Free one TFD, the one at index [txq->q.last_used].
3642 * Do NOT advance any indexes
3647 static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
3648 struct clx2_tx_queue *txq)
3650 struct tfd_frame *bd = &txq->bd[txq->q.last_used];
3651 struct pci_dev *dev = priv->pci_dev;
3655 if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE)
3656 /* nothing to cleanup after for host commands */
3660 if (le32_to_cpu(bd->u.data.num_chunks) > NUM_TFD_CHUNKS) {
3661 IPW_ERROR("Too many chunks: %i\n",
3662 le32_to_cpu(bd->u.data.num_chunks));
3663 /** @todo issue fatal error, it is quite a serious situation */
3667 /* unmap chunks if any */
3668 for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) {
3669 pci_unmap_single(dev, le32_to_cpu(bd->u.data.chunk_ptr[i]),
3670 le16_to_cpu(bd->u.data.chunk_len[i]),
3672 if (txq->txb[txq->q.last_used]) {
3673 ieee80211_txb_free(txq->txb[txq->q.last_used]);
3674 txq->txb[txq->q.last_used] = NULL;
3680 * Deallocate DMA queue.
3682 * Empty queue by removing and destroying all BD's.
3688 static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
3690 struct clx2_queue *q = &txq->q;
3691 struct pci_dev *dev = priv->pci_dev;
3696 /* first, empty all BD's */
3697 for (; q->first_empty != q->last_used;
3698 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
3699 ipw_queue_tx_free_tfd(priv, txq);
3702 /* free buffers belonging to queue itself */
3703 pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
3707 /* 0 fill whole structure */
3708 memset(txq, 0, sizeof(*txq));
3712 * Destroy all DMA queues and structures
3716 static void ipw_tx_queue_free(struct ipw_priv *priv)
3719 ipw_queue_tx_free(priv, &priv->txq_cmd);
3722 ipw_queue_tx_free(priv, &priv->txq[0]);
3723 ipw_queue_tx_free(priv, &priv->txq[1]);
3724 ipw_queue_tx_free(priv, &priv->txq[2]);
3725 ipw_queue_tx_free(priv, &priv->txq[3]);
3728 static void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid)
3730 /* First 3 bytes are manufacturer */
3731 bssid[0] = priv->mac_addr[0];
3732 bssid[1] = priv->mac_addr[1];
3733 bssid[2] = priv->mac_addr[2];
3735 /* Last bytes are random */
3736 get_random_bytes(&bssid[3], ETH_ALEN - 3);
3738 bssid[0] &= 0xfe; /* clear multicast bit */
3739 bssid[0] |= 0x02; /* set local assignment bit (IEEE802) */
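/*
 * Example (illustrative addresses): with a device MAC of 00:0e:35:12:34:56,
 * a generated ad-hoc BSSID could be 02:0e:35:a1:b2:c3. The manufacturer OUI
 * is kept, the last three bytes are random, and the first byte has bit 0
 * cleared (unicast) and bit 1 set (locally administered).
 */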
3742 static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
3744 struct ipw_station_entry entry;
3747 for (i = 0; i < priv->num_stations; i++) {
3748 if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) {
3749 /* Another node is active in network */
3750 priv->missed_adhoc_beacons = 0;
3751 if (!(priv->config & CFG_STATIC_CHANNEL))
3752 /* when other nodes drop out, we drop out */
3753 priv->config &= ~CFG_ADHOC_PERSIST;
3759 if (i == MAX_STATIONS)
3760 return IPW_INVALID_STATION;
3762 IPW_DEBUG_SCAN("Adding AdHoc station: " MAC_FMT "\n", MAC_ARG(bssid));
3765 entry.support_mode = 0;
3766 memcpy(entry.mac_addr, bssid, ETH_ALEN);
3767 memcpy(priv->stations[i], bssid, ETH_ALEN);
3768 ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
3769 &entry, sizeof(entry));
3770 priv->num_stations++;
3775 static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
3779 for (i = 0; i < priv->num_stations; i++)
3780 if (!memcmp(priv->stations[i], bssid, ETH_ALEN))
3783 return IPW_INVALID_STATION;
3786 static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
3790 if (priv->status & STATUS_ASSOCIATING) {
3791 IPW_DEBUG_ASSOC("Disassociating while associating.\n");
3792 queue_work(priv->workqueue, &priv->disassociate);
3796 if (!(priv->status & STATUS_ASSOCIATED)) {
3797 IPW_DEBUG_ASSOC("Disassociating while not associated.\n");
3801 IPW_DEBUG_ASSOC("Disassociation attempt from " MAC_FMT " "
3803 MAC_ARG(priv->assoc_request.bssid),
3804 priv->assoc_request.channel);
3806 priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
3807 priv->status |= STATUS_DISASSOCIATING;
3810 priv->assoc_request.assoc_type = HC_DISASSOC_QUIET;
3812 priv->assoc_request.assoc_type = HC_DISASSOCIATE;
3814 err = ipw_send_associate(priv, &priv->assoc_request);
3816 IPW_DEBUG_HC("Attempt to send [dis]associate command "
3823 static int ipw_disassociate(void *data)
3825 struct ipw_priv *priv = data;
3826 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
3828 ipw_send_disassociate(data, 0);
3832 static void ipw_bg_disassociate(void *data)
3834 struct ipw_priv *priv = data;
3835 mutex_lock(&priv->mutex);
3836 ipw_disassociate(data);
3837 mutex_unlock(&priv->mutex);
3840 static void ipw_system_config(void *data)
3842 struct ipw_priv *priv = data;
3844 #ifdef CONFIG_IPW2200_PROMISCUOUS
3845 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
3846 priv->sys_config.accept_all_data_frames = 1;
3847 priv->sys_config.accept_non_directed_frames = 1;
3848 priv->sys_config.accept_all_mgmt_bcpr = 1;
3849 priv->sys_config.accept_all_mgmt_frames = 1;
3853 ipw_send_system_config(priv);
3856 struct ipw_status_code {
3861 static const struct ipw_status_code ipw_status_codes[] = {
3862 {0x00, "Successful"},
3863 {0x01, "Unspecified failure"},
3864 {0x0A, "Cannot support all requested capabilities in the "
3865 "Capability information field"},
3866 {0x0B, "Reassociation denied due to inability to confirm that "
3867 "association exists"},
3868 {0x0C, "Association denied due to reason outside the scope of this "
3871 "Responding station does not support the specified authentication "
3874 "Received an Authentication frame with authentication&