[SK_BUFF]: Introduce skb_copy_from_linear_data{_offset}
[linux-3.10.git] / drivers/net/wireless/ipw2200.c
1 /******************************************************************************
2
3   Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved.
4
5   802.11 status code portion of this file from ethereal-0.10.6:
6     Copyright 2000, Axis Communications AB
7     Ethereal - Network traffic analyzer
8     By Gerald Combs <gerald@ethereal.com>
9     Copyright 1998 Gerald Combs
10
11   This program is free software; you can redistribute it and/or modify it
12   under the terms of version 2 of the GNU General Public License as
13   published by the Free Software Foundation.
14
15   This program is distributed in the hope that it will be useful, but WITHOUT
16   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
18   more details.
19
20   You should have received a copy of the GNU General Public License along with
21   this program; if not, write to the Free Software Foundation, Inc., 59
22   Temple Place - Suite 330, Boston, MA  02111-1307, USA.
23
24   The full GNU General Public License is included in this distribution in the
25   file called LICENSE.
26
27   Contact Information:
28   James P. Ketrenos <ipw2100-admin@linux.intel.com>
29   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30
31 ******************************************************************************/
32
33 #include "ipw2200.h"
34 #include <linux/version.h>
35
36
37 #ifndef KBUILD_EXTMOD
38 #define VK "k"
39 #else
40 #define VK
41 #endif
42
43 #ifdef CONFIG_IPW2200_DEBUG
44 #define VD "d"
45 #else
46 #define VD
47 #endif
48
49 #ifdef CONFIG_IPW2200_MONITOR
50 #define VM "m"
51 #else
52 #define VM
53 #endif
54
55 #ifdef CONFIG_IPW2200_PROMISCUOUS
56 #define VP "p"
57 #else
58 #define VP
59 #endif
60
61 #ifdef CONFIG_IPW2200_RADIOTAP
62 #define VR "r"
63 #else
64 #define VR
65 #endif
66
67 #ifdef CONFIG_IPW2200_QOS
68 #define VQ "q"
69 #else
70 #define VQ
71 #endif
72
73 #define IPW2200_VERSION "1.2.0" VK VD VM VP VR VQ
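/*
 * The single-letter suffixes above fold the build configuration into the
 * reported version string.  For example, an in-tree build (VK = "k") with
 * CONFIG_IPW2200_DEBUG and CONFIG_IPW2200_MONITOR enabled would report
 * IPW2200_VERSION as "1.2.0kdm"; an out-of-tree build with none of these
 * options set would report plain "1.2.0".
 */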
74 #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver"
75 #define DRV_COPYRIGHT   "Copyright(c) 2003-2006 Intel Corporation"
76 #define DRV_VERSION     IPW2200_VERSION
77
78 #define ETH_P_80211_STATS (ETH_P_80211_RAW + 1)
79
80 MODULE_DESCRIPTION(DRV_DESCRIPTION);
81 MODULE_VERSION(DRV_VERSION);
82 MODULE_AUTHOR(DRV_COPYRIGHT);
83 MODULE_LICENSE("GPL");
84
85 static int cmdlog = 0;
86 static int debug = 0;
87 static int channel = 0;
88 static int mode = 0;
89
90 static u32 ipw_debug_level;
91 static int associate = 1;
92 static int auto_create = 1;
93 static int led = 0;
94 static int disable = 0;
95 static int bt_coexist = 0;
96 static int hwcrypto = 0;
97 static int roaming = 1;
98 static const char ipw_modes[] = {
99         'a', 'b', 'g', '?'
100 };
101 static int antenna = CFG_SYS_ANTENNA_BOTH;
102
103 #ifdef CONFIG_IPW2200_PROMISCUOUS
104 static int rtap_iface = 0;     /* def: 0 -- do not create rtap interface */
105 #endif
106
107
108 #ifdef CONFIG_IPW2200_QOS
109 static int qos_enable = 0;
110 static int qos_burst_enable = 0;
111 static int qos_no_ack_mask = 0;
112 static int burst_duration_CCK = 0;
113 static int burst_duration_OFDM = 0;
114
115 static struct ieee80211_qos_parameters def_qos_parameters_OFDM = {
116         {QOS_TX0_CW_MIN_OFDM, QOS_TX1_CW_MIN_OFDM, QOS_TX2_CW_MIN_OFDM,
117          QOS_TX3_CW_MIN_OFDM},
118         {QOS_TX0_CW_MAX_OFDM, QOS_TX1_CW_MAX_OFDM, QOS_TX2_CW_MAX_OFDM,
119          QOS_TX3_CW_MAX_OFDM},
120         {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
121         {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
122         {QOS_TX0_TXOP_LIMIT_OFDM, QOS_TX1_TXOP_LIMIT_OFDM,
123          QOS_TX2_TXOP_LIMIT_OFDM, QOS_TX3_TXOP_LIMIT_OFDM}
124 };
125
126 static struct ieee80211_qos_parameters def_qos_parameters_CCK = {
127         {QOS_TX0_CW_MIN_CCK, QOS_TX1_CW_MIN_CCK, QOS_TX2_CW_MIN_CCK,
128          QOS_TX3_CW_MIN_CCK},
129         {QOS_TX0_CW_MAX_CCK, QOS_TX1_CW_MAX_CCK, QOS_TX2_CW_MAX_CCK,
130          QOS_TX3_CW_MAX_CCK},
131         {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
132         {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
133         {QOS_TX0_TXOP_LIMIT_CCK, QOS_TX1_TXOP_LIMIT_CCK, QOS_TX2_TXOP_LIMIT_CCK,
134          QOS_TX3_TXOP_LIMIT_CCK}
135 };
136
137 static struct ieee80211_qos_parameters def_parameters_OFDM = {
138         {DEF_TX0_CW_MIN_OFDM, DEF_TX1_CW_MIN_OFDM, DEF_TX2_CW_MIN_OFDM,
139          DEF_TX3_CW_MIN_OFDM},
140         {DEF_TX0_CW_MAX_OFDM, DEF_TX1_CW_MAX_OFDM, DEF_TX2_CW_MAX_OFDM,
141          DEF_TX3_CW_MAX_OFDM},
142         {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
143         {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
144         {DEF_TX0_TXOP_LIMIT_OFDM, DEF_TX1_TXOP_LIMIT_OFDM,
145          DEF_TX2_TXOP_LIMIT_OFDM, DEF_TX3_TXOP_LIMIT_OFDM}
146 };
147
148 static struct ieee80211_qos_parameters def_parameters_CCK = {
149         {DEF_TX0_CW_MIN_CCK, DEF_TX1_CW_MIN_CCK, DEF_TX2_CW_MIN_CCK,
150          DEF_TX3_CW_MIN_CCK},
151         {DEF_TX0_CW_MAX_CCK, DEF_TX1_CW_MAX_CCK, DEF_TX2_CW_MAX_CCK,
152          DEF_TX3_CW_MAX_CCK},
153         {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
154         {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
155         {DEF_TX0_TXOP_LIMIT_CCK, DEF_TX1_TXOP_LIMIT_CCK, DEF_TX2_TXOP_LIMIT_CCK,
156          DEF_TX3_TXOP_LIMIT_CCK}
157 };
158
159 static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
160
161 static int from_priority_to_tx_queue[] = {
162         IPW_TX_QUEUE_1, IPW_TX_QUEUE_2, IPW_TX_QUEUE_2, IPW_TX_QUEUE_1,
163         IPW_TX_QUEUE_3, IPW_TX_QUEUE_3, IPW_TX_QUEUE_4, IPW_TX_QUEUE_4
164 };
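/*
 * Read literally, the table above maps each of the eight 802.11 user
 * priorities (assuming 0-based indexing by priority) onto one of the four
 * hardware TX queues: priorities 0 and 3 share IPW_TX_QUEUE_1, 1 and 2 share
 * IPW_TX_QUEUE_2, 4 and 5 share IPW_TX_QUEUE_3, and 6 and 7 share
 * IPW_TX_QUEUE_4.
 */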
165
166 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv);
167
168 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_qos_parameters
169                                        *qos_param);
170 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element
171                                      *qos_param);
172 #endif                          /* CONFIG_IPW2200_QOS */
173
174 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev);
175 static void ipw_remove_current_network(struct ipw_priv *priv);
176 static void ipw_rx(struct ipw_priv *priv);
177 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
178                                 struct clx2_tx_queue *txq, int qindex);
179 static int ipw_queue_reset(struct ipw_priv *priv);
180
181 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
182                              int len, int sync);
183
184 static void ipw_tx_queue_free(struct ipw_priv *);
185
186 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
187 static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
188 static void ipw_rx_queue_replenish(void *);
189 static int ipw_up(struct ipw_priv *);
190 static void ipw_bg_up(struct work_struct *work);
191 static void ipw_down(struct ipw_priv *);
192 static void ipw_bg_down(struct work_struct *work);
193 static int ipw_config(struct ipw_priv *);
194 static int init_supported_rates(struct ipw_priv *priv,
195                                 struct ipw_supported_rates *prates);
196 static void ipw_set_hwcrypto_keys(struct ipw_priv *);
197 static void ipw_send_wep_keys(struct ipw_priv *, int);
198
199 static int snprint_line(char *buf, size_t count,
200                         const u8 * data, u32 len, u32 ofs)
201 {
202         int out, i, j, l;
203         char c;
204
205         out = snprintf(buf, count, "%08X", ofs);
206
207         for (l = 0, i = 0; i < 2; i++) {
208                 out += snprintf(buf + out, count - out, " ");
209                 for (j = 0; j < 8 && l < len; j++, l++)
210                         out += snprintf(buf + out, count - out, "%02X ",
211                                         data[(i * 8 + j)]);
212                 for (; j < 8; j++)
213                         out += snprintf(buf + out, count - out, "   ");
214         }
215
216         out += snprintf(buf + out, count - out, " ");
217         for (l = 0, i = 0; i < 2; i++) {
218                 out += snprintf(buf + out, count - out, " ");
219                 for (j = 0; j < 8 && l < len; j++, l++) {
220                         c = data[(i * 8 + j)];
221                         if (!isascii(c) || !isprint(c))
222                                 c = '.';
223
224                         out += snprintf(buf + out, count - out, "%c", c);
225                 }
226
227                 for (; j < 8; j++)
228                         out += snprintf(buf + out, count - out, " ");
229         }
230
231         return out;
232 }
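/*
 * For illustration, dumping the 16 bytes of "Hello World!" followed by the
 * bytes 00 01 02 03 at offset 0 produces roughly:
 *
 *   00000000 48 65 6C 6C 6F 20 57 6F  72 6C 64 21 00 01 02 03   Hello Wo rld!....
 *
 * i.e. an 8-digit hex offset, two groups of eight hex bytes, and the same
 * bytes again as ASCII with non-printable characters replaced by '.'.
 */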
233
234 static void printk_buf(int level, const u8 * data, u32 len)
235 {
236         char line[81];
237         u32 ofs = 0;
238         if (!(ipw_debug_level & level))
239                 return;
240
241         while (len) {
242                 snprint_line(line, sizeof(line), &data[ofs],
243                              min(len, 16U), ofs);
244                 printk(KERN_DEBUG "%s\n", line);
245                 ofs += 16;
246                 len -= min(len, 16U);
247         }
248 }
249
250 static int snprintk_buf(u8 * output, size_t size, const u8 * data, size_t len)
251 {
252         size_t out = size;
253         u32 ofs = 0;
254         int total = 0;
255
256         while (size && len) {
257                 out = snprint_line(output, size, &data[ofs],
258                                    min_t(size_t, len, 16U), ofs);
259
260                 ofs += 16;
261                 output += out;
262                 size -= out;
263                 len -= min_t(size_t, len, 16U);
264                 total += out;
265         }
266         return total;
267 }
268
269 /* alias for 32-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
270 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
271 #define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)
272
273 /* alias for 8-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
274 static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
275 #define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)
276
277 /* 8-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
278 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
279 static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
280 {
281         IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__,
282                      __LINE__, (u32) (b), (u32) (c));
283         _ipw_write_reg8(a, b, c);
284 }
285
286 /* 16-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
287 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
288 static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
289 {
290         IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__,
291                      __LINE__, (u32) (b), (u32) (c));
292         _ipw_write_reg16(a, b, c);
293 }
294
295 /* 32-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
296 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
297 static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
298 {
299         IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__,
300                      __LINE__, (u32) (b), (u32) (c));
301         _ipw_write_reg32(a, b, c);
302 }
303
304 /* 8-bit direct write (low 4K) */
305 #define _ipw_write8(ipw, ofs, val) writeb((val), (ipw)->hw_base + (ofs))
306
307 /* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
308 #define ipw_write8(ipw, ofs, val) do { \
309  IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
310  _ipw_write8(ipw, ofs, val); } while (0)
311
312 /* 16-bit direct write (low 4K) */
313 #define _ipw_write16(ipw, ofs, val) writew((val), (ipw)->hw_base + (ofs))
314
315 /* 16-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
316 #define ipw_write16(ipw, ofs, val) do { \
317  IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
318  _ipw_write16(ipw, ofs, val); } while (0)
319
320 /* 32-bit direct write (low 4K) */
321 #define _ipw_write32(ipw, ofs, val) writel((val), (ipw)->hw_base + (ofs))
322
323 /* 32-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
324 #define ipw_write32(ipw, ofs, val) do { \
325  IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
326  _ipw_write32(ipw, ofs, val); } while (0)
327
328 /* 8-bit direct read (low 4K) */
329 #define _ipw_read8(ipw, ofs) readb((ipw)->hw_base + (ofs))
330
331 /* 8-bit direct read (low 4K), with debug wrapper */
332 static inline u8 __ipw_read8(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
333 {
334         IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", f, l, (u32) (ofs));
335         return _ipw_read8(ipw, ofs);
336 }
337
338 /* alias to 8-bit direct read (low 4K of SRAM/regs), with debug wrapper */
339 #define ipw_read8(ipw, ofs) __ipw_read8(__FILE__, __LINE__, ipw, ofs)
340
341 /* 16-bit direct read (low 4K) */
342 #define _ipw_read16(ipw, ofs) readw((ipw)->hw_base + (ofs))
343
344 /* 16-bit direct read (low 4K), with debug wrapper */
345 static inline u16 __ipw_read16(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
346 {
347         IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", f, l, (u32) (ofs));
348         return _ipw_read16(ipw, ofs);
349 }
350
351 /* alias to 16-bit direct read (low 4K of SRAM/regs), with debug wrapper */
352 #define ipw_read16(ipw, ofs) __ipw_read16(__FILE__, __LINE__, ipw, ofs)
353
354 /* 32-bit direct read (low 4K) */
355 #define _ipw_read32(ipw, ofs) readl((ipw)->hw_base + (ofs))
356
357 /* 32-bit direct read (low 4K), with debug wrapper */
358 static inline u32 __ipw_read32(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
359 {
360         IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", f, l, (u32) (ofs));
361         return _ipw_read32(ipw, ofs);
362 }
363
364 /* alias to 32-bit direct read (low 4K of SRAM/regs), with debug wrapper */
365 #define ipw_read32(ipw, ofs) __ipw_read32(__FILE__, __LINE__, ipw, ofs)
366
367 /* multi-byte read (above 4K), with debug wrapper */
368 static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
369 static inline void __ipw_read_indirect(const char *f, int l,
370                                        struct ipw_priv *a, u32 b, u8 * c, int d)
371 {
372         IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %d bytes\n", f, l, (u32) (b),
373                      d);
374         _ipw_read_indirect(a, b, c, d);
375 }
376
377 /* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
378 #define ipw_read_indirect(a, b, c, d) __ipw_read_indirect(__FILE__, __LINE__, a, b, c, d)
379
380 /* alias to multi-byte write (SRAM/regs above 4K), with debug wrapper */
381 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data,
382                                 int num);
383 #define ipw_write_indirect(a, b, c, d) do { \
384         IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %d bytes\n", __FILE__, __LINE__, (u32)(b), d); \
385         _ipw_write_indirect(a, b, c, d); } while (0)
386
387 /* 32-bit indirect write (above 4K) */
388 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value)
389 {
390         IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value);
391         _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
392         _ipw_write32(priv, IPW_INDIRECT_DATA, value);
393 }
394
395 /* 8-bit indirect write (above 4K) */
396 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
397 {
398         u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK;        /* dword align */
399         u32 dif_len = reg - aligned_addr;
400
401         IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
402         _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
403         _ipw_write8(priv, IPW_INDIRECT_DATA + dif_len, value);
404 }
405
406 /* 16-bit indirect write (above 4K) */
407 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value)
408 {
409         u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK;        /* dword align */
410         u32 dif_len = (reg - aligned_addr) & (~0x1ul);
411
412         IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
413         _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
414         _ipw_write16(priv, IPW_INDIRECT_DATA + dif_len, value);
415 }
416
417 /* 8-bit indirect read (above 4K) */
418 static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
419 {
420         u32 word;
421         _ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK);
422         IPW_DEBUG_IO(" reg = 0x%8X : \n", reg);
423         word = _ipw_read32(priv, IPW_INDIRECT_DATA);
424         return (word >> ((reg & 0x3) * 8)) & 0xff;
425 }
426
427 /* 32-bit indirect read (above 4K) */
428 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
429 {
430         u32 value;
431
432         IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg);
433
434         _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
435         value = _ipw_read32(priv, IPW_INDIRECT_DATA);
436         IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x \n", reg, value);
437         return value;
438 }
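/*
 * All of the *_reg{8,16,32} helpers above share the same indirect window:
 * the target address is first written to IPW_INDIRECT_ADDR (dword aligned
 * where required) and the data then moves through IPW_INDIRECT_DATA.  The
 * plain _ipw_read*()/_ipw_write*() macros, by contrast, access the first 4K
 * of hw_base directly.
 */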
439
440 /* General purpose, no alignment requirement, iterative (multi-byte) read, */
441 /*    for area above 1st 4K of SRAM/reg space */
442 static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
443                                int num)
444 {
445         u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK;       /* dword align */
446         u32 dif_len = addr - aligned_addr;
447         u32 i;
448
449         IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
450
451         if (num <= 0) {
452                 return;
453         }
454
455         /* Read the first dword (or portion) byte by byte */
456         if (unlikely(dif_len)) {
457                 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
458                 /* Start reading at aligned_addr + dif_len */
459                 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--)
460                         *buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i);
461                 aligned_addr += 4;
462         }
463
464         /* Read all of the middle dwords as dwords, with auto-increment */
465         _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
466         for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
467                 *(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA);
468
469         /* Read the last dword (or portion) byte by byte */
470         if (unlikely(num)) {
471                 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
472                 for (i = 0; num > 0; i++, num--)
473                         *buf++ = ipw_read8(priv, IPW_INDIRECT_DATA + i);
474         }
475 }
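/*
 * Worked example for the routine above: with addr = 0x0802 and num = 10,
 * aligned_addr becomes 0x0800 and dif_len = 2, so the first two bytes are
 * fetched one at a time through IPW_INDIRECT_DATA + 2 and + 3; the remaining
 * eight bytes are then read as two auto-incrementing dwords through
 * IPW_AUTOINC_DATA, leaving num at 0 so the trailing byte-by-byte phase is
 * skipped.  (The addresses here are purely illustrative.)
 */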
476
477 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
478 /*    for area above 1st 4K of SRAM/reg space */
479 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
480                                 int num)
481 {
482         u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK;       /* dword align */
483         u32 dif_len = addr - aligned_addr;
484         u32 i;
485
486         IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
487
488         if (num <= 0) {
489                 return;
490         }
491
492         /* Write the first dword (or portion) byte by byte */
493         if (unlikely(dif_len)) {
494                 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
495                 /* Start writing at aligned_addr + dif_len */
496                 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++)
497                         _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
498                 aligned_addr += 4;
499         }
500
501         /* Write all of the middle dwords as dwords, with auto-increment */
502         _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
503         for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
504                 _ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf);
505
506         /* Write the last dword (or portion) byte by byte */
507         if (unlikely(num)) {
508                 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
509                 for (i = 0; num > 0; i++, num--, buf++)
510                         _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
511         }
512 }
513
514 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
515 /*    for 1st 4K of SRAM/regs space */
516 static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
517                              int num)
518 {
519         memcpy_toio((priv->hw_base + addr), buf, num);
520 }
521
522 /* Set bit(s) in low 4K of SRAM/regs */
523 static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
524 {
525         ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);
526 }
527
528 /* Clear bit(s) in low 4K of SRAM/regs */
529 static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
530 {
531         ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
532 }
533
534 static inline void __ipw_enable_interrupts(struct ipw_priv *priv)
535 {
536         if (priv->status & STATUS_INT_ENABLED)
537                 return;
538         priv->status |= STATUS_INT_ENABLED;
539         ipw_write32(priv, IPW_INTA_MASK_R, IPW_INTA_MASK_ALL);
540 }
541
542 static inline void __ipw_disable_interrupts(struct ipw_priv *priv)
543 {
544         if (!(priv->status & STATUS_INT_ENABLED))
545                 return;
546         priv->status &= ~STATUS_INT_ENABLED;
547         ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
548 }
549
550 static inline void ipw_enable_interrupts(struct ipw_priv *priv)
551 {
552         unsigned long flags;
553
554         spin_lock_irqsave(&priv->irq_lock, flags);
555         __ipw_enable_interrupts(priv);
556         spin_unlock_irqrestore(&priv->irq_lock, flags);
557 }
558
559 static inline void ipw_disable_interrupts(struct ipw_priv *priv)
560 {
561         unsigned long flags;
562
563         spin_lock_irqsave(&priv->irq_lock, flags);
564         __ipw_disable_interrupts(priv);
565         spin_unlock_irqrestore(&priv->irq_lock, flags);
566 }
567
568 static char *ipw_error_desc(u32 val)
569 {
570         switch (val) {
571         case IPW_FW_ERROR_OK:
572                 return "ERROR_OK";
573         case IPW_FW_ERROR_FAIL:
574                 return "ERROR_FAIL";
575         case IPW_FW_ERROR_MEMORY_UNDERFLOW:
576                 return "MEMORY_UNDERFLOW";
577         case IPW_FW_ERROR_MEMORY_OVERFLOW:
578                 return "MEMORY_OVERFLOW";
579         case IPW_FW_ERROR_BAD_PARAM:
580                 return "BAD_PARAM";
581         case IPW_FW_ERROR_BAD_CHECKSUM:
582                 return "BAD_CHECKSUM";
583         case IPW_FW_ERROR_NMI_INTERRUPT:
584                 return "NMI_INTERRUPT";
585         case IPW_FW_ERROR_BAD_DATABASE:
586                 return "BAD_DATABASE";
587         case IPW_FW_ERROR_ALLOC_FAIL:
588                 return "ALLOC_FAIL";
589         case IPW_FW_ERROR_DMA_UNDERRUN:
590                 return "DMA_UNDERRUN";
591         case IPW_FW_ERROR_DMA_STATUS:
592                 return "DMA_STATUS";
593         case IPW_FW_ERROR_DINO_ERROR:
594                 return "DINO_ERROR";
595         case IPW_FW_ERROR_EEPROM_ERROR:
596                 return "EEPROM_ERROR";
597         case IPW_FW_ERROR_SYSASSERT:
598                 return "SYSASSERT";
599         case IPW_FW_ERROR_FATAL_ERROR:
600                 return "FATAL_ERROR";
601         default:
602                 return "UNKNOWN_ERROR";
603         }
604 }
605
606 static void ipw_dump_error_log(struct ipw_priv *priv,
607                                struct ipw_fw_error *error)
608 {
609         u32 i;
610
611         if (!error) {
612                 IPW_ERROR("Error allocating and capturing error log.  "
613                           "Nothing to dump.\n");
614                 return;
615         }
616
617         IPW_ERROR("Start IPW Error Log Dump:\n");
618         IPW_ERROR("Status: 0x%08X, Config: %08X\n",
619                   error->status, error->config);
620
621         for (i = 0; i < error->elem_len; i++)
622                 IPW_ERROR("%s %i 0x%08x  0x%08x  0x%08x  0x%08x  0x%08x\n",
623                           ipw_error_desc(error->elem[i].desc),
624                           error->elem[i].time,
625                           error->elem[i].blink1,
626                           error->elem[i].blink2,
627                           error->elem[i].link1,
628                           error->elem[i].link2, error->elem[i].data);
629         for (i = 0; i < error->log_len; i++)
630                 IPW_ERROR("%i\t0x%08x\t%i\n",
631                           error->log[i].time,
632                           error->log[i].data, error->log[i].event);
633 }
634
635 static inline int ipw_is_init(struct ipw_priv *priv)
636 {
637         return (priv->status & STATUS_INIT) ? 1 : 0;
638 }
639
640 static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len)
641 {
642         u32 addr, field_info, field_len, field_count, total_len;
643
644         IPW_DEBUG_ORD("ordinal = %i\n", ord);
645
646         if (!priv || !val || !len) {
647                 IPW_DEBUG_ORD("Invalid argument\n");
648                 return -EINVAL;
649         }
650
651         /* verify device ordinal tables have been initialized */
652         if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
653                 IPW_DEBUG_ORD("Access ordinals before initialization\n");
654                 return -EINVAL;
655         }
656
657         switch (IPW_ORD_TABLE_ID_MASK & ord) {
658         case IPW_ORD_TABLE_0_MASK:
659                 /*
660                  * TABLE 0: Direct access to a table of 32 bit values
661                  *
662                  * This is a very simple table with the data directly
663                  * read from the table
664                  */
665
666                 /* remove the table id from the ordinal */
667                 ord &= IPW_ORD_TABLE_VALUE_MASK;
668
669                 /* boundary check */
670                 if (ord > priv->table0_len) {
671                         IPW_DEBUG_ORD("ordinal value (%i) longer than "
672                                       "max (%i)\n", ord, priv->table0_len);
673                         return -EINVAL;
674                 }
675
676                 /* verify we have enough room to store the value */
677                 if (*len < sizeof(u32)) {
678                         IPW_DEBUG_ORD("ordinal buffer length too small, "
679                                       "need %zd\n", sizeof(u32));
680                         return -EINVAL;
681                 }
682
683                 IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
684                               ord, priv->table0_addr + (ord << 2));
685
686                 *len = sizeof(u32);
687                 ord <<= 2;
688                 *((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord);
689                 break;
690
691         case IPW_ORD_TABLE_1_MASK:
692                 /*
693                  * TABLE 1: Indirect access to a table of 32 bit values
694                  *
695                  * This is a fairly large table of u32 values each
696                  * representing starting addr for the data (which is
697                  * also a u32)
698                  */
699
700                 /* remove the table id from the ordinal */
701                 ord &= IPW_ORD_TABLE_VALUE_MASK;
702
703                 /* boundary check */
704                 if (ord > priv->table1_len) {
705                         IPW_DEBUG_ORD("ordinal value too long\n");
706                         return -EINVAL;
707                 }
708
709                 /* verify we have enough room to store the value */
710                 if (*len < sizeof(u32)) {
711                         IPW_DEBUG_ORD("ordinal buffer length too small, "
712                                       "need %zd\n", sizeof(u32));
713                         return -EINVAL;
714                 }
715
716                 *((u32 *) val) =
717                     ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
718                 *len = sizeof(u32);
719                 break;
720
721         case IPW_ORD_TABLE_2_MASK:
722                 /*
723                  * TABLE 2: Indirect access to a table of variable sized values
724                  *
725                  * This table consists of six values, each containing
726                  *     - dword containing the starting offset of the data
727                  *     - dword containing the length in the first 16 bits
728                  *       and the count in the second 16 bits
729                  */
730
731                 /* remove the table id from the ordinal */
732                 ord &= IPW_ORD_TABLE_VALUE_MASK;
733
734                 /* boundary check */
735                 if (ord > priv->table2_len) {
736                         IPW_DEBUG_ORD("ordinal value too long\n");
737                         return -EINVAL;
738                 }
739
740                 /* get the address of statistic */
741                 addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));
742
743                 /* get the second DW of statistics ;
744                  * two 16-bit words - first is length, second is count */
745                 field_info =
746                     ipw_read_reg32(priv,
747                                    priv->table2_addr + (ord << 3) +
748                                    sizeof(u32));
749
750                 /* get each entry length */
751                 field_len = *((u16 *) & field_info);
752
753                 /* get number of entries */
754                 field_count = *(((u16 *) & field_info) + 1);
755
756                 /* abort if not enough memory */
757                 total_len = field_len * field_count;
758                 if (total_len > *len) {
759                         *len = total_len;
760                         return -EINVAL;
761                 }
762
763                 *len = total_len;
764                 if (!total_len)
765                         return 0;
766
767                 IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
768                               "field_info = 0x%08x\n",
769                               addr, total_len, field_info);
770                 ipw_read_indirect(priv, addr, val, total_len);
771                 break;
772
773         default:
774                 IPW_DEBUG_ORD("Invalid ordinal!\n");
775                 return -EINVAL;
776
777         }
778
779         return 0;
780 }
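/*
 * A minimal usage sketch for ipw_get_ordinal(), mirroring what
 * show_ucode_version() does further down: the caller supplies a buffer and
 * its size, and *len is updated to the number of bytes actually written.
 *
 *      u32 tmp = 0, len = sizeof(u32);
 *
 *      if (ipw_get_ordinal(priv, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
 *              return 0;       -- ordinal could not be read
 *
 * For a TABLE 2 ordinal, a buffer that is too small makes the call return
 * -EINVAL with *len set to the size required.
 */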
781
782 static void ipw_init_ordinals(struct ipw_priv *priv)
783 {
784         priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
785         priv->table0_len = ipw_read32(priv, priv->table0_addr);
786
787         IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
788                       priv->table0_addr, priv->table0_len);
789
790         priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
791         priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);
792
793         IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
794                       priv->table1_addr, priv->table1_len);
795
796         priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
797         priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
798         priv->table2_len &= 0x0000ffff; /* use first two bytes */
799
800         IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
801                       priv->table2_addr, priv->table2_len);
802
803 }
804
805 static u32 ipw_register_toggle(u32 reg)
806 {
807         reg &= ~IPW_START_STANDBY;
808         if (reg & IPW_GATE_ODMA)
809                 reg &= ~IPW_GATE_ODMA;
810         if (reg & IPW_GATE_IDMA)
811                 reg &= ~IPW_GATE_IDMA;
812         if (reg & IPW_GATE_ADMA)
813                 reg &= ~IPW_GATE_ADMA;
814         return reg;
815 }
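/*
 * Callers below read IPW_EVENT_REG, set or clear the LED bits they care
 * about, pass the value through ipw_register_toggle() and write it back;
 * the helper strips IPW_START_STANDBY and the DMA gating bits so that the
 * LED update never re-asserts those control bits as a side effect.
 */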
816
817 /*
818  * LED behavior:
819  * - On radio ON, turn on any LEDs that need to be on during start
820  * - On initialization, start unassociated blink
821  * - On association, disable unassociated blink
822  * - On disassociation, start unassociated blink
823  * - On radio OFF, turn off any LEDs started during radio on
824  *
825  */
826 #define LD_TIME_LINK_ON msecs_to_jiffies(300)
827 #define LD_TIME_LINK_OFF msecs_to_jiffies(2700)
828 #define LD_TIME_ACT_ON msecs_to_jiffies(250)
829
830 static void ipw_led_link_on(struct ipw_priv *priv)
831 {
832         unsigned long flags;
833         u32 led;
834
835         /* If configured to not use LEDs, or nic_type is 1,
836          * then we don't toggle a LINK led */
837         if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
838                 return;
839
840         spin_lock_irqsave(&priv->lock, flags);
841
842         if (!(priv->status & STATUS_RF_KILL_MASK) &&
843             !(priv->status & STATUS_LED_LINK_ON)) {
844                 IPW_DEBUG_LED("Link LED On\n");
845                 led = ipw_read_reg32(priv, IPW_EVENT_REG);
846                 led |= priv->led_association_on;
847
848                 led = ipw_register_toggle(led);
849
850                 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
851                 ipw_write_reg32(priv, IPW_EVENT_REG, led);
852
853                 priv->status |= STATUS_LED_LINK_ON;
854
855                 /* If we aren't associated, schedule turning the LED off */
856                 if (!(priv->status & STATUS_ASSOCIATED))
857                         queue_delayed_work(priv->workqueue,
858                                            &priv->led_link_off,
859                                            LD_TIME_LINK_ON);
860         }
861
862         spin_unlock_irqrestore(&priv->lock, flags);
863 }
864
865 static void ipw_bg_led_link_on(struct work_struct *work)
866 {
867         struct ipw_priv *priv =
868                 container_of(work, struct ipw_priv, led_link_on.work);
869         mutex_lock(&priv->mutex);
870         ipw_led_link_on(priv);
871         mutex_unlock(&priv->mutex);
872 }
873
874 static void ipw_led_link_off(struct ipw_priv *priv)
875 {
876         unsigned long flags;
877         u32 led;
878
879         /* If configured not to use LEDs, or nic type is 1,
880          * then we don't toggle the LINK led. */
881         if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
882                 return;
883
884         spin_lock_irqsave(&priv->lock, flags);
885
886         if (priv->status & STATUS_LED_LINK_ON) {
887                 led = ipw_read_reg32(priv, IPW_EVENT_REG);
888                 led &= priv->led_association_off;
889                 led = ipw_register_toggle(led);
890
891                 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
892                 ipw_write_reg32(priv, IPW_EVENT_REG, led);
893
894                 IPW_DEBUG_LED("Link LED Off\n");
895
896                 priv->status &= ~STATUS_LED_LINK_ON;
897
898                 /* If we aren't associated and the radio is on, schedule
899                  * turning the LED on (blink while unassociated) */
900                 if (!(priv->status & STATUS_RF_KILL_MASK) &&
901                     !(priv->status & STATUS_ASSOCIATED))
902                         queue_delayed_work(priv->workqueue, &priv->led_link_on,
903                                            LD_TIME_LINK_OFF);
904
905         }
906
907         spin_unlock_irqrestore(&priv->lock, flags);
908 }
909
910 static void ipw_bg_led_link_off(struct work_struct *work)
911 {
912         struct ipw_priv *priv =
913                 container_of(work, struct ipw_priv, led_link_off.work);
914         mutex_lock(&priv->mutex);
915         ipw_led_link_off(priv);
916         mutex_unlock(&priv->mutex);
917 }
918
919 static void __ipw_led_activity_on(struct ipw_priv *priv)
920 {
921         u32 led;
922
923         if (priv->config & CFG_NO_LED)
924                 return;
925
926         if (priv->status & STATUS_RF_KILL_MASK)
927                 return;
928
929         if (!(priv->status & STATUS_LED_ACT_ON)) {
930                 led = ipw_read_reg32(priv, IPW_EVENT_REG);
931                 led |= priv->led_activity_on;
932
933                 led = ipw_register_toggle(led);
934
935                 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
936                 ipw_write_reg32(priv, IPW_EVENT_REG, led);
937
938                 IPW_DEBUG_LED("Activity LED On\n");
939
940                 priv->status |= STATUS_LED_ACT_ON;
941
942                 cancel_delayed_work(&priv->led_act_off);
943                 queue_delayed_work(priv->workqueue, &priv->led_act_off,
944                                    LD_TIME_ACT_ON);
945         } else {
946                 /* Reschedule LED off for full time period */
947                 cancel_delayed_work(&priv->led_act_off);
948                 queue_delayed_work(priv->workqueue, &priv->led_act_off,
949                                    LD_TIME_ACT_ON);
950         }
951 }
952
953 #if 0
954 void ipw_led_activity_on(struct ipw_priv *priv)
955 {
956         unsigned long flags;
957         spin_lock_irqsave(&priv->lock, flags);
958         __ipw_led_activity_on(priv);
959         spin_unlock_irqrestore(&priv->lock, flags);
960 }
961 #endif  /*  0  */
962
963 static void ipw_led_activity_off(struct ipw_priv *priv)
964 {
965         unsigned long flags;
966         u32 led;
967
968         if (priv->config & CFG_NO_LED)
969                 return;
970
971         spin_lock_irqsave(&priv->lock, flags);
972
973         if (priv->status & STATUS_LED_ACT_ON) {
974                 led = ipw_read_reg32(priv, IPW_EVENT_REG);
975                 led &= priv->led_activity_off;
976
977                 led = ipw_register_toggle(led);
978
979                 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
980                 ipw_write_reg32(priv, IPW_EVENT_REG, led);
981
982                 IPW_DEBUG_LED("Activity LED Off\n");
983
984                 priv->status &= ~STATUS_LED_ACT_ON;
985         }
986
987         spin_unlock_irqrestore(&priv->lock, flags);
988 }
989
990 static void ipw_bg_led_activity_off(struct work_struct *work)
991 {
992         struct ipw_priv *priv =
993                 container_of(work, struct ipw_priv, led_act_off.work);
994         mutex_lock(&priv->mutex);
995         ipw_led_activity_off(priv);
996         mutex_unlock(&priv->mutex);
997 }
998
999 static void ipw_led_band_on(struct ipw_priv *priv)
1000 {
1001         unsigned long flags;
1002         u32 led;
1003
1004         /* Only nic type 1 supports mode LEDs */
1005         if (priv->config & CFG_NO_LED ||
1006             priv->nic_type != EEPROM_NIC_TYPE_1 || !priv->assoc_network)
1007                 return;
1008
1009         spin_lock_irqsave(&priv->lock, flags);
1010
1011         led = ipw_read_reg32(priv, IPW_EVENT_REG);
1012         if (priv->assoc_network->mode == IEEE_A) {
1013                 led |= priv->led_ofdm_on;
1014                 led &= priv->led_association_off;
1015                 IPW_DEBUG_LED("Mode LED On: 802.11a\n");
1016         } else if (priv->assoc_network->mode == IEEE_G) {
1017                 led |= priv->led_ofdm_on;
1018                 led |= priv->led_association_on;
1019                 IPW_DEBUG_LED("Mode LED On: 802.11g\n");
1020         } else {
1021                 led &= priv->led_ofdm_off;
1022                 led |= priv->led_association_on;
1023                 IPW_DEBUG_LED("Mode LED On: 802.11b\n");
1024         }
1025
1026         led = ipw_register_toggle(led);
1027
1028         IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1029         ipw_write_reg32(priv, IPW_EVENT_REG, led);
1030
1031         spin_unlock_irqrestore(&priv->lock, flags);
1032 }
1033
1034 static void ipw_led_band_off(struct ipw_priv *priv)
1035 {
1036         unsigned long flags;
1037         u32 led;
1038
1039         /* Only nic type 1 supports mode LEDs */
1040         if (priv->config & CFG_NO_LED || priv->nic_type != EEPROM_NIC_TYPE_1)
1041                 return;
1042
1043         spin_lock_irqsave(&priv->lock, flags);
1044
1045         led = ipw_read_reg32(priv, IPW_EVENT_REG);
1046         led &= priv->led_ofdm_off;
1047         led &= priv->led_association_off;
1048
1049         led = ipw_register_toggle(led);
1050
1051         IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1052         ipw_write_reg32(priv, IPW_EVENT_REG, led);
1053
1054         spin_unlock_irqrestore(&priv->lock, flags);
1055 }
1056
1057 static void ipw_led_radio_on(struct ipw_priv *priv)
1058 {
1059         ipw_led_link_on(priv);
1060 }
1061
1062 static void ipw_led_radio_off(struct ipw_priv *priv)
1063 {
1064         ipw_led_activity_off(priv);
1065         ipw_led_link_off(priv);
1066 }
1067
1068 static void ipw_led_link_up(struct ipw_priv *priv)
1069 {
1070         /* Set the Link Led on for all nic types */
1071         ipw_led_link_on(priv);
1072 }
1073
1074 static void ipw_led_link_down(struct ipw_priv *priv)
1075 {
1076         ipw_led_activity_off(priv);
1077         ipw_led_link_off(priv);
1078
1079         if (priv->status & STATUS_RF_KILL_MASK)
1080                 ipw_led_radio_off(priv);
1081 }
1082
1083 static void ipw_led_init(struct ipw_priv *priv)
1084 {
1085         priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE];
1086
1087         /* Set the default PINs for the link and activity leds */
1088         priv->led_activity_on = IPW_ACTIVITY_LED;
1089         priv->led_activity_off = ~(IPW_ACTIVITY_LED);
1090
1091         priv->led_association_on = IPW_ASSOCIATED_LED;
1092         priv->led_association_off = ~(IPW_ASSOCIATED_LED);
1093
1094         /* Set the default PINs for the OFDM leds */
1095         priv->led_ofdm_on = IPW_OFDM_LED;
1096         priv->led_ofdm_off = ~(IPW_OFDM_LED);
1097
1098         switch (priv->nic_type) {
1099         case EEPROM_NIC_TYPE_1:
1100                 /* In this NIC type, the LEDs are reversed.... */
1101                 priv->led_activity_on = IPW_ASSOCIATED_LED;
1102                 priv->led_activity_off = ~(IPW_ASSOCIATED_LED);
1103                 priv->led_association_on = IPW_ACTIVITY_LED;
1104                 priv->led_association_off = ~(IPW_ACTIVITY_LED);
1105
1106                 if (!(priv->config & CFG_NO_LED))
1107                         ipw_led_band_on(priv);
1108
1109                 /* And we don't blink link LEDs for this nic, so
1110                  * just return here */
1111                 return;
1112
1113         case EEPROM_NIC_TYPE_3:
1114         case EEPROM_NIC_TYPE_2:
1115         case EEPROM_NIC_TYPE_4:
1116         case EEPROM_NIC_TYPE_0:
1117                 break;
1118
1119         default:
1120                 IPW_DEBUG_INFO("Unknown NIC type from EEPROM: %d\n",
1121                                priv->nic_type);
1122                 priv->nic_type = EEPROM_NIC_TYPE_0;
1123                 break;
1124         }
1125
1126         if (!(priv->config & CFG_NO_LED)) {
1127                 if (priv->status & STATUS_ASSOCIATED)
1128                         ipw_led_link_on(priv);
1129                 else
1130                         ipw_led_link_off(priv);
1131         }
1132 }
1133
1134 static void ipw_led_shutdown(struct ipw_priv *priv)
1135 {
1136         ipw_led_activity_off(priv);
1137         ipw_led_link_off(priv);
1138         ipw_led_band_off(priv);
1139         cancel_delayed_work(&priv->led_link_on);
1140         cancel_delayed_work(&priv->led_link_off);
1141         cancel_delayed_work(&priv->led_act_off);
1142 }
1143
1144 /*
1145  * The following adds a new attribute to the sysfs representation
1146  * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/)
1147  * used for controlling the debug level.
1148  *
1149  * See the level definitions in ipw for details.
1150  */
1151 static ssize_t show_debug_level(struct device_driver *d, char *buf)
1152 {
1153         return sprintf(buf, "0x%08X\n", ipw_debug_level);
1154 }
1155
1156 static ssize_t store_debug_level(struct device_driver *d, const char *buf,
1157                                  size_t count)
1158 {
1159         char *p = (char *)buf;
1160         u32 val;
1161
1162         if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1163                 p++;
1164                 if (p[0] == 'x' || p[0] == 'X')
1165                         p++;
1166                 val = simple_strtoul(p, &p, 16);
1167         } else
1168                 val = simple_strtoul(p, &p, 10);
1169         if (p == buf)
1170                 printk(KERN_INFO DRV_NAME
1171                        ": %s is not in hex or decimal form.\n", buf);
1172         else
1173                 ipw_debug_level = val;
1174
1175         return strnlen(buf, count);
1176 }
1177
1178 static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
1179                    show_debug_level, store_debug_level);
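/*
 * From user space the attribute accepts either decimal or hexadecimal input
 * (a leading "x" or "0x" selects base 16), e.g. via the driver sysfs path
 * mentioned in the comment above show_debug_level():
 *
 *      # echo 0x00000001 > /sys/bus/pci/drivers/ipw/debug_level
 *      # cat /sys/bus/pci/drivers/ipw/debug_level
 *      0x00000001
 */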
1180
1181 static inline u32 ipw_get_event_log_len(struct ipw_priv *priv)
1182 {
1183         /* length = 1st dword in log */
1184         return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG));
1185 }
1186
1187 static void ipw_capture_event_log(struct ipw_priv *priv,
1188                                   u32 log_len, struct ipw_event *log)
1189 {
1190         u32 base;
1191
1192         if (log_len) {
1193                 base = ipw_read32(priv, IPW_EVENT_LOG);
1194                 ipw_read_indirect(priv, base + sizeof(base) + sizeof(u32),
1195                                   (u8 *) log, sizeof(*log) * log_len);
1196         }
1197 }
1198
1199 static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv)
1200 {
1201         struct ipw_fw_error *error;
1202         u32 log_len = ipw_get_event_log_len(priv);
1203         u32 base = ipw_read32(priv, IPW_ERROR_LOG);
1204         u32 elem_len = ipw_read_reg32(priv, base);
1205
1206         error = kmalloc(sizeof(*error) +
1207                         sizeof(*error->elem) * elem_len +
1208                         sizeof(*error->log) * log_len, GFP_ATOMIC);
1209         if (!error) {
1210                 IPW_ERROR("Memory allocation for firmware error log "
1211                           "failed.\n");
1212                 return NULL;
1213         }
1214         error->jiffies = jiffies;
1215         error->status = priv->status;
1216         error->config = priv->config;
1217         error->elem_len = elem_len;
1218         error->log_len = log_len;
1219         error->elem = (struct ipw_error_elem *)error->payload;
1220         error->log = (struct ipw_event *)(error->elem + elem_len);
1221
1222         ipw_capture_event_log(priv, log_len, error->log);
1223
1224         if (elem_len)
1225                 ipw_read_indirect(priv, base + sizeof(base), (u8 *) error->elem,
1226                                   sizeof(*error->elem) * elem_len);
1227
1228         return error;
1229 }
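/*
 * The capture above lives in a single contiguous allocation:
 *
 *      [ struct ipw_fw_error | elem_len x struct ipw_error_elem | log_len x struct ipw_event ]
 *
 * error->elem points at the start of the payload and error->log follows the
 * element array immediately, which is why the single kfree(priv->error) in
 * clear_error() below releases the whole capture.
 */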
1230
1231 static ssize_t show_event_log(struct device *d,
1232                               struct device_attribute *attr, char *buf)
1233 {
1234         struct ipw_priv *priv = dev_get_drvdata(d);
1235         u32 log_len = ipw_get_event_log_len(priv);
1236         struct ipw_event log[log_len];
1237         u32 len = 0, i;
1238
1239         ipw_capture_event_log(priv, log_len, log);
1240
1241         len += snprintf(buf + len, PAGE_SIZE - len, "%08X", log_len);
1242         for (i = 0; i < log_len; i++)
1243                 len += snprintf(buf + len, PAGE_SIZE - len,
1244                                 "\n%08X%08X%08X",
1245                                 log[i].time, log[i].event, log[i].data);
1246         len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1247         return len;
1248 }
1249
1250 static DEVICE_ATTR(event_log, S_IRUGO, show_event_log, NULL);
1251
1252 static ssize_t show_error(struct device *d,
1253                           struct device_attribute *attr, char *buf)
1254 {
1255         struct ipw_priv *priv = dev_get_drvdata(d);
1256         u32 len = 0, i;
1257         if (!priv->error)
1258                 return 0;
1259         len += snprintf(buf + len, PAGE_SIZE - len,
1260                         "%08lX%08X%08X%08X",
1261                         priv->error->jiffies,
1262                         priv->error->status,
1263                         priv->error->config, priv->error->elem_len);
1264         for (i = 0; i < priv->error->elem_len; i++)
1265                 len += snprintf(buf + len, PAGE_SIZE - len,
1266                                 "\n%08X%08X%08X%08X%08X%08X%08X",
1267                                 priv->error->elem[i].time,
1268                                 priv->error->elem[i].desc,
1269                                 priv->error->elem[i].blink1,
1270                                 priv->error->elem[i].blink2,
1271                                 priv->error->elem[i].link1,
1272                                 priv->error->elem[i].link2,
1273                                 priv->error->elem[i].data);
1274
1275         len += snprintf(buf + len, PAGE_SIZE - len,
1276                         "\n%08X", priv->error->log_len);
1277         for (i = 0; i < priv->error->log_len; i++)
1278                 len += snprintf(buf + len, PAGE_SIZE - len,
1279                                 "\n%08X%08X%08X",
1280                                 priv->error->log[i].time,
1281                                 priv->error->log[i].event,
1282                                 priv->error->log[i].data);
1283         len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1284         return len;
1285 }
1286
1287 static ssize_t clear_error(struct device *d,
1288                            struct device_attribute *attr,
1289                            const char *buf, size_t count)
1290 {
1291         struct ipw_priv *priv = dev_get_drvdata(d);
1292
1293         kfree(priv->error);
1294         priv->error = NULL;
1295         return count;
1296 }
1297
1298 static DEVICE_ATTR(error, S_IRUGO | S_IWUSR, show_error, clear_error);
1299
1300 static ssize_t show_cmd_log(struct device *d,
1301                             struct device_attribute *attr, char *buf)
1302 {
1303         struct ipw_priv *priv = dev_get_drvdata(d);
1304         u32 len = 0, i;
1305         if (!priv->cmdlog)
1306                 return 0;
1307         for (i = (priv->cmdlog_pos + 1) % priv->cmdlog_len;
1308              (i != priv->cmdlog_pos) && (PAGE_SIZE - len);
1309              i = (i + 1) % priv->cmdlog_len) {
1310                 len +=
1311                     snprintf(buf + len, PAGE_SIZE - len,
1312                              "\n%08lX%08X%08X%08X\n", priv->cmdlog[i].jiffies,
1313                              priv->cmdlog[i].retcode, priv->cmdlog[i].cmd.cmd,
1314                              priv->cmdlog[i].cmd.len);
1315                 len +=
1316                     snprintk_buf(buf + len, PAGE_SIZE - len,
1317                                  (u8 *) priv->cmdlog[i].cmd.param,
1318                                  priv->cmdlog[i].cmd.len);
1319                 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1320         }
1321         len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1322         return len;
1323 }
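/*
 * Note on the loop above: the command log is a ring buffer, so the walk
 * starts at the slot after cmdlog_pos (presumably the oldest entry), wraps
 * modulo cmdlog_len, and stops when it comes back around to cmdlog_pos,
 * emitting entries oldest-first until the page buffer runs out.
 */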
1324
1325 static DEVICE_ATTR(cmd_log, S_IRUGO, show_cmd_log, NULL);
1326
1327 #ifdef CONFIG_IPW2200_PROMISCUOUS
1328 static void ipw_prom_free(struct ipw_priv *priv);
1329 static int ipw_prom_alloc(struct ipw_priv *priv);
1330 static ssize_t store_rtap_iface(struct device *d,
1331                          struct device_attribute *attr,
1332                          const char *buf, size_t count)
1333 {
1334         struct ipw_priv *priv = dev_get_drvdata(d);
1335         int rc = 0;
1336
1337         if (count < 1)
1338                 return -EINVAL;
1339
1340         switch (buf[0]) {
1341         case '0':
1342                 if (!rtap_iface)
1343                         return count;
1344
1345                 if (netif_running(priv->prom_net_dev)) {
1346                         IPW_WARNING("Interface is up.  Cannot unregister.\n");
1347                         return count;
1348                 }
1349
1350                 ipw_prom_free(priv);
1351                 rtap_iface = 0;
1352                 break;
1353
1354         case '1':
1355                 if (rtap_iface)
1356                         return count;
1357
1358                 rc = ipw_prom_alloc(priv);
1359                 if (!rc)
1360                         rtap_iface = 1;
1361                 break;
1362
1363         default:
1364                 return -EINVAL;
1365         }
1366
1367         if (rc) {
1368                 IPW_ERROR("Failed to register promiscuous network "
1369                           "device (error %d).\n", rc);
1370         }
1371
1372         return count;
1373 }
1374
1375 static ssize_t show_rtap_iface(struct device *d,
1376                         struct device_attribute *attr,
1377                         char *buf)
1378 {
1379         struct ipw_priv *priv = dev_get_drvdata(d);
1380         if (rtap_iface)
1381                 return sprintf(buf, "%s", priv->prom_net_dev->name);
1382         else {
1383                 buf[0] = '-';
1384                 buf[1] = '1';
1385                 buf[2] = '\0';
1386                 return 3;
1387         }
1388 }
1389
1390 static DEVICE_ATTR(rtap_iface, S_IWUSR | S_IRUSR, show_rtap_iface,
1391                    store_rtap_iface);
1392
1393 static ssize_t store_rtap_filter(struct device *d,
1394                          struct device_attribute *attr,
1395                          const char *buf, size_t count)
1396 {
1397         struct ipw_priv *priv = dev_get_drvdata(d);
1398
1399         if (!priv->prom_priv) {
1400                 IPW_ERROR("Attempting to set filter without "
1401                           "rtap_iface enabled.\n");
1402                 return -EPERM;
1403         }
1404
1405         priv->prom_priv->filter = simple_strtol(buf, NULL, 0);
1406
1407         IPW_DEBUG_INFO("Setting rtap filter to " BIT_FMT16 "\n",
1408                        BIT_ARG16(priv->prom_priv->filter));
1409
1410         return count;
1411 }
1412
1413 static ssize_t show_rtap_filter(struct device *d,
1414                         struct device_attribute *attr,
1415                         char *buf)
1416 {
1417         struct ipw_priv *priv = dev_get_drvdata(d);
1418         return sprintf(buf, "0x%04X",
1419                        priv->prom_priv ? priv->prom_priv->filter : 0);
1420 }
1421
1422 static DEVICE_ATTR(rtap_filter, S_IWUSR | S_IRUSR, show_rtap_filter,
1423                    store_rtap_filter);
1424 #endif
1425
1426 static ssize_t show_scan_age(struct device *d, struct device_attribute *attr,
1427                              char *buf)
1428 {
1429         struct ipw_priv *priv = dev_get_drvdata(d);
1430         return sprintf(buf, "%d\n", priv->ieee->scan_age);
1431 }
1432
1433 static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
1434                               const char *buf, size_t count)
1435 {
1436         struct ipw_priv *priv = dev_get_drvdata(d);
1437         struct net_device *dev = priv->net_dev;
1438         char buffer[] = "00000000";
1439         unsigned long len =
1440             (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1;
1441         unsigned long val;
1442         char *p = buffer;
1443
1444         IPW_DEBUG_INFO("enter\n");
1445
1446         strncpy(buffer, buf, len);
1447         buffer[len] = 0;
1448
1449         if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1450                 p++;
1451                 if (p[0] == 'x' || p[0] == 'X')
1452                         p++;
1453                 val = simple_strtoul(p, &p, 16);
1454         } else
1455                 val = simple_strtoul(p, &p, 10);
1456         if (p == buffer) {
1457                 IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name);
1458         } else {
1459                 priv->ieee->scan_age = val;
1460                 IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age);
1461         }
1462
1463         IPW_DEBUG_INFO("exit\n");
1464         return len;
1465 }
1466
1467 static DEVICE_ATTR(scan_age, S_IWUSR | S_IRUGO, show_scan_age, store_scan_age);
1468
1469 static ssize_t show_led(struct device *d, struct device_attribute *attr,
1470                         char *buf)
1471 {
1472         struct ipw_priv *priv = dev_get_drvdata(d);
1473         return sprintf(buf, "%d\n", (priv->config & CFG_NO_LED) ? 0 : 1);
1474 }
1475
1476 static ssize_t store_led(struct device *d, struct device_attribute *attr,
1477                          const char *buf, size_t count)
1478 {
1479         struct ipw_priv *priv = dev_get_drvdata(d);
1480
1481         IPW_DEBUG_INFO("enter\n");
1482
1483         if (count == 0)
1484                 return 0;
1485
1486         if (buf[0] == '0') {
1487                 IPW_DEBUG_LED("Disabling LED control.\n");
1488                 priv->config |= CFG_NO_LED;
1489                 ipw_led_shutdown(priv);
1490         } else {
1491                 IPW_DEBUG_LED("Enabling LED control.\n");
1492                 priv->config &= ~CFG_NO_LED;
1493                 ipw_led_init(priv);
1494         }
1495
1496         IPW_DEBUG_INFO("exit\n");
1497         return count;
1498 }
1499
1500 static DEVICE_ATTR(led, S_IWUSR | S_IRUGO, show_led, store_led);
1501
1502 static ssize_t show_status(struct device *d,
1503                            struct device_attribute *attr, char *buf)
1504 {
1505         struct ipw_priv *p = d->driver_data;
1506         return sprintf(buf, "0x%08x\n", (int)p->status);
1507 }
1508
1509 static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
1510
1511 static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
1512                         char *buf)
1513 {
1514         struct ipw_priv *p = d->driver_data;
1515         return sprintf(buf, "0x%08x\n", (int)p->config);
1516 }
1517
1518 static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);
1519
1520 static ssize_t show_nic_type(struct device *d,
1521                              struct device_attribute *attr, char *buf)
1522 {
1523         struct ipw_priv *priv = d->driver_data;
1524         return sprintf(buf, "TYPE: %d\n", priv->nic_type);
1525 }
1526
1527 static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL);
1528
1529 static ssize_t show_ucode_version(struct device *d,
1530                                   struct device_attribute *attr, char *buf)
1531 {
1532         u32 len = sizeof(u32), tmp = 0;
1533         struct ipw_priv *p = d->driver_data;
1534
1535         if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
1536                 return 0;
1537
1538         return sprintf(buf, "0x%08x\n", tmp);
1539 }
1540
1541 static DEVICE_ATTR(ucode_version, S_IWUSR | S_IRUGO, show_ucode_version, NULL);
1542
1543 static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
1544                         char *buf)
1545 {
1546         u32 len = sizeof(u32), tmp = 0;
1547         struct ipw_priv *p = d->driver_data;
1548
1549         if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
1550                 return 0;
1551
1552         return sprintf(buf, "0x%08x\n", tmp);
1553 }
1554
1555 static DEVICE_ATTR(rtc, S_IWUSR | S_IRUGO, show_rtc, NULL);
1556
1557 /*
1558  * Add a device attribute to view/control the delay between eeprom
1559  * operations.
1560  */
1561 static ssize_t show_eeprom_delay(struct device *d,
1562                                  struct device_attribute *attr, char *buf)
1563 {
1564         int n = ((struct ipw_priv *)d->driver_data)->eeprom_delay;
1565         return sprintf(buf, "%i\n", n);
1566 }
1567 static ssize_t store_eeprom_delay(struct device *d,
1568                                   struct device_attribute *attr,
1569                                   const char *buf, size_t count)
1570 {
1571         struct ipw_priv *p = d->driver_data;
1572         sscanf(buf, "%i", &p->eeprom_delay);
1573         return strnlen(buf, count);
1574 }
1575
1576 static DEVICE_ATTR(eeprom_delay, S_IWUSR | S_IRUGO,
1577                    show_eeprom_delay, store_eeprom_delay);
1578
1579 static ssize_t show_command_event_reg(struct device *d,
1580                                       struct device_attribute *attr, char *buf)
1581 {
1582         u32 reg = 0;
1583         struct ipw_priv *p = d->driver_data;
1584
1585         reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT);
1586         return sprintf(buf, "0x%08x\n", reg);
1587 }
1588 static ssize_t store_command_event_reg(struct device *d,
1589                                        struct device_attribute *attr,
1590                                        const char *buf, size_t count)
1591 {
1592         u32 reg;
1593         struct ipw_priv *p = d->driver_data;
1594
1595         sscanf(buf, "%x", &reg);
1596         ipw_write_reg32(p, IPW_INTERNAL_CMD_EVENT, reg);
1597         return strnlen(buf, count);
1598 }
1599
1600 static DEVICE_ATTR(command_event_reg, S_IWUSR | S_IRUGO,
1601                    show_command_event_reg, store_command_event_reg);
1602
1603 static ssize_t show_mem_gpio_reg(struct device *d,
1604                                  struct device_attribute *attr, char *buf)
1605 {
1606         u32 reg = 0;
1607         struct ipw_priv *p = d->driver_data;
1608
1609         reg = ipw_read_reg32(p, 0x301100);
1610         return sprintf(buf, "0x%08x\n", reg);
1611 }
1612 static ssize_t store_mem_gpio_reg(struct device *d,
1613                                   struct device_attribute *attr,
1614                                   const char *buf, size_t count)
1615 {
1616         u32 reg;
1617         struct ipw_priv *p = d->driver_data;
1618
1619         sscanf(buf, "%x", &reg);
1620         ipw_write_reg32(p, 0x301100, reg);
1621         return strnlen(buf, count);
1622 }
1623
1624 static DEVICE_ATTR(mem_gpio_reg, S_IWUSR | S_IRUGO,
1625                    show_mem_gpio_reg, store_mem_gpio_reg);
1626
1627 static ssize_t show_indirect_dword(struct device *d,
1628                                    struct device_attribute *attr, char *buf)
1629 {
1630         u32 reg = 0;
1631         struct ipw_priv *priv = d->driver_data;
1632
1633         if (priv->status & STATUS_INDIRECT_DWORD)
1634                 reg = ipw_read_reg32(priv, priv->indirect_dword);
1635         else
1636                 reg = 0;
1637
1638         return sprintf(buf, "0x%08x\n", reg);
1639 }
1640 static ssize_t store_indirect_dword(struct device *d,
1641                                     struct device_attribute *attr,
1642                                     const char *buf, size_t count)
1643 {
1644         struct ipw_priv *priv = d->driver_data;
1645
1646         sscanf(buf, "%x", &priv->indirect_dword);
1647         priv->status |= STATUS_INDIRECT_DWORD;
1648         return strnlen(buf, count);
1649 }
1650
1651 static DEVICE_ATTR(indirect_dword, S_IWUSR | S_IRUGO,
1652                    show_indirect_dword, store_indirect_dword);
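/*
 * The indirect_dword attribute is a two-step debug hook: writing a hex
 * address stores it in priv->indirect_dword and sets STATUS_INDIRECT_DWORD;
 * a subsequent read of the attribute returns ipw_read_reg32() of that
 * address.  The indirect_byte and direct_dword attributes below follow the
 * same pattern, using ipw_read_reg8() and ipw_read32() respectively.
 */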
1653
1654 static ssize_t show_indirect_byte(struct device *d,
1655                                   struct device_attribute *attr, char *buf)
1656 {
1657         u8 reg = 0;
1658         struct ipw_priv *priv = d->driver_data;
1659
1660         if (priv->status & STATUS_INDIRECT_BYTE)
1661                 reg = ipw_read_reg8(priv, priv->indirect_byte);
1662         else
1663                 reg = 0;
1664
1665         return sprintf(buf, "0x%02x\n", reg);
1666 }
1667 static ssize_t store_indirect_byte(struct device *d,
1668                                    struct device_attribute *attr,
1669                                    const char *buf, size_t count)
1670 {
1671         struct ipw_priv *priv = d->driver_data;
1672
1673         sscanf(buf, "%x", &priv->indirect_byte);
1674         priv->status |= STATUS_INDIRECT_BYTE;
1675         return strnlen(buf, count);
1676 }
1677
1678 static DEVICE_ATTR(indirect_byte, S_IWUSR | S_IRUGO,
1679                    show_indirect_byte, store_indirect_byte);
1680
1681 static ssize_t show_direct_dword(struct device *d,
1682                                  struct device_attribute *attr, char *buf)
1683 {
1684         u32 reg = 0;
1685         struct ipw_priv *priv = d->driver_data;
1686
1687         if (priv->status & STATUS_DIRECT_DWORD)
1688                 reg = ipw_read32(priv, priv->direct_dword);
1689         else
1690                 reg = 0;
1691
1692         return sprintf(buf, "0x%08x\n", reg);
1693 }
1694 static ssize_t store_direct_dword(struct device *d,
1695                                   struct device_attribute *attr,
1696                                   const char *buf, size_t count)
1697 {
1698         struct ipw_priv *priv = d->driver_data;
1699
1700         sscanf(buf, "%x", &priv->direct_dword);
1701         priv->status |= STATUS_DIRECT_DWORD;
1702         return strnlen(buf, count);
1703 }
1704
1705 static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO,
1706                    show_direct_dword, store_direct_dword);
1707
1708 static int rf_kill_active(struct ipw_priv *priv)
1709 {
1710         if (0 == (ipw_read32(priv, 0x30) & 0x10000))
1711                 priv->status |= STATUS_RF_KILL_HW;
1712         else
1713                 priv->status &= ~STATUS_RF_KILL_HW;
1714
1715         return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
1716 }
1717
1718 static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
1719                             char *buf)
1720 {
1721         /* 0 - RF kill not enabled
1722            1 - SW based RF kill active (sysfs)
1723            2 - HW based RF kill active
1724            3 - Both HW and SW based RF kill active */
1725         struct ipw_priv *priv = d->driver_data;
1726         int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
1727             (rf_kill_active(priv) ? 0x2 : 0x0);
1728         return sprintf(buf, "%i\n", val);
1729 }
1730
1731 static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
1732 {
1733         if ((disable_radio ? 1 : 0) ==
1734             ((priv->status & STATUS_RF_KILL_SW) ? 1 : 0))
1735                 return 0;
1736
1737         IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO  %s\n",
1738                           disable_radio ? "OFF" : "ON");
1739
1740         if (disable_radio) {
1741                 priv->status |= STATUS_RF_KILL_SW;
1742
1743                 if (priv->workqueue)
1744                         cancel_delayed_work(&priv->request_scan);
1745                 queue_work(priv->workqueue, &priv->down);
1746         } else {
1747                 priv->status &= ~STATUS_RF_KILL_SW;
1748                 if (rf_kill_active(priv)) {
1749                         IPW_DEBUG_RF_KILL("Can not turn radio back on - "
1750                                           "disabled by HW switch\n");
1751                         /* Make sure the RF_KILL check timer is running */
1752                         cancel_delayed_work(&priv->rf_kill);
1753                         queue_delayed_work(priv->workqueue, &priv->rf_kill,
1754                                            2 * HZ);
1755                 } else
1756                         queue_work(priv->workqueue, &priv->up);
1757         }
1758
1759         return 1;
1760 }
1761
1762 static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
1763                              const char *buf, size_t count)
1764 {
1765         struct ipw_priv *priv = d->driver_data;
1766
1767         ipw_radio_kill_sw(priv, buf[0] == '1');
1768
1769         return count;
1770 }
1771
1772 static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
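/*
 * Typical sysfs usage (illustrative; the exact sysfs path depends on how the
 * device is enumerated): writing '1' to rf_kill asserts the software RF kill
 * and brings the radio down via the 'down' work item, writing '0' clears it;
 * reading back reports the 0-3 bitmask described above in show_rf_kill(),
 * combining the SW state with the hardware switch state.
 */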
1773
1774 static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr,
1775                                char *buf)
1776 {
1777         struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1778         int pos = 0, len = 0;
1779         if (priv->config & CFG_SPEED_SCAN) {
1780                 while (priv->speed_scan[pos] != 0)
1781                         len += sprintf(&buf[len], "%d ",
1782                                        priv->speed_scan[pos++]);
1783                 return len + sprintf(&buf[len], "\n");
1784         }
1785
1786         return sprintf(buf, "0\n");
1787 }
1788
1789 static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr,
1790                                 const char *buf, size_t count)
1791 {
1792         struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1793         int channel, pos = 0;
1794         const char *p = buf;
1795
1796         /* list of space separated channels to scan, optionally ending with 0 */
1797         while ((channel = simple_strtol(p, NULL, 0))) {
1798                 if (pos == MAX_SPEED_SCAN - 1) {
1799                         priv->speed_scan[pos] = 0;
1800                         break;
1801                 }
1802
1803                 if (ieee80211_is_valid_channel(priv->ieee, channel))
1804                         priv->speed_scan[pos++] = channel;
1805                 else
1806                         IPW_WARNING("Skipping invalid channel request: %d\n",
1807                                     channel);
1808                 p = strchr(p, ' ');
1809                 if (!p)
1810                         break;
1811                 while (*p == ' ' || *p == '\t')
1812                         p++;
1813         }
1814
1815         if (pos == 0)
1816                 priv->config &= ~CFG_SPEED_SCAN;
1817         else {
1818                 priv->speed_scan_pos = 0;
1819                 priv->config |= CFG_SPEED_SCAN;
1820         }
1821
1822         return count;
1823 }
1824
1825 static DEVICE_ATTR(speed_scan, S_IWUSR | S_IRUGO, show_speed_scan,
1826                    store_speed_scan);
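/*
 * Example (illustrative): writing a space-separated channel list such as
 * "1 6 11" to speed_scan restricts scanning to those channels and sets
 * CFG_SPEED_SCAN; writing "0" (or a list containing no valid channels)
 * clears CFG_SPEED_SCAN and restores full scanning.  Invalid channels are
 * skipped with a warning, as implemented in store_speed_scan() above.
 */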
1827
1828 static ssize_t show_net_stats(struct device *d, struct device_attribute *attr,
1829                               char *buf)
1830 {
1831         struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1832         return sprintf(buf, "%c\n", (priv->config & CFG_NET_STATS) ? '1' : '0');
1833 }
1834
1835 static ssize_t store_net_stats(struct device *d, struct device_attribute *attr,
1836                                const char *buf, size_t count)
1837 {
1838         struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1839         if (buf[0] == '1')
1840                 priv->config |= CFG_NET_STATS;
1841         else
1842                 priv->config &= ~CFG_NET_STATS;
1843
1844         return count;
1845 }
1846
1847 static DEVICE_ATTR(net_stats, S_IWUSR | S_IRUGO,
1848                    show_net_stats, store_net_stats);
1849
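/*
 * Report the current association state to user space through the wireless
 * extensions SIOCGIWAP event: the BSSID when associated, or an all-zero
 * address when not.
 */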
1850 static void notify_wx_assoc_event(struct ipw_priv *priv)
1851 {
1852         union iwreq_data wrqu;
1853         wrqu.ap_addr.sa_family = ARPHRD_ETHER;
1854         if (priv->status & STATUS_ASSOCIATED)
1855                 memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
1856         else
1857                 memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
1858         wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
1859 }
1860
1861 static void ipw_irq_tasklet(struct ipw_priv *priv)
1862 {
1863         u32 inta, inta_mask, handled = 0;
1864         unsigned long flags;
1865         int rc = 0;
1866
1867         spin_lock_irqsave(&priv->irq_lock, flags);
1868
1869         inta = ipw_read32(priv, IPW_INTA_RW);
1870         inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
1871         inta &= (IPW_INTA_MASK_ALL & inta_mask);
1872
1873         /* Add any cached INTA values that need to be handled */
1874         inta |= priv->isr_inta;
1875
1876         spin_unlock_irqrestore(&priv->irq_lock, flags);
1877
1878         spin_lock_irqsave(&priv->lock, flags);
1879
1880         /* handle all the justifications for the interrupt */
1881         if (inta & IPW_INTA_BIT_RX_TRANSFER) {
1882                 ipw_rx(priv);
1883                 handled |= IPW_INTA_BIT_RX_TRANSFER;
1884         }
1885
1886         if (inta & IPW_INTA_BIT_TX_CMD_QUEUE) {
1887                 IPW_DEBUG_HC("Command completed.\n");
1888                 rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
1889                 priv->status &= ~STATUS_HCMD_ACTIVE;
1890                 wake_up_interruptible(&priv->wait_command_queue);
1891                 handled |= IPW_INTA_BIT_TX_CMD_QUEUE;
1892         }
1893
1894         if (inta & IPW_INTA_BIT_TX_QUEUE_1) {
1895                 IPW_DEBUG_TX("TX_QUEUE_1\n");
1896                 rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
1897                 handled |= IPW_INTA_BIT_TX_QUEUE_1;
1898         }
1899
1900         if (inta & IPW_INTA_BIT_TX_QUEUE_2) {
1901                 IPW_DEBUG_TX("TX_QUEUE_2\n");
1902                 rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
1903                 handled |= IPW_INTA_BIT_TX_QUEUE_2;
1904         }
1905
1906         if (inta & IPW_INTA_BIT_TX_QUEUE_3) {
1907                 IPW_DEBUG_TX("TX_QUEUE_3\n");
1908                 rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
1909                 handled |= IPW_INTA_BIT_TX_QUEUE_3;
1910         }
1911
1912         if (inta & IPW_INTA_BIT_TX_QUEUE_4) {
1913                 IPW_DEBUG_TX("TX_QUEUE_4\n");
1914                 rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
1915                 handled |= IPW_INTA_BIT_TX_QUEUE_4;
1916         }
1917
1918         if (inta & IPW_INTA_BIT_STATUS_CHANGE) {
1919                 IPW_WARNING("STATUS_CHANGE\n");
1920                 handled |= IPW_INTA_BIT_STATUS_CHANGE;
1921         }
1922
1923         if (inta & IPW_INTA_BIT_BEACON_PERIOD_EXPIRED) {
1924                 IPW_WARNING("BEACON_PERIOD_EXPIRED\n");
1925                 handled |= IPW_INTA_BIT_BEACON_PERIOD_EXPIRED;
1926         }
1927
1928         if (inta & IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) {
1929                 IPW_WARNING("HOST_CMD_DONE\n");
1930                 handled |= IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE;
1931         }
1932
1933         if (inta & IPW_INTA_BIT_FW_INITIALIZATION_DONE) {
1934                 IPW_WARNING("FW_INITIALIZATION_DONE\n");
1935                 handled |= IPW_INTA_BIT_FW_INITIALIZATION_DONE;
1936         }
1937
1938         if (inta & IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) {
1939                 IPW_WARNING("PHY_OFF_DONE\n");
1940                 handled |= IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE;
1941         }
1942
1943         if (inta & IPW_INTA_BIT_RF_KILL_DONE) {
1944                 IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
1945                 priv->status |= STATUS_RF_KILL_HW;
1946                 wake_up_interruptible(&priv->wait_command_queue);
1947                 priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
1948                 cancel_delayed_work(&priv->request_scan);
1949                 schedule_work(&priv->link_down);
1950                 queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ);
1951                 handled |= IPW_INTA_BIT_RF_KILL_DONE;
1952         }
1953
1954         if (inta & IPW_INTA_BIT_FATAL_ERROR) {
1955                 IPW_WARNING("Firmware error detected.  Restarting.\n");
1956                 if (priv->error) {
1957                         IPW_DEBUG_FW("Sysfs 'error' log already exists.\n");
1958                         if (ipw_debug_level & IPW_DL_FW_ERRORS) {
1959                                 struct ipw_fw_error *error =
1960                                     ipw_alloc_error_log(priv);
1961                                 ipw_dump_error_log(priv, error);
1962                                 kfree(error);
1963                         }
1964                 } else {
1965                         priv->error = ipw_alloc_error_log(priv);
1966                         if (priv->error)
1967                                 IPW_DEBUG_FW("Sysfs 'error' log captured.\n");
1968                         else
1969                                 IPW_DEBUG_FW("Error allocating sysfs 'error' "
1970                                              "log.\n");
1971                         if (ipw_debug_level & IPW_DL_FW_ERRORS)
1972                                 ipw_dump_error_log(priv, priv->error);
1973                 }
1974
1975                 /* XXX: If hardware encryption is for WPA/WPA2,
1976                  * we have to notify the supplicant. */
1977                 if (priv->ieee->sec.encrypt) {
1978                         priv->status &= ~STATUS_ASSOCIATED;
1979                         notify_wx_assoc_event(priv);
1980                 }
1981
1982                 /* Keep the restart process from trying to send host
1983                  * commands by clearing the INIT status bit */
1984                 priv->status &= ~STATUS_INIT;
1985
1986                 /* Cancel currently queued command. */
1987                 priv->status &= ~STATUS_HCMD_ACTIVE;
1988                 wake_up_interruptible(&priv->wait_command_queue);
1989
1990                 queue_work(priv->workqueue, &priv->adapter_restart);
1991                 handled |= IPW_INTA_BIT_FATAL_ERROR;
1992         }
1993
1994         if (inta & IPW_INTA_BIT_PARITY_ERROR) {
1995                 IPW_ERROR("Parity error\n");
1996                 handled |= IPW_INTA_BIT_PARITY_ERROR;
1997         }
1998
1999         if (handled != inta) {
2000                 IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
2001         }
2002
2003         spin_unlock_irqrestore(&priv->lock, flags);
2004
2005         /* enable all interrupts */
2006         ipw_enable_interrupts(priv);
2007 }
2008
2009 #define IPW_CMD(x) case IPW_CMD_ ## x : return #x
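/*
 * IPW_CMD() pastes the command name onto the IPW_CMD_ prefix and stringifies
 * it, so IPW_CMD(SSID); expands to "case IPW_CMD_SSID : return "SSID";",
 * keeping the switch below compact.
 */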
2010 static char *get_cmd_string(u8 cmd)
2011 {
2012         switch (cmd) {
2013                 IPW_CMD(HOST_COMPLETE);
2014                 IPW_CMD(POWER_DOWN);
2015                 IPW_CMD(SYSTEM_CONFIG);
2016                 IPW_CMD(MULTICAST_ADDRESS);
2017                 IPW_CMD(SSID);
2018                 IPW_CMD(ADAPTER_ADDRESS);
2019                 IPW_CMD(PORT_TYPE);
2020                 IPW_CMD(RTS_THRESHOLD);
2021                 IPW_CMD(FRAG_THRESHOLD);
2022                 IPW_CMD(POWER_MODE);
2023                 IPW_CMD(WEP_KEY);
2024                 IPW_CMD(TGI_TX_KEY);
2025                 IPW_CMD(SCAN_REQUEST);
2026                 IPW_CMD(SCAN_REQUEST_EXT);
2027                 IPW_CMD(ASSOCIATE);
2028                 IPW_CMD(SUPPORTED_RATES);
2029                 IPW_CMD(SCAN_ABORT);
2030                 IPW_CMD(TX_FLUSH);
2031                 IPW_CMD(QOS_PARAMETERS);
2032                 IPW_CMD(DINO_CONFIG);
2033                 IPW_CMD(RSN_CAPABILITIES);
2034                 IPW_CMD(RX_KEY);
2035                 IPW_CMD(CARD_DISABLE);
2036                 IPW_CMD(SEED_NUMBER);
2037                 IPW_CMD(TX_POWER);
2038                 IPW_CMD(COUNTRY_INFO);
2039                 IPW_CMD(AIRONET_INFO);
2040                 IPW_CMD(AP_TX_POWER);
2041                 IPW_CMD(CCKM_INFO);
2042                 IPW_CMD(CCX_VER_INFO);
2043                 IPW_CMD(SET_CALIBRATION);
2044                 IPW_CMD(SENSITIVITY_CALIB);
2045                 IPW_CMD(RETRY_LIMIT);
2046                 IPW_CMD(IPW_PRE_POWER_DOWN);
2047                 IPW_CMD(VAP_BEACON_TEMPLATE);
2048                 IPW_CMD(VAP_DTIM_PERIOD);
2049                 IPW_CMD(EXT_SUPPORTED_RATES);
2050                 IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT);
2051                 IPW_CMD(VAP_QUIET_INTERVALS);
2052                 IPW_CMD(VAP_CHANNEL_SWITCH);
2053                 IPW_CMD(VAP_MANDATORY_CHANNELS);
2054                 IPW_CMD(VAP_CELL_PWR_LIMIT);
2055                 IPW_CMD(VAP_CF_PARAM_SET);
2056                 IPW_CMD(VAP_SET_BEACONING_STATE);
2057                 IPW_CMD(MEASUREMENT);
2058                 IPW_CMD(POWER_CAPABILITY);
2059                 IPW_CMD(SUPPORTED_CHANNELS);
2060                 IPW_CMD(TPC_REPORT);
2061                 IPW_CMD(WME_INFO);
2062                 IPW_CMD(PRODUCTION_COMMAND);
2063         default:
2064                 return "UNKNOWN";
2065         }
2066 }
2067
2068 #define HOST_COMPLETE_TIMEOUT HZ
2069
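/*
 * Host command flow (summary of the code below): __ipw_send_cmd() marks
 * STATUS_HCMD_ACTIVE, optionally records the command in the cmdlog ring,
 * queues it to the firmware with ipw_queue_tx_hcmd(), and then sleeps on
 * wait_command_queue for up to HOST_COMPLETE_TIMEOUT.  The TX_CMD_QUEUE
 * branch of ipw_irq_tasklet() clears STATUS_HCMD_ACTIVE and performs the
 * wake-up when the firmware completes the command.
 */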
2070 static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
2071 {
2072         int rc = 0;
2073         unsigned long flags;
2074
2075         spin_lock_irqsave(&priv->lock, flags);
2076         if (priv->status & STATUS_HCMD_ACTIVE) {
2077                 IPW_ERROR("Failed to send %s: Already sending a command.\n",
2078                           get_cmd_string(cmd->cmd));
2079                 spin_unlock_irqrestore(&priv->lock, flags);
2080                 return -EAGAIN;
2081         }
2082
2083         priv->status |= STATUS_HCMD_ACTIVE;
2084
2085         if (priv->cmdlog) {
2086                 priv->cmdlog[priv->cmdlog_pos].jiffies = jiffies;
2087                 priv->cmdlog[priv->cmdlog_pos].cmd.cmd = cmd->cmd;
2088                 priv->cmdlog[priv->cmdlog_pos].cmd.len = cmd->len;
2089                 memcpy(priv->cmdlog[priv->cmdlog_pos].cmd.param, cmd->param,
2090                        cmd->len);
2091                 priv->cmdlog[priv->cmdlog_pos].retcode = -1;
2092         }
2093
2094         IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n",
2095                      get_cmd_string(cmd->cmd), cmd->cmd, cmd->len,
2096                      priv->status);
2097
2098 #ifndef DEBUG_CMD_WEP_KEY
2099         if (cmd->cmd == IPW_CMD_WEP_KEY)
2100                 IPW_DEBUG_HC("WEP_KEY command masked out for secure.\n");
2101         else
2102 #endif
2103                 printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);
2104
2105         rc = ipw_queue_tx_hcmd(priv, cmd->cmd, cmd->param, cmd->len, 0);
2106         if (rc) {
2107                 priv->status &= ~STATUS_HCMD_ACTIVE;
2108                 IPW_ERROR("Failed to send %s: Reason %d\n",
2109                           get_cmd_string(cmd->cmd), rc);
2110                 spin_unlock_irqrestore(&priv->lock, flags);
2111                 goto exit;
2112         }
2113         spin_unlock_irqrestore(&priv->lock, flags);
2114
2115         rc = wait_event_interruptible_timeout(priv->wait_command_queue,
2116                                               !(priv->
2117                                                 status & STATUS_HCMD_ACTIVE),
2118                                               HOST_COMPLETE_TIMEOUT);
2119         if (rc == 0) {
2120                 spin_lock_irqsave(&priv->lock, flags);
2121                 if (priv->status & STATUS_HCMD_ACTIVE) {
2122                         IPW_ERROR("Failed to send %s: Command timed out.\n",
2123                                   get_cmd_string(cmd->cmd));
2124                         priv->status &= ~STATUS_HCMD_ACTIVE;
2125                         spin_unlock_irqrestore(&priv->lock, flags);
2126                         rc = -EIO;
2127                         goto exit;
2128                 }
2129                 spin_unlock_irqrestore(&priv->lock, flags);
2130         } else
2131                 rc = 0;
2132
2133         if (priv->status & STATUS_RF_KILL_HW) {
2134                 IPW_ERROR("Failed to send %s: Aborted due to RF kill switch.\n",
2135                           get_cmd_string(cmd->cmd));
2136                 rc = -EIO;
2137                 goto exit;
2138         }
2139
2140       exit:
2141         if (priv->cmdlog) {
2142                 priv->cmdlog[priv->cmdlog_pos++].retcode = rc;
2143                 priv->cmdlog_pos %= priv->cmdlog_len;
2144         }
2145         return rc;
2146 }
2147
2148 static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command)
2149 {
2150         struct host_cmd cmd = {
2151                 .cmd = command,
2152         };
2153
2154         return __ipw_send_cmd(priv, &cmd);
2155 }
2156
2157 static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 command, u8 len,
2158                             void *data)
2159 {
2160         struct host_cmd cmd = {
2161                 .cmd = command,
2162                 .len = len,
2163                 .param = data,
2164         };
2165
2166         return __ipw_send_cmd(priv, &cmd);
2167 }
2168
2169 static int ipw_send_host_complete(struct ipw_priv *priv)
2170 {
2171         if (!priv) {
2172                 IPW_ERROR("Invalid args\n");
2173                 return -1;
2174         }
2175
2176         return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE);
2177 }
2178
2179 static int ipw_send_system_config(struct ipw_priv *priv)
2180 {
2181         return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG,
2182                                 sizeof(priv->sys_config),
2183                                 &priv->sys_config);
2184 }
2185
2186 static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
2187 {
2188         if (!priv || !ssid) {
2189                 IPW_ERROR("Invalid args\n");
2190                 return -1;
2191         }
2192
2193         return ipw_send_cmd_pdu(priv, IPW_CMD_SSID, min(len, IW_ESSID_MAX_SIZE),
2194                                 ssid);
2195 }
2196
2197 static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
2198 {
2199         if (!priv || !mac) {
2200                 IPW_ERROR("Invalid args\n");
2201                 return -1;
2202         }
2203
2204         IPW_DEBUG_INFO("%s: Setting MAC to " MAC_FMT "\n",
2205                        priv->net_dev->name, MAC_ARG(mac));
2206
2207         return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac);
2208 }
2209
2210 /*
2211  * NOTE: This must be executed from our workqueue, as it results in udelay
2212  * being called, which may corrupt the keyboard if executed on the default
2213  * workqueue.
2214  */
2215 static void ipw_adapter_restart(void *adapter)
2216 {
2217         struct ipw_priv *priv = adapter;
2218
2219         if (priv->status & STATUS_RF_KILL_MASK)
2220                 return;
2221
2222         ipw_down(priv);
2223
2224         if (priv->assoc_network &&
2225             (priv->assoc_network->capability & WLAN_CAPABILITY_IBSS))
2226                 ipw_remove_current_network(priv);
2227
2228         if (ipw_up(priv)) {
2229                 IPW_ERROR("Failed to up device\n");
2230                 return;
2231         }
2232 }
2233
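/*
 * Work handler wrapping ipw_adapter_restart() under priv->mutex.  Restarts
 * are requested by queueing priv->adapter_restart on priv->workqueue, as is
 * done for example in the fatal-error branch of ipw_irq_tasklet() and in
 * ipw_scan_check() below.
 */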
2234 static void ipw_bg_adapter_restart(struct work_struct *work)
2235 {
2236         struct ipw_priv *priv =
2237                 container_of(work, struct ipw_priv, adapter_restart);
2238         mutex_lock(&priv->mutex);
2239         ipw_adapter_restart(priv);
2240         mutex_unlock(&priv->mutex);
2241 }
2242
2243 #define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
2244
2245 static void ipw_scan_check(void *data)
2246 {
2247         struct ipw_priv *priv = data;
2248         if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
2249                 IPW_DEBUG_SCAN("Scan completion watchdog resetting "
2250                                "adapter after (%dms).\n",
2251                                jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2252                 queue_work(priv->workqueue, &priv->adapter_restart);
2253         }
2254 }
2255
2256 static void ipw_bg_scan_check(struct work_struct *work)
2257 {
2258         struct ipw_priv *priv =
2259                 container_of(work, struct ipw_priv, scan_check.work);
2260         mutex_lock(&priv->mutex);
2261         ipw_scan_check(priv);
2262         mutex_unlock(&priv->mutex);
2263 }
2264
2265 static int ipw_send_scan_request_ext(struct ipw_priv *priv,
2266                                      struct ipw_scan_request_ext *request)
2267 {
2268         return ipw_send_cmd_pdu(priv, IPW_CMD_SCAN_REQUEST_EXT,
2269                                 sizeof(*request), request);
2270 }
2271
2272 static int ipw_send_scan_abort(struct ipw_priv *priv)
2273 {
2274         if (!priv) {
2275                 IPW_ERROR("Invalid args\n");
2276                 return -1;
2277         }
2278
2279         return ipw_send_cmd_simple(priv, IPW_CMD_SCAN_ABORT);
2280 }
2281
2282 static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
2283 {
2284         struct ipw_sensitivity_calib calib = {
2285                 .beacon_rssi_raw = cpu_to_le16(sens),
2286         };
2287
2288         return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib),
2289                                 &calib);
2290 }
2291
2292 static int ipw_send_associate(struct ipw_priv *priv,
2293                               struct ipw_associate *associate)
2294 {
2295         struct ipw_associate tmp_associate;
2296
2297         if (!priv || !associate) {
2298                 IPW_ERROR("Invalid args\n");
2299                 return -1;
2300         }
2301
2302         memcpy(&tmp_associate, associate, sizeof(*associate));
2303         tmp_associate.policy_support =
2304             cpu_to_le16(tmp_associate.policy_support);
2305         tmp_associate.assoc_tsf_msw = cpu_to_le32(tmp_associate.assoc_tsf_msw);
2306         tmp_associate.assoc_tsf_lsw = cpu_to_le32(tmp_associate.assoc_tsf_lsw);
2307         tmp_associate.capability = cpu_to_le16(tmp_associate.capability);
2308         tmp_associate.listen_interval =
2309             cpu_to_le16(tmp_associate.listen_interval);
2310         tmp_associate.beacon_interval =
2311             cpu_to_le16(tmp_associate.beacon_interval);
2312         tmp_associate.atim_window = cpu_to_le16(tmp_associate.atim_window);
2313
2314         return ipw_send_cmd_pdu(priv, IPW_CMD_ASSOCIATE, sizeof(tmp_associate),
2315                                 &tmp_associate);
2316 }
2317
2318 static int ipw_send_supported_rates(struct ipw_priv *priv,
2319                                     struct ipw_supported_rates *rates)
2320 {
2321         if (!priv || !rates) {
2322                 IPW_ERROR("Invalid args\n");
2323                 return -1;
2324         }
2325
2326         return ipw_send_cmd_pdu(priv, IPW_CMD_SUPPORTED_RATES, sizeof(*rates),
2327                                 rates);
2328 }
2329
2330 static int ipw_set_random_seed(struct ipw_priv *priv)
2331 {
2332         u32 val;
2333
2334         if (!priv) {
2335                 IPW_ERROR("Invalid args\n");
2336                 return -1;
2337         }
2338
2339         get_random_bytes(&val, sizeof(val));
2340
2341         return ipw_send_cmd_pdu(priv, IPW_CMD_SEED_NUMBER, sizeof(val), &val);
2342 }
2343
2344 static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
2345 {
2346         if (!priv) {
2347                 IPW_ERROR("Invalid args\n");
2348                 return -1;
2349         }
2350
2351         phy_off = cpu_to_le32(phy_off);
2352         return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(phy_off),
2353                                 &phy_off);
2354 }
2355
2356 static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
2357 {
2358         if (!priv || !power) {
2359                 IPW_ERROR("Invalid args\n");
2360                 return -1;
2361         }
2362
2363         return ipw_send_cmd_pdu(priv, IPW_CMD_TX_POWER, sizeof(*power), power);
2364 }
2365
2366 static int ipw_set_tx_power(struct ipw_priv *priv)
2367 {
2368         const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
2369         struct ipw_tx_power tx_power;
2370         s8 max_power;
2371         int i;
2372
2373         memset(&tx_power, 0, sizeof(tx_power));
2374
2375         /* configure device for 'G' band */
2376         tx_power.ieee_mode = IPW_G_MODE;
2377         tx_power.num_channels = geo->bg_channels;
2378         for (i = 0; i < geo->bg_channels; i++) {
2379                 max_power = geo->bg[i].max_power;
2380                 tx_power.channels_tx_power[i].channel_number =
2381                     geo->bg[i].channel;
2382                 tx_power.channels_tx_power[i].tx_power = max_power ?
2383                     min(max_power, priv->tx_power) : priv->tx_power;
2384         }
2385         if (ipw_send_tx_power(priv, &tx_power))
2386                 return -EIO;
2387
2388         /* configure device to also handle 'B' band */
2389         tx_power.ieee_mode = IPW_B_MODE;
2390         if (ipw_send_tx_power(priv, &tx_power))
2391                 return -EIO;
2392
2393         /* configure device to also handle 'A' band */
2394         if (priv->ieee->abg_true) {
2395                 tx_power.ieee_mode = IPW_A_MODE;
2396                 tx_power.num_channels = geo->a_channels;
2397                 for (i = 0; i < tx_power.num_channels; i++) {
2398                         max_power = geo->a[i].max_power;
2399                         tx_power.channels_tx_power[i].channel_number =
2400                             geo->a[i].channel;
2401                         tx_power.channels_tx_power[i].tx_power = max_power ?
2402                             min(max_power, priv->tx_power) : priv->tx_power;
2403                 }
2404                 if (ipw_send_tx_power(priv, &tx_power))
2405                         return -EIO;
2406         }
2407         return 0;
2408 }
2409
2410 static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
2411 {
2412         struct ipw_rts_threshold rts_threshold = {
2413                 .rts_threshold = cpu_to_le16(rts),
2414         };
2415
2416         if (!priv) {
2417                 IPW_ERROR("Invalid args\n");
2418                 return -1;
2419         }
2420
2421         return ipw_send_cmd_pdu(priv, IPW_CMD_RTS_THRESHOLD,
2422                                 sizeof(rts_threshold), &rts_threshold);
2423 }
2424
2425 static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
2426 {
2427         struct ipw_frag_threshold frag_threshold = {
2428                 .frag_threshold = cpu_to_le16(frag),
2429         };
2430
2431         if (!priv) {
2432                 IPW_ERROR("Invalid args\n");
2433                 return -1;
2434         }
2435
2436         return ipw_send_cmd_pdu(priv, IPW_CMD_FRAG_THRESHOLD,
2437                                 sizeof(frag_threshold), &frag_threshold);
2438 }
2439
2440 static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
2441 {
2442         u32 param;
2443
2444         if (!priv) {
2445                 IPW_ERROR("Invalid args\n");
2446                 return -1;
2447         }
2448
2449         /* If on battery, set to power index 3; if on AC, set to CAM;
2450          * otherwise pass the user-supplied level through unchanged. */
2451         switch (mode) {
2452         case IPW_POWER_BATTERY:
2453                 param = IPW_POWER_INDEX_3;
2454                 break;
2455         case IPW_POWER_AC:
2456                 param = IPW_POWER_MODE_CAM;
2457                 break;
2458         default:
2459                 param = mode;
2460                 break;
2461         }
2462
2463         param = cpu_to_le32(param);
2464         return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param),
2465                                 &param);
2466 }
2467
2468 static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit)
2469 {
2470         struct ipw_retry_limit retry_limit = {
2471                 .short_retry_limit = slimit,
2472                 .long_retry_limit = llimit
2473         };
2474
2475         if (!priv) {
2476                 IPW_ERROR("Invalid args\n");
2477                 return -1;
2478         }
2479
2480         return ipw_send_cmd_pdu(priv, IPW_CMD_RETRY_LIMIT, sizeof(retry_limit),
2481                                 &retry_limit);
2482 }
2483
2484 /*
2485  * The IPW device contains a Microwire compatible EEPROM that stores
2486  * various data like the MAC address.  Usually the firmware has exclusive
2487  * access to the eeprom, but during device initialization (before the
2488  * device driver has sent the HostComplete command to the firmware) the
2489  * device driver has read access to the EEPROM by way of indirect addressing
2490  * through a couple of memory mapped registers.
2491  *
2492  * The following is a simplified implementation for pulling data out of
2493  * the eeprom, along with some helper functions to find information in
2494  * the per device private data's copy of the eeprom.
2495  *
2496  * NOTE: To better understand how these functions work (i.e. what is a chip
2497  *       select and why do you have to keep driving the eeprom clock?), read
2498  *       just about any data sheet for a Microwire compatible EEPROM.
2499  */
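/*
 * A read transaction, as implemented by eeprom_op() and eeprom_read_u16()
 * below, is bit-banged through FW_MEM_REG_EEPROM_ACCESS: assert chip select,
 * clock out a start bit followed by the two opcode bits and eight address
 * bits (MSB first), issue a dummy clock, then clock in the 16 data bits one
 * at a time while sampling EEPROM_BIT_DO.
 */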
2500
2501 /* write a 32 bit value into the indirect accessor register */
2502 static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
2503 {
2504         ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);
2505
2506         /* the eeprom requires some time to complete the operation */
2507         udelay(p->eeprom_delay);
2508
2509         return;
2510 }
2511
2512 /* perform a chip select operation */
2513 static void eeprom_cs(struct ipw_priv *priv)
2514 {
2515         eeprom_write_reg(priv, 0);
2516         eeprom_write_reg(priv, EEPROM_BIT_CS);
2517         eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2518         eeprom_write_reg(priv, EEPROM_BIT_CS);
2519 }
2520
2521 /* release the eeprom chip select */
2522 static void eeprom_disable_cs(struct ipw_priv *priv)
2523 {
2524         eeprom_write_reg(priv, EEPROM_BIT_CS);
2525         eeprom_write_reg(priv, 0);
2526         eeprom_write_reg(priv, EEPROM_BIT_SK);
2527 }
2528
2529 /* push a single bit down to the eeprom */
2530 static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit)
2531 {
2532         int d = (bit ? EEPROM_BIT_DI : 0);
2533         eeprom_write_reg(p, EEPROM_BIT_CS | d);
2534         eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK);
2535 }
2536
2537 /* push an opcode followed by an address down to the eeprom */
2538 static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr)
2539 {
2540         int i;
2541
2542         eeprom_cs(priv);
2543         eeprom_write_bit(priv, 1);
2544         eeprom_write_bit(priv, op & 2);
2545         eeprom_write_bit(priv, op & 1);
2546         for (i = 7; i >= 0; i--) {
2547                 eeprom_write_bit(priv, addr & (1 << i));
2548         }
2549 }
2550
2551 /* pull 16 bits off the eeprom, one bit at a time */
2552 static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
2553 {
2554         int i;
2555         u16 r = 0;
2556
2557         /* Send READ Opcode */
2558         eeprom_op(priv, EEPROM_CMD_READ, addr);
2559
2560         /* Send dummy bit */
2561         eeprom_write_reg(priv, EEPROM_BIT_CS);
2562
2563         /* Read the byte off the eeprom one bit at a time */
2564         for (i = 0; i < 16; i++) {
2565                 u32 data = 0;
2566                 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2567                 eeprom_write_reg(priv, EEPROM_BIT_CS);
2568                 data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS);
2569                 r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0);
2570         }
2571
2572         /* Send another dummy bit */
2573         eeprom_write_reg(priv, 0);
2574         eeprom_disable_cs(priv);
2575
2576         return r;
2577 }
2578
2579 /* helper function for pulling the mac address out of the private */
2580 /* data's copy of the eeprom data                                 */
2581 static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
2582 {
2583         memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], 6);
2584 }
2585
2586 /*
2587  * Either the device driver (i.e. the host) or the firmware can
2588  * load eeprom data into the designated region in SRAM.  If neither
2589  * happens then the FW will shutdown with a fatal error.
2590  *
2591  * In order to signal the FW to load the EEPROM, the EEPROM_LOAD_DISABLE
2592  * region of shared SRAM needs to be non-zero.
2593  */
2594 static void ipw_eeprom_init_sram(struct ipw_priv *priv)
2595 {
2596         int i;
2597         u16 *eeprom = (u16 *) priv->eeprom;
2598
2599         IPW_DEBUG_TRACE(">>\n");
2600
2601         /* read entire contents of eeprom into private buffer */
2602         for (i = 0; i < 128; i++)
2603                 eeprom[i] = le16_to_cpu(eeprom_read_u16(priv, (u8) i));
2604
2605         /*
2606            If the data looks correct, then copy it to our private
2607            copy.  Otherwise let the firmware know to perform the operation
2608            on its own.
2609          */
2610         if (priv->eeprom[EEPROM_VERSION] != 0) {
2611                 IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
2612
2613                 /* write the eeprom data to sram */
2614                 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
2615                         ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]);
2616
2617                 /* Do not load eeprom data on fatal error or suspend */
2618                 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
2619         } else {
2620                 IPW_DEBUG_INFO("Enabling FW initialization of SRAM\n");
2621
2622                 /* Load eeprom data on fatal error or suspend */
2623                 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
2624         }
2625
2626         IPW_DEBUG_TRACE("<<\n");
2627 }
2628
2629 static void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
2630 {
2631         count >>= 2;
2632         if (!count)
2633                 return;
2634         _ipw_write32(priv, IPW_AUTOINC_ADDR, start);
2635         while (count--)
2636                 _ipw_write32(priv, IPW_AUTOINC_DATA, 0);
2637 }
2638
2639 static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
2640 {
2641         ipw_zero_memory(priv, IPW_SHARED_SRAM_DMA_CONTROL,
2642                         CB_NUMBER_OF_ELEMENTS_SMALL *
2643                         sizeof(struct command_block));
2644 }
2645
2646 static int ipw_fw_dma_enable(struct ipw_priv *priv)
2647 {                               /* start dma engine but no transfers yet */
2648
2649         IPW_DEBUG_FW(">> : \n");
2650
2651         /* Start the dma */
2652         ipw_fw_dma_reset_command_blocks(priv);
2653
2654         /* Write CB base address */
2655         ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL);
2656
2657         IPW_DEBUG_FW("<< : \n");
2658         return 0;
2659 }
2660
2661 static void ipw_fw_dma_abort(struct ipw_priv *priv)
2662 {
2663         u32 control = 0;
2664
2665         IPW_DEBUG_FW(">> :\n");
2666
2667         /* set the Stop and Abort bit */
2668         control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
2669         ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2670         priv->sram_desc.last_cb_index = 0;
2671
2672         IPW_DEBUG_FW("<< \n");
2673 }
2674
2675 static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
2676                                           struct command_block *cb)
2677 {
2678         u32 address =
2679             IPW_SHARED_SRAM_DMA_CONTROL +
2680             (sizeof(struct command_block) * index);
2681         IPW_DEBUG_FW(">> :\n");
2682
2683         ipw_write_indirect(priv, address, (u8 *) cb,
2684                            (int)sizeof(struct command_block));
2685
2686         IPW_DEBUG_FW("<< :\n");
2687         return 0;
2688
2689 }
2690
2691 static int ipw_fw_dma_kick(struct ipw_priv *priv)
2692 {
2693         u32 control = 0;
2694         u32 index = 0;
2695
2696         IPW_DEBUG_FW(">> :\n");
2697
2698         for (index = 0; index < priv->sram_desc.last_cb_index; index++)
2699                 ipw_fw_dma_write_command_block(priv, index,
2700                                                &priv->sram_desc.cb_list[index]);
2701
2702         /* Enable the DMA in the CSR register */
2703         ipw_clear_bit(priv, IPW_RESET_REG,
2704                       IPW_RESET_REG_MASTER_DISABLED |
2705                       IPW_RESET_REG_STOP_MASTER);
2706
2707         /* Set the Start bit. */
2708         control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
2709         ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2710
2711         IPW_DEBUG_FW("<< :\n");
2712         return 0;
2713 }
2714
2715 static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
2716 {
2717         u32 address;
2718         u32 register_value = 0;
2719         u32 cb_fields_address = 0;
2720
2721         IPW_DEBUG_FW(">> :\n");
2722         address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2723         IPW_DEBUG_FW_INFO("Current CB is 0x%x \n", address);
2724
2725         /* Read the DMA Control register */
2726         register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL);
2727         IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x \n", register_value);
2728
2729         /* Print the CB values */
2730         cb_fields_address = address;
2731         register_value = ipw_read_reg32(priv, cb_fields_address);
2732         IPW_DEBUG_FW_INFO("Current CB ControlField is 0x%x \n", register_value);
2733
2734         cb_fields_address += sizeof(u32);
2735         register_value = ipw_read_reg32(priv, cb_fields_address);
2736         IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x \n", register_value);
2737
2738         cb_fields_address += sizeof(u32);
2739         register_value = ipw_read_reg32(priv, cb_fields_address);
2740         IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x \n",
2741                           register_value);
2742
2743         cb_fields_address += sizeof(u32);
2744         register_value = ipw_read_reg32(priv, cb_fields_address);
2745         IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x \n", register_value);
2746
2747         IPW_DEBUG_FW(">> :\n");
2748 }
2749
2750 static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
2751 {
2752         u32 current_cb_address = 0;
2753         u32 current_cb_index = 0;
2754
2755         IPW_DEBUG_FW("<< :\n");
2756         current_cb_address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2757
2758         current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) /
2759             sizeof(struct command_block);
2760
2761         IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X \n",
2762                           current_cb_index, current_cb_address);
2763
2764         IPW_DEBUG_FW(">> :\n");
2765         return current_cb_index;
2766
2767 }
2768
2769 static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
2770                                         u32 src_address,
2771                                         u32 dest_address,
2772                                         u32 length,
2773                                         int interrupt_enabled, int is_last)
2774 {
2775
2776         u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
2777             CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
2778             CB_DEST_SIZE_LONG;
2779         struct command_block *cb;
2780         u32 last_cb_element = 0;
2781
2782         IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
2783                           src_address, dest_address, length);
2784
2785         if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL)
2786                 return -1;
2787
2788         last_cb_element = priv->sram_desc.last_cb_index;
2789         cb = &priv->sram_desc.cb_list[last_cb_element];
2790         priv->sram_desc.last_cb_index++;
2791
2792         /* Calculate the new CB control word */
2793         if (interrupt_enabled)
2794                 control |= CB_INT_ENABLED;
2795
2796         if (is_last)
2797                 control |= CB_LAST_VALID;
2798
2799         control |= length;
2800
2801         /* Calculate the CB Element's checksum value */
2802         cb->status = control ^ src_address ^ dest_address;
2803
2804         /* Copy the Source and Destination addresses */
2805         cb->dest_addr = dest_address;
2806         cb->source_addr = src_address;
2807
2808         /* Copy the Control Word last */
2809         cb->control = control;
2810
2811         return 0;
2812 }
2813
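/*
 * Split a physically contiguous source buffer into DMA command blocks:
 * full CB_MAX_LENGTH-sized chunks first, then one final block for any
 * remaining tail bytes.  Each chunk becomes one entry in the SRAM command
 * block list via ipw_fw_dma_add_command_block().
 */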
2814 static int ipw_fw_dma_add_buffer(struct ipw_priv *priv,
2815                                  u32 src_phys, u32 dest_address, u32 length)
2816 {
2817         u32 bytes_left = length;
2818         u32 src_offset = 0;
2819         u32 dest_offset = 0;
2820         int status = 0;
2821         IPW_DEBUG_FW(">> \n");
2822         IPW_DEBUG_FW_INFO("src_phys=0x%x dest_address=0x%x length=0x%x\n",
2823                           src_phys, dest_address, length);
2824         while (bytes_left > CB_MAX_LENGTH) {
2825                 status = ipw_fw_dma_add_command_block(priv,
2826                                                       src_phys + src_offset,
2827                                                       dest_address +
2828                                                       dest_offset,
2829                                                       CB_MAX_LENGTH, 0, 0);
2830                 if (status) {
2831                         IPW_DEBUG_FW_INFO(": Failed\n");
2832                         return -1;
2833                 } else
2834                         IPW_DEBUG_FW_INFO(": Added new cb\n");
2835
2836                 src_offset += CB_MAX_LENGTH;
2837                 dest_offset += CB_MAX_LENGTH;
2838                 bytes_left -= CB_MAX_LENGTH;
2839         }
2840
2841         /* add the buffer tail */
2842         if (bytes_left > 0) {
2843                 status =
2844                     ipw_fw_dma_add_command_block(priv, src_phys + src_offset,
2845                                                  dest_address + dest_offset,
2846                                                  bytes_left, 0, 0);
2847                 if (status) {
2848                         IPW_DEBUG_FW_INFO(": Failed on the buffer tail\n");
2849                         return -1;
2850                 } else
2851                         IPW_DEBUG_FW_INFO
2852                             (": Adding new cb - the buffer tail\n");
2853         }
2854
2855         IPW_DEBUG_FW("<< \n");
2856         return 0;
2857 }
2858
2859 static int ipw_fw_dma_wait(struct ipw_priv *priv)
2860 {
2861         u32 current_index = 0, previous_index;
2862         u32 watchdog = 0;
2863
2864         IPW_DEBUG_FW(">> : \n");
2865
2866         current_index = ipw_fw_dma_command_block_index(priv);
2867         IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n",
2868                           (int)priv->sram_desc.last_cb_index);
2869
2870         while (current_index < priv->sram_desc.last_cb_index) {
2871                 udelay(50);
2872                 previous_index = current_index;
2873                 current_index = ipw_fw_dma_command_block_index(priv);
2874
2875                 if (previous_index < current_index) {
2876                         watchdog = 0;
2877                         continue;
2878                 }
2879                 if (++watchdog > 400) {
2880                         IPW_DEBUG_FW_INFO("Timeout\n");
2881                         ipw_fw_dma_dump_command_block(priv);
2882                         ipw_fw_dma_abort(priv);
2883                         return -1;
2884                 }
2885         }
2886
2887         ipw_fw_dma_abort(priv);
2888
2889         /* Disable the DMA in the CSR register */
2890         ipw_set_bit(priv, IPW_RESET_REG,
2891                     IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER);
2892
2893         IPW_DEBUG_FW("<< dmaWaitSync \n");
2894         return 0;
2895 }
2896
2897 static void ipw_remove_current_network(struct ipw_priv *priv)
2898 {
2899         struct list_head *element, *safe;
2900         struct ieee80211_network *network = NULL;
2901         unsigned long flags;
2902
2903         spin_lock_irqsave(&priv->ieee->lock, flags);
2904         list_for_each_safe(element, safe, &priv->ieee->network_list) {
2905                 network = list_entry(element, struct ieee80211_network, list);
2906                 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
2907                         list_del(element);
2908                         list_add_tail(&network->list,
2909                                       &priv->ieee->network_free_list);
2910                 }
2911         }
2912         spin_unlock_irqrestore(&priv->ieee->lock, flags);
2913 }
2914
2915 /**
2916  * Check that card is still alive.
2917  * Reads debug register from domain0.
2918  * If card is present, pre-defined value should
2919  * be found there.
2920  *
2921  * @param priv
2922  * @return 1 if card is present, 0 otherwise
2923  */
2924 static inline int ipw_alive(struct ipw_priv *priv)
2925 {
2926         return ipw_read32(priv, 0x90) == 0xd55555d5;
2927 }
2928
2929 /* timeout in msec, attempted in 10-msec quanta */
2930 static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
2931                                int timeout)
2932 {
2933         int i = 0;
2934
2935         do {
2936                 if ((ipw_read32(priv, addr) & mask) == mask)
2937                         return i;
2938                 mdelay(10);
2939                 i += 10;
2940         } while (i < timeout);
2941
2942         return -ETIME;
2943 }
2944
2945 /* These functions load the firmware and microcode for the operation of
2946  * the ipw hardware.  They assume the buffer has all the bits for the
2947  * image and that the caller is handling the memory allocation and clean up.
2948  */
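/*
 * Rough sequence for loading the microcode (see ipw_load_ucode() below):
 * stop the bus master, clear the shared memory region, pulse the
 * halt/reset and baseband power-down registers, feed the image to the DINO
 * coprocessor as 16-bit little-endian words through
 * IPW_BASEBAND_CONTROL_STORE, then enable DINO and poll its control/status
 * register for the alive response.
 */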
2949
2950 static int ipw_stop_master(struct ipw_priv *priv)
2951 {
2952         int rc;
2953
2954         IPW_DEBUG_TRACE(">> \n");
2955         /* stop master. typical delay - 0 */
2956         ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
2957
2958         /* timeout is in msec, polled in 10-msec quanta */
2959         rc = ipw_poll_bit(priv, IPW_RESET_REG,
2960                           IPW_RESET_REG_MASTER_DISABLED, 100);
2961         if (rc < 0) {
2962                 IPW_ERROR("wait for stop master failed after 100ms\n");
2963                 return -1;
2964         }
2965
2966         IPW_DEBUG_INFO("stop master %dms\n", rc);
2967
2968         return rc;
2969 }
2970
2971 static void ipw_arc_release(struct ipw_priv *priv)
2972 {
2973         IPW_DEBUG_TRACE(">> \n");
2974         mdelay(5);
2975
2976         ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
2977
2978         /* no one knows timing, for safety add some delay */
2979         mdelay(5);
2980 }
2981
2982 struct fw_chunk {
2983         u32 address;
2984         u32 length;
2985 };
2986
2987 static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
2988 {
2989         int rc = 0, i, addr;
2990         u8 cr = 0;
2991         u16 *image;
2992
2993         image = (u16 *) data;
2994
2995         IPW_DEBUG_TRACE(">> \n");
2996
2997         rc = ipw_stop_master(priv);
2998
2999         if (rc < 0)
3000                 return rc;
3001
3002         for (addr = IPW_SHARED_LOWER_BOUND;
3003              addr < IPW_REGISTER_DOMAIN1_END; addr += 4) {
3004                 ipw_write32(priv, addr, 0);
3005         }
3006
3007         /* no ucode (yet) */
3008         memset(&priv->dino_alive, 0, sizeof(priv->dino_alive));
3009         /* destroy DMA queues */
3010         /* reset sequence */
3011
3012         ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_ON);
3013         ipw_arc_release(priv);
3014         ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_OFF);
3015         mdelay(1);
3016
3017         /* reset PHY */
3018         ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, IPW_BASEBAND_POWER_DOWN);
3019         mdelay(1);
3020
3021         ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, 0);
3022         mdelay(1);
3023
3024         /* enable ucode store */
3025         ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0x0);
3026         ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_CS);
3027         mdelay(1);
3028
3029         /* write ucode */
3030         /**
3031          * @bug
3032          * Do NOT set the indirect address register once and then
3033          * store data to the indirect data register in a loop.
3034          * It may seem reasonable, but in that case DINO does not
3035          * accept the ucode.  It is essential to set the address each time.
3036          */
3037         /* load new ipw uCode */
3038         for (i = 0; i < len / 2; i++)
3039                 ipw_write_reg16(priv, IPW_BASEBAND_CONTROL_STORE,
3040                                 cpu_to_le16(image[i]));
3041
3042         /* enable DINO */
3043         ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3044         ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM);
3045
3046         /* this is where the igx / win driver deviates from the VAP driver. */
3047
3048         /* wait for alive response */
3049         for (i = 0; i < 100; i++) {
3050                 /* poll for incoming data */
3051                 cr = ipw_read_reg8(priv, IPW_BASEBAND_CONTROL_STATUS);
3052                 if (cr & DINO_RXFIFO_DATA)
3053                         break;
3054                 mdelay(1);
3055         }
3056
3057         if (cr & DINO_RXFIFO_DATA) {
3058                 /* alive_command_response size is NOT a multiple of 4 */
3059                 u32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4];
3060
3061                 for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
3062                         response_buffer[i] =
3063                             le32_to_cpu(ipw_read_reg32(priv,
3064                                                        IPW_BASEBAND_RX_FIFO_READ));
3065                 memcpy(&priv->dino_alive, response_buffer,
3066                        sizeof(priv->dino_alive));
3067                 if (priv->dino_alive.alive_command == 1
3068                     && priv->dino_alive.ucode_valid == 1) {
3069                         rc = 0;
3070                         IPW_DEBUG_INFO
3071                             ("Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
3072                              "of %02d/%02d/%02d %02d:%02d\n",
3073                              priv->dino_alive.software_revision,
3074                              priv->dino_alive.software_revision,
3075                              priv->dino_alive.device_identifier,
3076                              priv->dino_alive.device_identifier,
3077                              priv->dino_alive.time_stamp[0],
3078                              priv->dino_alive.time_stamp[1],
3079                              priv->dino_alive.time_stamp[2],
3080                              priv->dino_alive.time_stamp[3],
3081                              priv->dino_alive.time_stamp[4]);
3082                 } else {
3083                         IPW_DEBUG_INFO("Microcode is not alive\n");
3084                         rc = -EINVAL;
3085                 }
3086         } else {
3087                 IPW_DEBUG_INFO("No alive response from DINO\n");
3088                 rc = -ETIME;
3089         }
3090
3091         /* disable DINO, otherwise for some reason
3092            the firmware has problems getting the alive response. */
3093         ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3094
3095         return rc;
3096 }
3097
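/* Copy the firmware image into a DMA-coherent buffer, walk the fw_chunk
 * headers it contains, queue one DMA transfer per chunk to the device
 * address each chunk specifies, then kick the DMA engine and wait for it
 * to finish. */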
3098 static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
3099 {
3100         int rc = -1;
3101         int offset = 0;
3102         struct fw_chunk *chunk;
3103         dma_addr_t shared_phys;
3104         u8 *shared_virt;
3105
3106         IPW_DEBUG_TRACE(">> : \n");
3107         shared_virt = pci_alloc_consistent(priv->pci_dev, len, &shared_phys);
3108
3109         if (!shared_virt)
3110                 return -ENOMEM;
3111
3112         memmove(shared_virt, data, len);
3113
3114         /* Start the DMA */
3115         rc = ipw_fw_dma_enable(priv);
3116
3117         if (priv->sram_desc.last_cb_index > 0) {
3118                 /* the DMA is already ready; this would be a bug. */
3119                 BUG();
3120                 goto out;
3121         }
3122
3123         do {
3124                 chunk = (struct fw_chunk *)(data + offset);
3125                 offset += sizeof(struct fw_chunk);
3126                 /* build DMA packet and queue up for sending */
3127                 /* DMA chunk->length bytes from data + offset
3128                  * to chunk->address */
3129                 /* DMA loading */
3130                 rc = ipw_fw_dma_add_buffer(priv, shared_phys + offset,
3131                                            le32_to_cpu(chunk->address),
3132                                            le32_to_cpu(chunk->length));
3133                 if (rc) {
3134                         IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
3135                         goto out;
3136                 }
3137
3138                 offset += le32_to_cpu(chunk->length);
3139         } while (offset < len);
3140
3141         /* Run the DMA and wait for the answer */
3142         rc = ipw_fw_dma_kick(priv);
3143         if (rc) {
3144                 IPW_ERROR("dmaKick Failed\n");
3145                 goto out;
3146         }
3147
3148         rc = ipw_fw_dma_wait(priv);
3149         if (rc) {
3150                 IPW_ERROR("dmaWaitSync Failed\n");
3151                 goto out;
3152         }
3153       out:
3154         pci_free_consistent(priv->pci_dev, len, shared_virt, shared_phys);
3155         return rc;
3156 }
3157
3158 /* stop nic */
3159 static int ipw_stop_nic(struct ipw_priv *priv)
3160 {
3161         int rc = 0;
3162
3163         /* stop */
3164         ipw_write32(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3165
3166         rc = ipw_poll_bit(priv, IPW_RESET_REG,
3167                           IPW_RESET_REG_MASTER_DISABLED, 500);
3168         if (rc < 0) {
3169                 IPW_ERROR("wait for reg master disabled failed after 500ms\n");
3170                 return rc;
3171         }
3172
3173         ipw_set_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3174
3175         return rc;
3176 }
3177
3178 static void ipw_start_nic(struct ipw_priv *priv)
3179 {
3180         IPW_DEBUG_TRACE(">>\n");
3181
3182         /* prvHwStartNic  release ARC */
3183         ipw_clear_bit(priv, IPW_RESET_REG,
3184                       IPW_RESET_REG_MASTER_DISABLED |
3185                       IPW_RESET_REG_STOP_MASTER |
3186                       CBD_RESET_REG_PRINCETON_RESET);
3187
3188         /* enable power management */
3189         ipw_set_bit(priv, IPW_GP_CNTRL_RW,
3190                     IPW_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
3191
3192         IPW_DEBUG_TRACE("<<\n");
3193 }
3194
3195 static int ipw_init_nic(struct ipw_priv *priv)
3196 {
3197         int rc;
3198
3199         IPW_DEBUG_TRACE(">>\n");
3200         /* reset */
3201         /*prvHwInitNic */
3202         /* set "initialization complete" bit to move adapter to D0 state */
3203         ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3204
3205         /* low-level PLL activation */
3206         ipw_write32(priv, IPW_READ_INT_REGISTER,
3207                     IPW_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
3208
3209         /* wait for clock stabilization */
3210         rc = ipw_poll_bit(priv, IPW_GP_CNTRL_RW,
3211                           IPW_GP_CNTRL_BIT_CLOCK_READY, 250);
3212         if (rc < 0)
3213                 IPW_DEBUG_INFO("FAILED wait for clock stabilization\n");
3214
3215         /* assert SW reset */
3216         ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_SW_RESET);
3217
3218         udelay(10);
3219
3220         /* set "initialization complete" bit to move adapter to D0 state */
3221         ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3222
3223         IPW_DEBUG_TRACE("<<\n");
3224         return 0;
3225 }
3226
3227 /* Call this function from process context; it will sleep in request_firmware.
3228  * Probe is an ok place to call this from.
3229  */
3230 static int ipw_reset_nic(struct ipw_priv *priv)
3231 {
3232         int rc = 0;
3233         unsigned long flags;
3234
3235         IPW_DEBUG_TRACE(">>\n");
3236
3237         rc = ipw_init_nic(priv);
3238
3239         spin_lock_irqsave(&priv->lock, flags);
3240         /* Clear the 'host command active' bit... */
3241         priv->status &= ~STATUS_HCMD_ACTIVE;
3242         wake_up_interruptible(&priv->wait_command_queue);
3243         priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
3244         wake_up_interruptible(&priv->wait_state);
3245         spin_unlock_irqrestore(&priv->lock, flags);
3246
3247         IPW_DEBUG_TRACE("<<\n");
3248         return rc;
3249 }
3250
3251
3252 struct ipw_fw {
3253         __le32 ver;
3254         __le32 boot_size;
3255         __le32 ucode_size;
3256         __le32 fw_size;
3257         u8 data[0];
3258 };
3259
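/* Ask the firmware loader for the named image and verify that it is large
 * enough to hold the ipw_fw header plus the boot, ucode and runtime
 * sections whose sizes the header declares. */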
3260 static int ipw_get_fw(struct ipw_priv *priv,
3261                       const struct firmware **raw, const char *name)
3262 {
3263         struct ipw_fw *fw;
3264         int rc;
3265
3266         /* ask firmware_class module to get the boot firmware off disk */
3267         rc = request_firmware(raw, name, &priv->pci_dev->dev);
3268         if (rc < 0) {
3269                 IPW_ERROR("%s request_firmware failed: Reason %d\n", name, rc);
3270                 return rc;
3271         }
3272
3273         if ((*raw)->size < sizeof(*fw)) {
3274                 IPW_ERROR("%s is too small (%zd)\n", name, (*raw)->size);
3275                 return -EINVAL;
3276         }
3277
3278         fw = (void *)(*raw)->data;
3279
3280         if ((*raw)->size < sizeof(*fw) + le32_to_cpu(fw->boot_size) +
3281             le32_to_cpu(fw->ucode_size) + le32_to_cpu(fw->fw_size)) {
3282                 IPW_ERROR("%s is too small or corrupt (%zd)\n",
3283                           name, (*raw)->size);
3284                 return -EINVAL;
3285         }
3286
3287         IPW_DEBUG_INFO("Read firmware '%s' image v%d.%d (%zd bytes)\n",
3288                        name,
3289                        le32_to_cpu(fw->ver) >> 16,
3290                        le32_to_cpu(fw->ver) & 0xff,
3291                        (*raw)->size - sizeof(*fw));
3292         return 0;
3293 }
3294
3295 #define IPW_RX_BUF_SIZE (3000)
3296
3297 static void ipw_rx_queue_reset(struct ipw_priv *priv,
3298                                       struct ipw_rx_queue *rxq)
3299 {
3300         unsigned long flags;
3301         int i;
3302
3303         spin_lock_irqsave(&rxq->lock, flags);
3304
3305         INIT_LIST_HEAD(&rxq->rx_free);
3306         INIT_LIST_HEAD(&rxq->rx_used);
3307
3308         /* Fill the rx_used queue with _all_ of the Rx buffers */
3309         for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
3310                 /* In the reset function, these buffers may have been allocated
3311                  * to an SKB, so we need to unmap and free potential storage */
3312                 if (rxq->pool[i].skb != NULL) {
3313                         pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
3314                                          IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
3315                         dev_kfree_skb(rxq->pool[i].skb);
3316                         rxq->pool[i].skb = NULL;
3317                 }
3318                 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3319         }
3320
3321         /* Set us so that we have processed and used all buffers, but have
3322          * not restocked the Rx queue with fresh buffers */
3323         rxq->read = rxq->write = 0;
3324         rxq->processed = RX_QUEUE_SIZE - 1;
3325         rxq->free_count = 0;
3326         spin_unlock_irqrestore(&rxq->lock, flags);
3327 }
3328
3329 #ifdef CONFIG_PM
3330 static int fw_loaded = 0;
3331 static const struct firmware *raw = NULL;
3332
3333 static void free_firmware(void)
3334 {
3335         if (fw_loaded) {
3336                 release_firmware(raw);
3337                 raw = NULL;
3338                 fw_loaded = 0;
3339         }
3340 }
3341 #else
3342 #define free_firmware() do {} while (0)
3343 #endif
3344
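/* Top-level firmware bring-up: pick the image for the current operating
 * mode, (re)initialize the Rx queue, reset and zero the NIC, DMA in the
 * boot loader, microcode and runtime firmware in turn, then set up the
 * Tx/Rx queues and enable interrupts.  A parity error during start-up
 * triggers up to three retries of the whole sequence. */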
3345 static int ipw_load(struct ipw_priv *priv)
3346 {
3347 #ifndef CONFIG_PM
3348         const struct firmware *raw = NULL;
3349 #endif
3350         struct ipw_fw *fw;
3351         u8 *boot_img, *ucode_img, *fw_img;
3352         u8 *name = NULL;
3353         int rc = 0, retries = 3;
3354
3355         switch (priv->ieee->iw_mode) {
3356         case IW_MODE_ADHOC:
3357                 name = "ipw2200-ibss.fw";
3358                 break;
3359 #ifdef CONFIG_IPW2200_MONITOR
3360         case IW_MODE_MONITOR:
3361                 name = "ipw2200-sniffer.fw";
3362                 break;
3363 #endif
3364         case IW_MODE_INFRA:
3365                 name = "ipw2200-bss.fw";
3366                 break;
3367         }
3368
3369         if (!name) {
3370                 rc = -EINVAL;
3371                 goto error;
3372         }
3373
3374 #ifdef CONFIG_PM
3375         if (!fw_loaded) {
3376 #endif
3377                 rc = ipw_get_fw(priv, &raw, name);
3378                 if (rc < 0)
3379                         goto error;
3380 #ifdef CONFIG_PM
3381         }
3382 #endif
3383
3384         fw = (void *)raw->data;
3385         boot_img = &fw->data[0];
3386         ucode_img = &fw->data[le32_to_cpu(fw->boot_size)];
3387         fw_img = &fw->data[le32_to_cpu(fw->boot_size) +
3388                            le32_to_cpu(fw->ucode_size)];
3389
3390         if (rc < 0)
3391                 goto error;
3392
3393         if (!priv->rxq)
3394                 priv->rxq = ipw_rx_queue_alloc(priv);
3395         else
3396                 ipw_rx_queue_reset(priv, priv->rxq);
3397         if (!priv->rxq) {
3398                 IPW_ERROR("Unable to initialize Rx queue\n");
3399                 goto error;
3400         }
3401
3402       retry:
3403         /* Ensure interrupts are disabled */
3404         ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3405         priv->status &= ~STATUS_INT_ENABLED;
3406
3407         /* ack pending interrupts */
3408         ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3409
3410         ipw_stop_nic(priv);
3411
3412         rc = ipw_reset_nic(priv);
3413         if (rc < 0) {
3414                 IPW_ERROR("Unable to reset NIC\n");
3415                 goto error;
3416         }
3417
3418         ipw_zero_memory(priv, IPW_NIC_SRAM_LOWER_BOUND,
3419                         IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND);
3420
3421         /* DMA the initial boot firmware into the device */
3422         rc = ipw_load_firmware(priv, boot_img, le32_to_cpu(fw->boot_size));
3423         if (rc < 0) {
3424                 IPW_ERROR("Unable to load boot firmware: %d\n", rc);
3425                 goto error;
3426         }
3427
3428         /* kick start the device */
3429         ipw_start_nic(priv);
3430
3431         /* wait for the device to finish its initial startup sequence */
3432         rc = ipw_poll_bit(priv, IPW_INTA_RW,
3433                           IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3434         if (rc < 0) {
3435                 IPW_ERROR("device failed to boot initial fw image\n");
3436                 goto error;
3437         }
3438         IPW_DEBUG_INFO("initial device response after %dms\n", rc);
3439
3440         /* ack fw init done interrupt */
3441         ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3442
3443         /* DMA the ucode into the device */
3444         rc = ipw_load_ucode(priv, ucode_img, le32_to_cpu(fw->ucode_size));
3445         if (rc < 0) {
3446                 IPW_ERROR("Unable to load ucode: %d\n", rc);
3447                 goto error;
3448         }
3449
3450         /* stop nic */
3451         ipw_stop_nic(priv);
3452
3453         /* DMA bss firmware into the device */
3454         rc = ipw_load_firmware(priv, fw_img, le32_to_cpu(fw->fw_size));
3455         if (rc < 0) {
3456                 IPW_ERROR("Unable to load firmware: %d\n", rc);
3457                 goto error;
3458         }
3459 #ifdef CONFIG_PM
3460         fw_loaded = 1;
3461 #endif
3462
3463         ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
3464
3465         rc = ipw_queue_reset(priv);
3466         if (rc < 0) {
3467                 IPW_ERROR("Unable to initialize queues\n");
3468                 goto error;
3469         }
3470
3471         /* Ensure interrupts are disabled */
3472         ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3473         /* ack pending interrupts */
3474         ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3475
3476         /* kick start the device */
3477         ipw_start_nic(priv);
3478
3479         if (ipw_read32(priv, IPW_INTA_RW) & IPW_INTA_BIT_PARITY_ERROR) {
3480                 if (retries > 0) {
3481                         IPW_WARNING("Parity error.  Retrying init.\n");
3482                         retries--;
3483                         goto retry;
3484                 }
3485
3486                 IPW_ERROR("TODO: Handle parity error -- schedule restart?\n");
3487                 rc = -EIO;
3488                 goto error;
3489         }
3490
3491         /* wait for the device */
3492         rc = ipw_poll_bit(priv, IPW_INTA_RW,
3493                           IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3494         if (rc < 0) {
3495                 IPW_ERROR("device failed to start within 500ms\n");
3496                 goto error;
3497         }
3498         IPW_DEBUG_INFO("device response after %dms\n", rc);
3499
3500         /* ack fw init done interrupt */
3501         ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3502
3503         /* read eeprom data and initialize the eeprom region of sram */
3504         priv->eeprom_delay = 1;
3505         ipw_eeprom_init_sram(priv);
3506
3507         /* enable interrupts */
3508         ipw_enable_interrupts(priv);
3509
3510         /* Ensure our queue has valid packets */
3511         ipw_rx_queue_replenish(priv);
3512
3513         ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read);
3514
3515         /* ack pending interrupts */
3516         ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3517
3518 #ifndef CONFIG_PM
3519         release_firmware(raw);
3520 #endif
3521         return 0;
3522
3523       error:
3524         if (priv->rxq) {
3525                 ipw_rx_queue_free(priv, priv->rxq);
3526                 priv->rxq = NULL;
3527         }
3528         ipw_tx_queue_free(priv);
3529         if (raw)
3530                 release_firmware(raw);
3531 #ifdef CONFIG_PM
3532         fw_loaded = 0;
3533         raw = NULL;
3534 #endif
3535
3536         return rc;
3537 }
3538
3539 /**
3540  * DMA services
3541  *
3542  * Theory of operation
3543  *
3544  * A queue is a circular buffer with 'Read' and 'Write' pointers.
3545  * Two empty entries are always kept in the buffer to protect against overflow.
3546  *
3547  * For the Tx queues there are low-mark and high-mark limits.  If, after
3548  * queuing a packet for Tx, the free space drops below the low mark, the Tx
3549  * queue is stopped.  When reclaiming packets (on the 'tx done' IRQ), the Tx
3550  * queue is resumed once the free space rises above the high mark.
3551  *
3552  * The IPW operates with six queues, one receive queue in the device's
3553  * sram, one transmit queue for sending commands to the device firmware,
3554  * and four transmit queues for data.
3555  *
3556  * The four transmit queues allow for performing quality of service (qos)
3557  * transmissions as per the 802.11 protocol.  Currently Linux does not
3558  * provide a mechanism to the user for utilizing prioritized queues, so
3559  * we only utilize the first data transmit queue (queue1).
3560  */
3561
3562 /**
3563  * Driver allocates buffers of this size for Rx
3564  */
3565
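/* Number of free BDs in the queue.  Two entries are held back so that a
 * completely full ring can never be confused with an empty one (see the
 * 'Theory of operation' above).  For example, with n_bd = 64,
 * first_empty = 10 and last_used = 5: 5 - 10 = -5, + 64 = 59, - 2 = 57
 * free slots. */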
3566 static inline int ipw_queue_space(const struct clx2_queue *q)
3567 {
3568         int s = q->last_used - q->first_empty;
3569         if (s <= 0)
3570                 s += q->n_bd;
3571         s -= 2;                 /* keep some reserve to not confuse empty and full situations */
3572         if (s < 0)
3573                 s = 0;
3574         return s;
3575 }
3576
3577 static inline int ipw_queue_inc_wrap(int index, int n_bd)
3578 {
3579         return (++index == n_bd) ? 0 : index;
3580 }
3581
3582 /**
3583  * Initialize common DMA queue structure
3584  *
3585  * @param q                queue to init
3586  * @param count            Number of BD's to allocate. Should be power of 2
3587  * @param read_register    Address for 'read' register
3588  *                         (not offset within BAR, full address)
3589  * @param write_register   Address for 'write' register
3590  *                         (not offset within BAR, full address)
3591  * @param base_register    Address for 'base' register
3592  *                         (not offset within BAR, full address)
3593  * @param size             Address for 'size' register
3594  *                         (not offset within BAR, full address)
3595  */
3596 static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
3597                            int count, u32 read, u32 write, u32 base, u32 size)
3598 {
3599         q->n_bd = count;
3600
3601         q->low_mark = q->n_bd / 4;
3602         if (q->low_mark < 4)
3603                 q->low_mark = 4;
3604
3605         q->high_mark = q->n_bd / 8;
3606         if (q->high_mark < 2)
3607                 q->high_mark = 2;
3608
3609         q->first_empty = q->last_used = 0;
3610         q->reg_r = read;
3611         q->reg_w = write;
3612
3613         ipw_write32(priv, base, q->dma_addr);
3614         ipw_write32(priv, size, count);
3615         ipw_write32(priv, read, 0);
3616         ipw_write32(priv, write, 0);
3617
3618         _ipw_read32(priv, 0x90);
3619 }
3620
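/* Allocate the host-side txb[] pointer array and the DMA-coherent BD ring
 * for one Tx queue, then program the queue's read/write/base/size
 * registers via ipw_queue_init(). */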
3621 static int ipw_queue_tx_init(struct ipw_priv *priv,
3622                              struct clx2_tx_queue *q,
3623                              int count, u32 read, u32 write, u32 base, u32 size)
3624 {
3625         struct pci_dev *dev = priv->pci_dev;
3626
3627         q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL);
3628         if (!q->txb) {
3629                 IPW_ERROR("kmalloc for auxiliary BD structures failed\n");
3630                 return -ENOMEM;
3631         }
3632
3633         q->bd =
3634             pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
3635         if (!q->bd) {
3636                 IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
3637                           sizeof(q->bd[0]) * count);
3638                 kfree(q->txb);
3639                 q->txb = NULL;
3640                 return -ENOMEM;
3641         }
3642
3643         ipw_queue_init(priv, &q->q, count, read, write, base, size);
3644         return 0;
3645 }
3646
3647 /**
3648  * Free one TFD, the one at index [txq->q.last_used].
3649  * Do NOT advance any indexes
3650  *
3651  * @param dev
3652  * @param txq
3653  */
3654 static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
3655                                   struct clx2_tx_queue *txq)
3656 {
3657         struct tfd_frame *bd = &txq->bd[txq->q.last_used];
3658         struct pci_dev *dev = priv->pci_dev;
3659         int i;
3660
3661         /* classify bd */
3662         if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE)
3663                 /* nothing to cleanup after for host commands */
3664                 return;
3665
3666         /* sanity check */
3667         if (le32_to_cpu(bd->u.data.num_chunks) > NUM_TFD_CHUNKS) {
3668                 IPW_ERROR("Too many chunks: %i\n",
3669                           le32_to_cpu(bd->u.data.num_chunks));
3670                 /** @todo issue fatal error, it is quite serious situation */
3671                 return;
3672         }
3673
3674         /* unmap chunks if any */
3675         for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) {
3676                 pci_unmap_single(dev, le32_to_cpu(bd->u.data.chunk_ptr[i]),
3677                                  le16_to_cpu(bd->u.data.chunk_len[i]),
3678                                  PCI_DMA_TODEVICE);
3679                 if (txq->txb[txq->q.last_used]) {
3680                         ieee80211_txb_free(txq->txb[txq->q.last_used]);
3681                         txq->txb[txq->q.last_used] = NULL;
3682                 }
3683         }
3684 }
3685
3686 /**
3687  * Deallocate DMA queue.
3688  *
3689  * Empty queue by removing and destroying all BD's.
3690  * Free all buffers.
3691  *
3692  * @param dev
3693  * @param q
3694  */
3695 static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
3696 {
3697         struct clx2_queue *q = &txq->q;
3698         struct pci_dev *dev = priv->pci_dev;
3699
3700         if (q->n_bd == 0)
3701                 return;
3702
3703         /* first, empty all BD's */
3704         for (; q->first_empty != q->last_used;
3705              q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
3706                 ipw_queue_tx_free_tfd(priv, txq);
3707         }
3708
3709         /* free buffers belonging to queue itself */
3710         pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
3711                             q->dma_addr);
3712         kfree(txq->txb);
3713
3714         /* 0 fill whole structure */
3715         memset(txq, 0, sizeof(*txq));
3716 }
3717
3718 /**
3719  * Destroy all DMA queues and structures
3720  *
3721  * @param priv
3722  */
3723 static void ipw_tx_queue_free(struct ipw_priv *priv)
3724 {
3725         /* Tx CMD queue */
3726         ipw_queue_tx_free(priv, &priv->txq_cmd);
3727
3728         /* Tx queues */
3729         ipw_queue_tx_free(priv, &priv->txq[0]);
3730         ipw_queue_tx_free(priv, &priv->txq[1]);
3731         ipw_queue_tx_free(priv, &priv->txq[2]);
3732         ipw_queue_tx_free(priv, &priv->txq[3]);
3733 }
3734
3735 static void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid)
3736 {
3737         /* First 3 bytes are manufacturer */
3738         bssid[0] = priv->mac_addr[0];
3739         bssid[1] = priv->mac_addr[1];
3740         bssid[2] = priv->mac_addr[2];
3741
3742         /* Last bytes are random */
3743         get_random_bytes(&bssid[3], ETH_ALEN - 3);
3744
3745         bssid[0] &= 0xfe;       /* clear multicast bit */
3746         bssid[0] |= 0x02;       /* set local assignment bit (IEEE802) */
3747 }
3748
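/* Return the station-table index for @bssid, adding a new entry (and
 * writing it into the device's station table in SRAM) if the address is
 * not already known. */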
3749 static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
3750 {
3751         struct ipw_station_entry entry;
3752         int i;
3753
3754         for (i = 0; i < priv->num_stations; i++) {
3755                 if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) {
3756                         /* Another node is active in network */
3757                         priv->missed_adhoc_beacons = 0;
3758                         if (!(priv->config & CFG_STATIC_CHANNEL))
3759                                 /* when other nodes drop out, we drop out */
3760                                 priv->config &= ~CFG_ADHOC_PERSIST;
3761
3762                         return i;
3763                 }
3764         }
3765
3766         if (i == MAX_STATIONS)
3767                 return IPW_INVALID_STATION;
3768
3769         IPW_DEBUG_SCAN("Adding AdHoc station: " MAC_FMT "\n", MAC_ARG(bssid));
3770
3771         entry.reserved = 0;
3772         entry.support_mode = 0;
3773         memcpy(entry.mac_addr, bssid, ETH_ALEN);
3774         memcpy(priv->stations[i], bssid, ETH_ALEN);
3775         ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
3776                          &entry, sizeof(entry));
3777         priv->num_stations++;
3778
3779         return i;
3780 }
3781
3782 static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
3783 {
3784         int i;
3785
3786         for (i = 0; i < priv->num_stations; i++)
3787                 if (!memcmp(priv->stations[i], bssid, ETH_ALEN))
3788                         return i;
3789
3790         return IPW_INVALID_STATION;
3791 }
3792
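/* Ask the firmware to drop the current association.  If an association
 * attempt is still in progress the disassociate work item is queued
 * instead; @quiet selects HC_DISASSOC_QUIET rather than a normal
 * disassociation. */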
3793 static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
3794 {
3795         int err;
3796
3797         if (priv->status & STATUS_ASSOCIATING) {
3798                 IPW_DEBUG_ASSOC("Disassociating while associating.\n");
3799                 queue_work(priv->workqueue, &priv->disassociate);
3800                 return;
3801         }
3802
3803         if (!(priv->status & STATUS_ASSOCIATED)) {
3804                 IPW_DEBUG_ASSOC("Disassociating while not associated.\n");
3805                 return;
3806         }
3807
3808         IPW_DEBUG_ASSOC("Disassociation attempt from " MAC_FMT " "
3809                         "on channel %d.\n",
3810                         MAC_ARG(priv->assoc_request.bssid),
3811                         priv->assoc_request.channel);
3812
3813         priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
3814         priv->status |= STATUS_DISASSOCIATING;
3815
3816         if (quiet)
3817                 priv->assoc_request.assoc_type = HC_DISASSOC_QUIET;
3818         else
3819                 priv->assoc_request.assoc_type = HC_DISASSOCIATE;
3820
3821         err = ipw_send_associate(priv, &priv->assoc_request);
3822         if (err) {
3823                 IPW_DEBUG_HC("Attempt to send [dis]associate command "
3824                              "failed.\n");
3825                 return;
3826         }
3827
3828 }
3829
3830 static int ipw_disassociate(void *data)
3831 {
3832         struct ipw_priv *priv = data;
3833         if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
3834                 return 0;
3835         ipw_send_disassociate(data, 0);
3836         return 1;
3837 }
3838
3839 static void ipw_bg_disassociate(struct work_struct *work)
3840 {
3841         struct ipw_priv *priv =
3842                 container_of(work, struct ipw_priv, disassociate);
3843         mutex_lock(&priv->mutex);
3844         ipw_disassociate(priv);
3845         mutex_unlock(&priv->mutex);
3846 }
3847
3848 static void ipw_system_config(struct work_struct *work)
3849 {
3850         struct ipw_priv *priv =
3851                 container_of(work, struct ipw_priv, system_config);
3852
3853 #ifdef CONFIG_IPW2200_PROMISCUOUS
3854         if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
3855                 priv->sys_config.accept_all_data_frames = 1;
3856                 priv->sys_config.accept_non_directed_frames = 1;
3857                 priv->sys_config.accept_all_mgmt_bcpr = 1;
3858                 priv->sys_config.accept_all_mgmt_frames = 1;
3859         }
3860 #endif
3861
3862         ipw_send_system_config(priv);
3863 }
3864
3865 struct ipw_status_code {
3866         u16 status;
3867         const char *reason;
3868 };
3869
3870 static const struct ipw_status_code ipw_status_codes[] = {
3871         {0x00, "Successful"},
3872         {0x01, "Unspecified failure"},
3873         {0x0A, "Cannot support all requested capabilities in the "
3874          "Capability information field"},
3875         {0x0B, "Reassociation denied due to inability to confirm that "
3876          "association exists"},
3877         {0x0C, "Association denied due to reason outside the scope of this "
3878          "standard"},
3879         {0x0D,
3880          "Responding station does not support the specified authentication "
3881          "algorithm"},
3882         {0x0E,
3883          "Received an Authentication frame with authentication sequence "
3884          "transaction sequence number out of expected sequence"},
3885         {0x0F, "Authentication rejected because of challenge failure"},
3886         {0x10, "Authentication rejected due to timeout waiting for next "
3887          "frame in sequence"},
3888         {0x11, "Association denied because AP is unable to handle additional "
3889          "associated stations"},
3890         {0x12,
3891          "Association denied due to requesting station not supporting all "
3892          "of the datarates in the BSSBasicServiceSet Parameter"},
3893         {0x13,
3894          "Association denied due to requesting station not supporting "
3895          "short preamble operation"},
3896         {0x14,
3897          "Association denied due to requesting station not supporting "
3898          "PBCC encoding"},
3899         {0x15,
3900          "Association denied due to requesting station not supporting "
3901          "channel agility"},
3902         {0x19,
3903          "Association denied due to requesting station not supporting "
3904          "short slot operation"},
3905         {0x1A,
3906          "Association denied due to requesting station not supporting "
3907          "DSSS-OFDM operation"},
3908         {0x28, "Invalid Information Element"},
3909         {0x29, "Group Cipher is not valid"},
3910         {0x2A, "Pairwise Cipher is not valid"},
3911         {0x2B, "AKMP is not valid"},
3912         {0x2C, "Unsupported RSN IE version"},
3913         {0x2D, "Invalid RSN IE Capabilities"},
3914         {0x2E, "Cipher suite is rejected per security policy"},
3915 };
3916
3917 static const char *ipw_get_status_code(u16 status)
3918 {
3919         int i;
3920         for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++)
3921                 if (ipw_status_codes[i].status == (status & 0xff))
3922                         return ipw_status_codes[i].reason;
3923         return "Unknown status value.";
3924 }
3925
3926 static inline void average_init(struct average *avg)
3927 {
3928         memset(avg, 0, sizeof(*avg));
3929 }
3930
3931 #define DEPTH_RSSI 8
3932 #define DEPTH_NOISE 16
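/* Exponential moving average: the new sample contributes 1/depth of the
 * result and the previous average the remaining (depth - 1)/depth.
 * E.g. depth = 8, prev_avg = -60, val = -52: (7 * -60 + -52) / 8 = -59. */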
3933 static s16 exponential_average(s16 prev_avg, s16 val, u8 depth)
3934 {
3935         return ((depth-1)*prev_avg +  val)/depth;
3936 }
3937
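/* Sliding-window average over the last AVG_ENTRIES samples.  Until the
 * window has wrapped once (avg->init set), average_value() averages only
 * the samples collected so far. */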
3938 static void average_add(struct average *avg, s16 val)
3939 {
3940         avg->sum -= avg->entries[avg->pos];
3941         avg->sum += val;
3942         avg->entries[avg->pos++] = val;
3943         if (unlikely(avg->pos == AVG_ENTRIES)) {
3944                 avg->init = 1;
3945                 avg->pos = 0;
3946         }
3947 }
3948
3949 static s16 average_value(struct average *avg)
3950 {
3951         if (!unlikely(avg->init)) {
3952                 if (avg->pos)
3953                         return avg->sum / avg->pos;
3954                 return 0;
3955         }
3956
3957         return avg->sum / AVG_ENTRIES;
3958 }
3959
3960 static void ipw_reset_stats(struct ipw_priv *priv)
3961 {
3962         u32 len = sizeof(u32);
3963
3964         priv->quality = 0;
3965
3966         average_init(&priv->average_missed_beacons);
3967         priv->exp_avg_rssi = -60;
3968         priv->exp_avg_noise = -85 + 0x100;
3969
3970         priv->last_rate = 0;
3971         priv->last_missed_beacons = 0;
3972         priv->last_rx_packets = 0;
3973         priv->last_tx_packets = 0;
3974         priv->last_tx_failures = 0;
3975
3976         /* Firmware managed, reset only when NIC is restarted, so we have to
3977          * normalize on the current value */
3978         ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC,
3979                         &priv->last_rx_err, &len);
3980         ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE,
3981                         &priv->last_tx_failures, &len);
3982
3983         /* Driver managed, reset with each association */
3984         priv->missed_adhoc_beacons = 0;
3985         priv->missed_beacons = 0;
3986         priv->tx_packets = 0;
3987         priv->rx_packets = 0;
3988
3989 }
3990
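/* Highest rate (in bit/s) enabled in priv->rates_mask, restricted to CCK
 * rates while associated in 802.11b mode; falls back to 11 or 54 Mbit/s
 * if no bit in the mask matches a known rate. */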
3991 static u32 ipw_get_max_rate(struct ipw_priv *priv)
3992 {
3993         u32 i = 0x80000000;
3994         u32 mask = priv->rates_mask;
3995         /* If currently associated in B mode, restrict the maximum
3996          * rate match to B rates */
3997         if (priv->assoc_request.ieee_mode == IPW_B_MODE)
3998                 mask &= IEEE80211_CCK_RATES_MASK;
3999
4000         /* TODO: Verify that the rate is supported by the current rates
4001          * list. */
4002
4003         while (i && !(mask & i))
4004                 i >>= 1;
4005         switch (i) {
4006         case IEEE80211_CCK_RATE_1MB_MASK:
4007                 return 1000000;
4008         case IEEE80211_CCK_RATE_2MB_MASK:
4009                 return 2000000;
4010         case IEEE80211_CCK_RATE_5MB_MASK:
4011                 return 5500000;
4012         case IEEE80211_OFDM_RATE_6MB_MASK:
4013                 return 6000000;
4014         case IEEE80211_OFDM_RATE_9MB_MASK:
4015                 return 9000000;
4016         case IEEE80211_CCK_RATE_11MB_MASK:
4017                 return 11000000;
4018         case IEEE80211_OFDM_RATE_12MB_MASK:
4019                 return 12000000;
4020         case IEEE80211_OFDM_RATE_18MB_MASK:
4021                 return 18000000;
4022         case IEEE80211_OFDM_RATE_24MB_MASK:
4023                 return 24000000;
4024         case IEEE80211_OFDM_RATE_36MB_MASK:
4025                 return 36000000;
4026         case IEEE80211_OFDM_RATE_48MB_MASK:
4027                 return 48000000;
4028         case IEEE80211_OFDM_RATE_54MB_MASK:
4029                 return 54000000;
4030         }
4031
4032         if (priv->ieee->mode == IEEE_B)
4033                 return 11000000;
4034         else
4035                 return 54000000;
4036 }
4037
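/* Current Tx rate in bit/s.  Once enough packets have been transmitted the
 * value is read from the firmware's TX_CURR_RATE ordinal; before that the
 * configured maximum rate is reported.  Returns 0 when not associated. */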
4038 static u32 ipw_get_current_rate(struct ipw_priv *priv)
4039 {
4040         u32 rate, len = sizeof(rate);
4041         int err;
4042
4043         if (!(priv->status & STATUS_ASSOCIATED))
4044                 return 0;
4045
4046         if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) {
4047                 err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate,
4048                                       &len);
4049                 if (err) {
4050                         IPW_DEBUG_INFO("failed querying ordinals.\n");
4051                         return 0;
4052                 }
4053         } else
4054                 return ipw_get_max_rate(priv);
4055
4056         switch (rate) {
4057         case IPW_TX_RATE_1MB:
4058                 return 1000000;
4059         case IPW_TX_RATE_2MB:
4060                 return 2000000;
4061         case IPW_TX_RATE_5MB:
4062                 return 5500000;
4063         case IPW_TX_RATE_6MB:
4064                 return 6000000;
4065         case IPW_TX_RATE_9MB:
4066                 return 9000000;
4067         case IPW_TX_RATE_11MB:
4068                 return 11000000;
4069         case IPW_TX_RATE_12MB:
4070                 return 12000000;
4071         case IPW_TX_RATE_18MB:
4072                 return 18000000;
4073         case IPW_TX_RATE_24MB:
4074                 return 24000000;
4075         case IPW_TX_RATE_36MB:
4076                 return 36000000;
4077         case IPW_TX_RATE_48MB:
4078                 return 48000000;
4079         case IPW_TX_RATE_54MB:
4080                 return 54000000;
4081         }
4082
4083         return 0;
4084 }
4085
4086 #define IPW_STATS_INTERVAL (2 * HZ)
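/* Collect the periodic link statistics: missed beacons, rate, Rx/Tx error
 * deltas and RSSI are each turned into a 0-100% quality figure and
 * priv->quality becomes the worst of them.  Reschedules itself every
 * IPW_STATS_INTERVAL while associated. */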
4087 static void ipw_gather_stats(struct ipw_priv *priv)
4088 {
4089         u32 rx_err, rx_err_delta, rx_packets_delta;
4090         u32 tx_failures, tx_failures_delta, tx_packets_delta;
4091         u32 missed_beacons_percent, missed_beacons_delta;
4092         u32 quality = 0;
4093         u32 len = sizeof(u32);
4094         s16 rssi;
4095         u32 beacon_quality, signal_quality, tx_quality, rx_quality,
4096             rate_quality;
4097         u32 max_rate;
4098
4099         if (!(priv->status & STATUS_ASSOCIATED)) {
4100                 priv->quality = 0;
4101                 return;
4102         }
4103
4104         /* Update the statistics */
4105         ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS,
4106                         &priv->missed_beacons, &len);
4107         missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons;
4108         priv->last_missed_beacons = priv->missed_beacons;
4109         if (priv->assoc_request.beacon_interval) {
4110                 missed_beacons_percent = missed_beacons_delta *
4111                     (HZ * priv->assoc_request.beacon_interval) /
4112                     (IPW_STATS_INTERVAL * 10);
4113         } else {
4114                 missed_beacons_percent = 0;
4115         }
4116         average_add(&priv->average_missed_beacons, missed_beacons_percent);
4117
4118         ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len);
4119         rx_err_delta = rx_err - priv->last_rx_err;
4120         priv->last_rx_err = rx_err;
4121
4122         ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len);
4123         tx_failures_delta = tx_failures - priv->last_tx_failures;
4124         priv->last_tx_failures = tx_failures;
4125
4126         rx_packets_delta = priv->rx_packets - priv->last_rx_packets;
4127         priv->last_rx_packets = priv->rx_packets;
4128
4129         tx_packets_delta = priv->tx_packets - priv->last_tx_packets;
4130         priv->last_tx_packets = priv->tx_packets;
4131
4132         /* Calculate quality based on the following:
4133          *
4134          * Missed beacon: 100% = 0, 0% = 70% missed
4135          * Rate: 60% = 1Mbs, 100% = Max
4136          * Rx and Tx errors represent a straight % of total Rx/Tx
4137          * RSSI: 100% = > -50,  0% = < -80
4138          * Rx errors: 100% = 0, 0% = 50% missed
4139          *
4140          * The lowest computed quality is used.
4141          *
4142          */
4143 #define BEACON_THRESHOLD 5
4144         beacon_quality = 100 - missed_beacons_percent;
4145         if (beacon_quality < BEACON_THRESHOLD)
4146                 beacon_quality = 0;
4147         else
4148                 beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 /
4149                     (100 - BEACON_THRESHOLD);
4150         IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n",
4151                         beacon_quality, missed_beacons_percent);
4152
4153         priv->last_rate = ipw_get_current_rate(priv);
4154         max_rate = ipw_get_max_rate(priv);
4155         rate_quality = priv->last_rate * 40 / max_rate + 60;
4156         IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n",
4157                         rate_quality, priv->last_rate / 1000000);
4158
4159         if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta)
4160                 rx_quality = 100 - (rx_err_delta * 100) /
4161                     (rx_packets_delta + rx_err_delta);
4162         else
4163                 rx_quality = 100;
4164         IPW_DEBUG_STATS("Rx quality   : %3d%% (%u errors, %u packets)\n",
4165                         rx_quality, rx_err_delta, rx_packets_delta);
4166
4167         if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta)
4168                 tx_quality = 100 - (tx_failures_delta * 100) /
4169                     (tx_packets_delta + tx_failures_delta);
4170         else
4171                 tx_quality = 100;
4172         IPW_DEBUG_STATS("Tx quality   : %3d%% (%u errors, %u packets)\n",
4173                         tx_quality, tx_failures_delta, tx_packets_delta);
4174
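        /* Map the averaged RSSI onto a 0-100% scale between worst_rssi and
         * perfect_rssi; the mapping is quadratic in (perfect_rssi - rssi),
         * so quality drops off faster as the signal approaches worst_rssi,
         * and the result is clamped to [0, 100] below. */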
4175         rssi = priv->exp_avg_rssi;
4176         signal_quality =
4177             (100 *
4178              (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4179              (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) -
4180              (priv->ieee->perfect_rssi - rssi) *
4181              (15 * (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) +
4182               62 * (priv->ieee->perfect_rssi - rssi))) /
4183             ((priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4184              (priv->ieee->perfect_rssi - priv->ieee->worst_rssi));
4185         if (signal_quality > 100)
4186                 signal_quality = 100;
4187         else if (signal_quality < 1)
4188                 signal_quality = 0;
4189
4190         IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n",
4191                         signal_quality, rssi);
4192
4193         quality = min(beacon_quality,
4194                       min(rate_quality,
4195                           min(tx_quality, min(rx_quality, signal_quality))));
4196         if (quality == beacon_quality)
4197                 IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n",
4198                                 quality);
4199         if (quality == rate_quality)
4200                 IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n",
4201                                 quality);
4202         if (quality == tx_quality)
4203                 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n",
4204                                 quality);
4205         if (quality == rx_quality)
4206                 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n",
4207                                 quality);
4208         if (quality == signal_quality)
4209                 IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n",
4210                                 quality);
4211
4212         priv->quality = quality;
4213
4214         queue_delayed_work(priv->workqueue, &priv->gather_stats,
4215                            IPW_STATS_INTERVAL);
4216 }
4217
4218 static void ipw_bg_gather_stats(struct work_struct *work)
4219 {
4220         struct ipw_priv *priv =
4221                 container_of(work, struct ipw_priv, gather_stats.work);
4222         mutex_lock(&priv->mutex);
4223         ipw_gather_stats(priv);
4224         mutex_unlock(&priv->mutex);
4225 }
4226
4227 /* Missed beacon behavior:
4228  * 1st missed -> roaming_threshold, just wait, don't do any scan/roam.
4229  * roaming_threshold -> disassociate_threshold, scan and roam for better signal.
4230  * Above disassociate threshold, give up and stop scanning.
4231  * Roaming is disabled if disassociate_threshold <= roaming_threshold  */
4232 static void ipw_handle_missed_beacon(struct ipw_priv *priv,
4233                                             int missed_count)
4234 {
4235         priv->notif_missed_beacons = missed_count;
4236
4237         if (missed_count > priv->disassociate_threshold &&
4238             priv->status & STATUS_ASSOCIATED) {
4239                 /* If associated and we've hit the missed
4240                  * beacon threshold, disassociate, turn
4241                  * off roaming, and abort any active scans */
4242                 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4243                           IPW_DL_STATE | IPW_DL_ASSOC,
4244                           "Missed beacon: %d - disassociate\n", missed_count);
4245                 priv->status &= ~STATUS_ROAMING;
4246                 if (priv->status & STATUS_SCANNING) {
4247                         IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4248                                   IPW_DL_STATE,
4249                                   "Aborting scan with missed beacon.\n");
4250                         queue_work(priv->workqueue, &priv->abort_scan);
4251                 }
4252
4253                 queue_work(priv->workqueue, &priv->disassociate);
4254                 return;
4255         }
4256
4257         if (priv->status & STATUS_ROAMING) {
4258                 /* If we are currently roaming, then just
4259                  * print a debug statement... */
4260                 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4261                           "Missed beacon: %d - roam in progress\n",
4262                           missed_count);
4263                 return;
4264         }
4265
4266         if (roaming &&
4267             (missed_count > priv->roaming_threshold &&
4268              missed_count <= priv->disassociate_threshold)) {
4269                 /* If we are not already roaming, set the ROAM
4270                  * bit in the status and kick off a scan.
4271                  * This can happen several times before we reach
4272                  * disassociate_threshold. */
4273                 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4274                           "Missed beacon: %d - initiate "
4275                           "roaming\n", missed_count);
4276                 if (!(priv->status & STATUS_ROAMING)) {
4277                         priv->status |= STATUS_ROAMING;
4278                         if (!(priv->status & STATUS_SCANNING))
4279                                 queue_delayed_work(priv->workqueue,
4280                                                    &priv->request_scan, 0);
4281                 }
4282                 return;
4283         }
4284
4285         if (priv->status & STATUS_SCANNING) {
4286                 /* Stop scan to keep fw from getting
4287                  * stuck (only if we aren't roaming --
4288                  * otherwise we'll never scan more than 2 or 3
4289                  * channels..) */
4290                 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE,
4291                           "Aborting scan with missed beacon.\n");
4292                 queue_work(priv->workqueue, &priv->abort_scan);
4293         }
4294
4295         IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count);
4296 }
4297
4298 /**
4299  * Handle host notification packet.
4300  * Called from interrupt routine
4301  */
4302 static void ipw_rx_notification(struct ipw_priv *priv,
4303                                        struct ipw_rx_notification *notif)
4304 {
4305         notif->size = le16_to_cpu(notif->size);
4306
4307         IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, notif->size);
4308
4309         switch (notif->subtype) {
4310         case HOST_NOTIFICATION_STATUS_ASSOCIATED:{
4311                         struct notif_association *assoc = &notif->u.assoc;
4312
4313                         switch (assoc->state) {
4314                         case CMAS_ASSOCIATED:{
4315                                         IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4316                                                   IPW_DL_ASSOC,
4317                                                   "associated: '%s' " MAC_FMT
4318                                                   " \n",
4319                                                   escape_essid(priv->essid,
4320                                                                priv->essid_len),
4321                                                   MAC_ARG(priv->bssid));
4322
4323                                         switch (priv->ieee->iw_mode) {
4324                                         case IW_MODE_INFRA:
4325                                                 memcpy(priv->ieee->bssid,
4326                                                        priv->bssid, ETH_ALEN);
4327                                                 break;
4328
4329                                         case IW_MODE_ADHOC:
4330                                                 memcpy(priv->ieee->bssid,
4331                                                        priv->bssid, ETH_ALEN);
4332
4333                                                 /* clear out the station table */
4334                                                 priv->num_stations = 0;
4335
4336                                                 IPW_DEBUG_ASSOC
4337                                                     ("queueing adhoc check\n");
4338                                                 queue_delayed_work(priv->
4339                                                                    workqueue,
4340                                                                    &priv->
4341                                                                    adhoc_check,
4342                                                                    priv->
4343                                                                    assoc_request.
4344                                                                    beacon_interval);
4345                                                 break;
4346                                         }
4347
4348                                         priv->status &= ~STATUS_ASSOCIATING;
4349                                         priv->status |= STATUS_ASSOCIATED;
4350                                         queue_work(priv->workqueue,
4351                                                    &priv->system_config);
4352
4353 #ifdef CONFIG_IPW2200_QOS
4354 #define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \
4355                          le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_ctl))
4356                                         if ((priv->status & STATUS_AUTH) &&
4357                                             (IPW_GET_PACKET_STYPE(&notif->u.raw)
4358                                              == IEEE80211_STYPE_ASSOC_RESP)) {
4359                                                 if ((sizeof
4360                                                      (struct
4361                                                       ieee80211_assoc_response)
4362                                                      <= notif->size)
4363                                                     && (notif->size <= 2314)) {
4364                                                         struct
4365                                                         ieee80211_rx_stats
4366                                                             stats = {
4367                                                                 .len =
4368                                                                     notif->
4369                                                                     size - 1,
4370                                                         };
4371
4372                                                         IPW_DEBUG_QOS
4373                                                             ("QoS Associate "
4374                                                              "size %d\n",
4375                                                              notif->size);
4376                                                         ieee80211_rx_mgt(priv->
4377                                                                          ieee,
4378                                                                          (struct
4379                                                                           ieee80211_hdr_4addr
4380                                                                           *)
4381                                                                          &notif->u.raw, &stats);
4382                                                 }
4383                                         }
4384 #endif
4385
4386                                         schedule_work(&priv->link_up);
4387
4388                                         break;
4389                                 }
4390
4391                         case CMAS_AUTHENTICATED:{
4392                                         if (priv->status & (STATUS_ASSOCIATED |
4393                                                             STATUS_AUTH)) {
4394                                                 struct notif_authenticate *auth
4395                                                     = &notif->u.auth;
4396                                                 IPW_DEBUG(IPW_DL_NOTIF |
4397                                                           IPW_DL_STATE |
4398                                                           IPW_DL_ASSOC,
4399                                                           "deauthenticated: '%s' "
4400                                                           MAC_FMT
4401                                                           ": (0x%04X) - %s \n",
4402                                                           escape_essid(priv->essid,
4403                                                                        priv->essid_len),
4404                                                           MAC_ARG(priv->bssid),
4405                                                           ntohs(auth->status),
4406                                                           ipw_get_status_code(ntohs(auth->status)));
4412
4413                                                 priv->status &=
4414                                                     ~(STATUS_ASSOCIATING |
4415                                                       STATUS_AUTH |
4416                                                       STATUS_ASSOCIATED);
4417
4418                                                 schedule_work(&priv->link_down);
4419                                                 break;
4420                                         }
4421
4422                                         IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4423                                                   IPW_DL_ASSOC,
4424                                                   "authenticated: '%s' " MAC_FMT
4425                                                   "\n",
4426                                                   escape_essid(priv->essid,
4427                                                                priv->essid_len),
4428                                                   MAC_ARG(priv->bssid));
4429                                         break;
4430                                 }
4431
4432                         case CMAS_INIT:{
4433                                         if (priv->status & STATUS_AUTH) {
4434                                                 struct ieee80211_assoc_response *resp =
4435                                                     (struct ieee80211_assoc_response *)
4436                                                     &notif->u.raw;
4437                                                 IPW_DEBUG(IPW_DL_NOTIF |
4438                                                           IPW_DL_STATE |
4439                                                           IPW_DL_ASSOC,
4440                                                           "association failed (0x%04X): %s\n",
4441                                                           ntohs(resp->status),
4442                                                           ipw_get_status_code(ntohs(resp->status)));
4449                                         }
4450
4451                                         IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4452                                                   IPW_DL_ASSOC,
4453                                                   "disassociated: '%s' " MAC_FMT
4454                                                   " \n",
4455                                                   escape_essid(priv->essid,
4456                                                                priv->essid_len),
4457                                                   MAC_ARG(priv->bssid));
4458
4459                                         priv->status &=
4460                                             ~(STATUS_DISASSOCIATING |
4461                                               STATUS_ASSOCIATING |
4462                                               STATUS_ASSOCIATED | STATUS_AUTH);
4463                                         if (priv->assoc_network &&
4464                                             (priv->assoc_network->capability &
4465                                              WLAN_CAPABILITY_IBSS))
4466                                                 ipw_remove_current_network(priv);
4469
4470                                         schedule_work(&priv->link_down);
4471
4472                                         break;
4473                                 }
4474
4475                         case CMAS_RX_ASSOC_RESP:
4476                                 break;
4477
4478                         default:
4479                                 IPW_ERROR("assoc: unknown (%d)\n",
4480                                           assoc->state);
4481                                 break;
4482                         }
4483
4484                         break;
4485                 }
4486
4487         case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{
4488                         struct notif_authenticate *auth = &notif->u.auth;
4489                         switch (auth->state) {
4490                         case CMAS_AUTHENTICATED:
4491                                 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4492                                           "authenticated: '%s' " MAC_FMT " \n",
4493                                           escape_essid(priv->essid,
4494                                                        priv->essid_len),
4495                                           MAC_ARG(priv->bssid));
4496                                 priv->status |= STATUS_AUTH;
4497                                 break;
4498
4499                         case CMAS_INIT:
4500                                 if (priv->status & STATUS_AUTH) {
4501                                         IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4502                                                   IPW_DL_ASSOC,
4503                                                   "authentication failed (0x%04X): %s\n",
4504                                                   ntohs(auth->status),
4505                                                   ipw_get_status_code(ntohs(auth->status)));
4508                                 }
4509                                 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4510                                           IPW_DL_ASSOC,
4511                                           "deauthenticated: '%s' " MAC_FMT "\n",
4512                                           escape_essid(priv->essid,
4513                                                        priv->essid_len),
4514                                           MAC_ARG(priv->bssid));
4515
4516                                 priv->status &= ~(STATUS_ASSOCIATING |
4517                                                   STATUS_AUTH |
4518                                                   STATUS_ASSOCIATED);
4519
4520                                 schedule_work(&priv->link_down);
4521                                 break;
4522
4523                         case CMAS_TX_AUTH_SEQ_1:
4524                                 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4525                                           IPW_DL_ASSOC, "AUTH_SEQ_1\n");
4526                                 break;
4527                         case CMAS_RX_AUTH_SEQ_2:
4528                                 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4529                                           IPW_DL_ASSOC, "AUTH_SEQ_2\n");
4530                                 break;
4531                         case CMAS_AUTH_SEQ_1_PASS:
4532                                 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4533                                           IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n");
4534                                 break;
4535                         case CMAS_AUTH_SEQ_1_FAIL:
4536                                 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4537                                           IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n");
4538                                 break;
4539                         case CMAS_TX_AUTH_SEQ_3:
4540                                 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4541                                           IPW_DL_ASSOC, "AUTH_SEQ_3\n");
4542                                 break;
4543                         case CMAS_RX_AUTH_SEQ_4:
4544                                 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4545                                           IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n");
4546                                 break;
4547                         case CMAS_AUTH_SEQ_2_PASS:
4548                                 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4549                                           IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n");
4550                                 break;
4551                         case CMAS_AUTH_SEQ_2_FAIL:
4552                                 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4553                                           IPW_DL_ASSOC, "AUTH_SEQ_2_FAIL\n");
4554                                 break;
4555                         case CMAS_TX_ASSOC:
4556                                 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4557                                           IPW_DL_ASSOC, "TX_ASSOC\n");
4558                                 break;
4559                         case CMAS_RX_ASSOC_RESP:
4560                                 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4561                                           IPW_DL_ASSOC, "RX_ASSOC_RESP\n");
4562
4563                                 break;
4564                         case CMAS_ASSOCIATED:
4565                                 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4566                                           IPW_DL_ASSOC, "ASSOCIATED\n");
4567                                 break;
4568                         default:
4569                                 IPW_DEBUG_NOTIF("auth: failure - %d\n",
4570                                                 auth->state);
4571                                 break;
4572                         }
4573                         break;
4574                 }
4575
4576         case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{
4577                         struct notif_channel_result *x =
4578                             &notif->u.channel_result;
4579
4580                         if (notif->size == sizeof(*x)) {
4581                                 IPW_DEBUG_SCAN("Scan result for channel %d\n",
4582                                                x->channel_num);
4583                         } else {
4584                                 IPW_DEBUG_SCAN("Scan result of wrong size %d "
4585                                                "(should be %zd)\n",
4586                                                notif->size, sizeof(*x));
4587                         }
4588                         break;
4589                 }
4590
4591         case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{
4592                         struct notif_scan_complete *x = &notif->u.scan_complete;
4593                         if (notif->size == sizeof(*x)) {
4594                                 IPW_DEBUG_SCAN
4595                                     ("Scan completed: type %d, %d channels, "
4596                                      "%d status\n", x->scan_type,
4597                                      x->num_channels, x->status);
4598                         } else {
4599                                 IPW_ERROR("Scan completed of wrong size %d "
4600                                           "(should be %zd)\n",
4601                                           notif->size, sizeof(*x));
4602                         }
4603
4604                         priv->status &=
4605                             ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
4606
4607                         wake_up_interruptible(&priv->wait_state);
4608                         cancel_delayed_work(&priv->scan_check);
4609
4610                         if (priv->status & STATUS_EXIT_PENDING)
4611                                 break;
4612
4613                         priv->ieee->scans++;
4614
4615 #ifdef CONFIG_IPW2200_MONITOR
4616                         if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
4617                                 priv->status |= STATUS_SCAN_FORCED;
4618                                 queue_delayed_work(priv->workqueue,
4619                                                    &priv->request_scan, 0);
4620                                 break;
4621                         }
4622                         priv->status &= ~STATUS_SCAN_FORCED;
4623 #endif                          /* CONFIG_IPW2200_MONITOR */
4624
4625                         if (!(priv->status & (STATUS_ASSOCIATED |
4626                                               STATUS_ASSOCIATING |
4627                                               STATUS_ROAMING |
4628                                               STATUS_DISASSOCIATING)))
4629                                 queue_work(priv->workqueue, &priv->associate);
4630                         else if (priv->status & STATUS_ROAMING) {
4631                                 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4632                                         /* If a scan completed and we are in roam mode, then
4633                                          * the scan that completed was the one requested as a
4634                                          * result of entering roam... so, schedule the
4635                                          * roam work */
4636                                         queue_work(priv->workqueue,
4637                                                    &priv->roam);
4638                                 else
4639                                         /* Don't schedule if we aborted the scan */
4640                                         priv->status &= ~STATUS_ROAMING;
4641                         } else if (priv->status & STATUS_SCAN_PENDING)
4642                                 queue_delayed_work(priv->workqueue,
4643                                                    &priv->request_scan, 0);
4644                         else if (priv->config & CFG_BACKGROUND_SCAN
4645                                  && priv->status & STATUS_ASSOCIATED)
4646                                 queue_delayed_work(priv->workqueue,
4647                                                    &priv->request_scan, HZ);
4648
4649                         /* Send an empty event to user space.
4650                          * We don't send the received data on the event because
4651                          * it would require us to do complex transcoding, and
4652                          * we want to minimise the work done in the irq handler.
4653                          * Use a request to extract the data.
4654                          * Also, we generate this event for any scan, regardless
4655                          * of how the scan was initiated. User space can just
4656                          * sync on periodic scan to get fresh data...
4657                          * Jean II */
4658                         if (x->status == SCAN_COMPLETED_STATUS_COMPLETE) {
4659                                 union iwreq_data wrqu;
4660
4661                                 wrqu.data.length = 0;
4662                                 wrqu.data.flags = 0;
4663                                 wireless_send_event(priv->net_dev, SIOCGIWSCAN,
4664                                                     &wrqu, NULL);
4665                         }
4666                         break;
4667                 }
4668
4669         case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{
4670                         struct notif_frag_length *x = &notif->u.frag_len;
4671
4672                         if (notif->size == sizeof(*x))
4673                                 IPW_ERROR("Frag length: %d\n",
4674                                           le16_to_cpu(x->frag_length));
4675                         else
4676                                 IPW_ERROR("Frag length of wrong size %d "
4677                                           "(should be %zd)\n",
4678                                           notif->size, sizeof(*x));
4679                         break;
4680                 }
4681
4682         case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{
4683                         struct notif_link_deterioration *x =
4684                             &notif->u.link_deterioration;
4685
4686                         if (notif->size == sizeof(*x)) {
4687                                 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4688                                         "link deterioration: type %d, cnt %d\n",
4689                                         x->silence_notification_type,
4690                                         x->silence_count);
4691                                 memcpy(&priv->last_link_deterioration, x,
4692                                        sizeof(*x));
4693                         } else {
4694                                 IPW_ERROR("Link Deterioration of wrong size %d "
4695                                           "(should be %zd)\n",
4696                                           notif->size, sizeof(*x));
4697                         }
4698                         break;
4699                 }
4700
4701         case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{
4702                         IPW_ERROR("Dino config\n");
4703                         if (priv->hcmd
4704                             && priv->hcmd->cmd != HOST_CMD_DINO_CONFIG)
4705                                 IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n");
4706
4707                         break;
4708                 }
4709
4710         case HOST_NOTIFICATION_STATUS_BEACON_STATE:{
4711                         struct notif_beacon_state *x = &notif->u.beacon_state;
4712                         if (notif->size != sizeof(*x)) {
4713                                 IPW_ERROR
4714                                     ("Beacon state of wrong size %d (should "
4715                                      "be %zd)\n", notif->size, sizeof(*x));
4716                                 break;
4717                         }
4718
4719                         if (le32_to_cpu(x->state) ==
4720                             HOST_NOTIFICATION_STATUS_BEACON_MISSING)
4721                                 ipw_handle_missed_beacon(priv,
4722                                                          le32_to_cpu(x->number));
4724
4725                         break;
4726                 }
4727
4728         case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{
4729                         struct notif_tgi_tx_key *x = &notif->u.tgi_tx_key;
4730                         if (notif->size == sizeof(*x)) {
4731                                 IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
4732                                           "0x%02x station %d\n",
4733                                           x->key_state, x->security_type,
4734                                           x->station_index);
4735                                 break;
4736                         }
4737
4738                         IPW_ERROR
4739                             ("TGi Tx Key of wrong size %d (should be %zd)\n",
4740                              notif->size, sizeof(*x));
4741                         break;
4742                 }
4743
4744         case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{
4745                         struct notif_calibration *x = &notif->u.calibration;
4746
4747                         if (notif->size == sizeof(*x)) {
4748                                 memcpy(&priv->calib, x, sizeof(*x));
4749                                 IPW_DEBUG_INFO("TODO: Calibration\n");
4750                                 break;
4751                         }
4752
4753                         IPW_ERROR
4754                             ("Calibration of wrong size %d (should be %zd)\n",
4755                              notif->size, sizeof(*x));
4756                         break;
4757                 }
4758
4759         case HOST_NOTIFICATION_NOISE_STATS:{
4760                         if (notif->size == sizeof(u32)) {
4761                                 priv->exp_avg_noise =
4762                                     exponential_average(priv->exp_avg_noise,
4763                                     (u8) (le32_to_cpu(notif->u.noise.value) & 0xff),
4764                                     DEPTH_NOISE);
4765                                 break;
4766                         }
4767
4768                         IPW_ERROR
4769                             ("Noise stat is wrong size %d (should be %zd)\n",
4770                              notif->size, sizeof(u32));
4771                         break;
4772                 }
4773
4774         default:
4775                 IPW_DEBUG_NOTIF("Unknown notification: "
4776                                 "subtype=%d,flags=0x%2x,size=%d\n",
4777                                 notif->subtype, notif->flags, notif->size);
4778         }
4779 }
4780
4781 /**
4782  * Destroys all DMA structures and initialises them again
4783  *
4784  * @param priv
4785  * @return error code
4786  */
4787 static int ipw_queue_reset(struct ipw_priv *priv)
4788 {
4789         int rc = 0;
4790         /** @todo customize queue sizes */
4791         int nTx = 64, nTxCmd = 8;
4792         ipw_tx_queue_free(priv);
4793         /* Tx CMD queue */
4794         rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd,
4795                                IPW_TX_CMD_QUEUE_READ_INDEX,
4796                                IPW_TX_CMD_QUEUE_WRITE_INDEX,
4797                                IPW_TX_CMD_QUEUE_BD_BASE,
4798                                IPW_TX_CMD_QUEUE_BD_SIZE);
4799         if (rc) {
4800                 IPW_ERROR("Tx Cmd queue init failed\n");
4801                 goto error;
4802         }
4803         /* Tx queue(s) */
4804         rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx,
4805                                IPW_TX_QUEUE_0_READ_INDEX,
4806                                IPW_TX_QUEUE_0_WRITE_INDEX,
4807                                IPW_TX_QUEUE_0_BD_BASE, IPW_TX_QUEUE_0_BD_SIZE);
4808         if (rc) {
4809                 IPW_ERROR("Tx 0 queue init failed\n");
4810                 goto error;
4811         }
4812         rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx,
4813                                IPW_TX_QUEUE_1_READ_INDEX,
4814                                IPW_TX_QUEUE_1_WRITE_INDEX,
4815                                IPW_TX_QUEUE_1_BD_BASE, IPW_TX_QUEUE_1_BD_SIZE);
4816         if (rc) {
4817                 IPW_ERROR("Tx 1 queue init failed\n");
4818                 goto error;
4819         }
4820         rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx,
4821                                IPW_TX_QUEUE_2_READ_INDEX,
4822                                IPW_TX_QUEUE_2_WRITE_INDEX,
4823                                IPW_TX_QUEUE_2_BD_BASE, IPW_TX_QUEUE_2_BD_SIZE);
4824         if (rc) {
4825                 IPW_ERROR("Tx 2 queue init failed\n");
4826                 goto error;
4827         }
4828         rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx,
4829                                IPW_TX_QUEUE_3_READ_INDEX,
4830                                IPW_TX_QUEUE_3_WRITE_INDEX,
4831                                IPW_TX_QUEUE_3_BD_BASE, IPW_TX_QUEUE_3_BD_SIZE);
4832         if (rc) {
4833                 IPW_ERROR("Tx 3 queue init failed\n");
4834                 goto error;
4835         }
4836         /* statistics */
4837         priv->rx_bufs_min = 0;
4838         priv->rx_pend_max = 0;
4839         return rc;
4840
4841       error:
4842         ipw_tx_queue_free(priv);
4843         return rc;
4844 }
4845
4846 /**
4847  * Reclaim Tx queue entries no longer used by the NIC.
4848  *
4849  * When the FW advances the 'R' index, all entries between the old and
4850  * new 'R' index need to be reclaimed. As a result, some free space
4851  * forms. If there is enough free space (> low mark), wake the Tx queue.
4852  *
4853  * @note Need to protect against garbage in 'R' index
4854  * @param priv
4855  * @param txq
4856  * @param qindex
4857  * @return Number of used entries remaining in the queue (see sketch below)
4858  */
4859 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
4860                                 struct clx2_tx_queue *txq, int qindex)
4861 {
4862         u32 hw_tail;
4863         int used;
4864         struct clx2_queue *q = &txq->q;
4865
4866         hw_tail = ipw_read32(priv, q->reg_r);
4867         if (hw_tail >= q->n_bd) {
4868                 IPW_ERROR
4869                     ("Read index for DMA queue (%d) is out of range [0-%d)\n",
4870                      hw_tail, q->n_bd);
4871                 goto done;
4872         }
4873         for (; q->last_used != hw_tail;
4874              q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
4875                 ipw_queue_tx_free_tfd(priv, txq);
4876                 priv->tx_packets++;
4877         }
4878       done:
4879         if ((ipw_queue_space(q) > q->low_mark) &&
4880             (qindex >= 0) &&
4881             (priv->status & STATUS_ASSOCIATED) && netif_running(priv->net_dev))
4882                 netif_wake_queue(priv->net_dev);
4883         used = q->first_empty - q->last_used;
4884         if (used < 0)
4885                 used += q->n_bd;
4886
4887         return used;
4888 }
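
/*
 * Illustrative sketch (kept out of the build with #if 0) of the wrap-around
 * arithmetic used above to report how many entries are still in use.  The
 * function name and the concrete numbers are for illustration only.
 */
#if 0
static int example_tx_used_entries(int first_empty, int last_used, int n_bd)
{
	int used = first_empty - last_used;

	if (used < 0)		/* 'first_empty' has wrapped past the end */
		used += n_bd;

	/* e.g. first_empty = 3, last_used = 60, n_bd = 64:  3 - 60 + 64 = 7 */
	return used;
}
#endif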
4889
4890 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
4891                              int len, int sync)
4892 {
4893         struct clx2_tx_queue *txq = &priv->txq_cmd;
4894         struct clx2_queue *q = &txq->q;
4895         struct tfd_frame *tfd;
4896
4897         if (ipw_queue_space(q) < (sync ? 1 : 2)) {
4898                 IPW_ERROR("No space for Tx\n");
4899                 return -EBUSY;
4900         }
4901
4902         tfd = &txq->bd[q->first_empty];
4903         txq->txb[q->first_empty] = NULL;
4904
4905         memset(tfd, 0, sizeof(*tfd));
4906         tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE;
4907         tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
4908         priv->hcmd_seq++;
4909         tfd->u.cmd.index = hcmd;
4910         tfd->u.cmd.length = len;
4911         memcpy(tfd->u.cmd.payload, buf, len);
4912         q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
4913         ipw_write32(priv, q->reg_w, q->first_empty);
4914         _ipw_read32(priv, 0x90);
4915
4916         return 0;
4917 }
4918
4919 /*
4920  * Rx theory of operation
4921  *
4922  * The host allocates 32 DMA target addresses and passes the host address
4923  * to the firmware at register IPW_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
4924  * 0 to 31
4925  *
4926  * Rx Queue Indexes
4927  * The host/firmware share two index registers for managing the Rx buffers.
4928  *
4929  * The READ index maps to the first position that the firmware may be writing
4930  * to -- the driver can read up to (but not including) this position and get
4931  * good data.
4932  * The READ index is managed by the firmware once the card is enabled.
4933  *
4934  * The WRITE index maps to the last position the driver has read from -- the
4935  * position preceding WRITE is the last slot the firmware can place a packet.
4936  *
4937  * The queue is empty (no good data) if WRITE = READ - 1, and is full if
4938  * WRITE = READ.
4939  *
4940  * During initialization the host sets up the READ queue position to the first
4941  * INDEX position, and WRITE to the last (READ - 1 wrapped)
4942  *
4943  * When the firmware places a packet in a buffer it will advance the READ index
4944  * and fire the RX interrupt.  The driver can then query the READ index and
4945  * process as many packets as possible, moving the WRITE index forward as it
4946  * resets the Rx queue buffers with new memory.
4947  *
4948  * The management in the driver is as follows:
4949  * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free.  When
4950  *   ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
4951  *   to replenish the ipw->rxq->rx_free.
4952  * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the
4953  *   ipw->rxq is replenished and the READ INDEX is updated (updating the
4954  *   'processed' and 'read' driver indexes as well)
4955  * + A received packet is processed and handed to the kernel network stack,
4956  *   detached from the ipw->rxq.  The driver 'processed' index is updated.
4957  * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
4958  *   list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
4959  *   INDEX is not incremented and ipw->status(RX_STALLED) is set.  If there
4960  *   were enough free buffers and RX_STALLED is set it is cleared.
4961  *
4962  *
4963  * Driver sequence:
4964  *
4965  * ipw_rx_queue_alloc()       Allocates rx_free
4966  * ipw_rx_queue_replenish()   Replenishes rx_free list from rx_used, and calls
4967  *                            ipw_rx_queue_restock
4968  * ipw_rx_queue_restock()     Moves available buffers from rx_free into Rx
4969  *                            queue, updates firmware pointers, and updates
4970  *                            the WRITE index.  If insufficient rx_free buffers
4971  *                            are available, schedules ipw_rx_queue_replenish
4972  *
4973  * -- enable interrupts --
4974  * ISR - ipw_rx()             Detach ipw_rx_mem_buffers from pool up to the
4975  *                            READ INDEX, detaching the SKB from the pool.
4976  *                            Moves the packet buffer from queue to rx_used.
4977  *                            Calls ipw_rx_queue_restock to refill any empty
4978  *                            slots.
4979  * ...
4980  *
4981  */
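
/*
 * Illustrative sketch (not compiled) of the empty/full tests implied by the
 * index scheme described above, using the same modulo-RX_QUEUE_SIZE
 * arithmetic the driver applies when it advances rxq->write.  The helper
 * names are hypothetical.
 */
#if 0
static int example_rx_queue_empty(u32 read, u32 write)
{
	/* no readable data when WRITE sits one slot behind READ */
	return write == (read + RX_QUEUE_SIZE - 1) % RX_QUEUE_SIZE;
}

static int example_rx_queue_full(u32 read, u32 write)
{
	/* every slot is owned by the firmware when WRITE catches READ */
	return write == read;
}
#endif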
4982
4983 /*
4984  * If there are slots in the RX queue that  need to be restocked,
4985  * and we have free pre-allocated buffers, fill the ranks as much
4986  * as we can pulling from rx_free.
4987  *
4988  * This moves the 'write' index forward to catch up with 'processed', and
4989  * also updates the memory address in the firmware to reference the new
4990  * target buffer.
4991  */
4992 static void ipw_rx_queue_restock(struct ipw_priv *priv)
4993 {
4994         struct ipw_rx_queue *rxq = priv->rxq;
4995         struct list_head *element;
4996         struct ipw_rx_mem_buffer *rxb;
4997         unsigned long flags;
4998         int write;
4999
5000         spin_lock_irqsave(&rxq->lock, flags);
5001         write = rxq->write;
5002         while ((rxq->write != rxq->processed) && (rxq->free_count)) {
5003                 element = rxq->rx_free.next;
5004                 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5005                 list_del(element);
5006
5007                 ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
5008                             rxb->dma_addr);
5009                 rxq->queue[rxq->write] = rxb;
5010                 rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
5011                 rxq->free_count--;
5012         }
5013         spin_unlock_irqrestore(&rxq->lock, flags);
5014
5015         /* If the pre-allocated buffer pool is dropping low, schedule to
5016          * refill it */
5017         if (rxq->free_count <= RX_LOW_WATERMARK)
5018                 queue_work(priv->workqueue, &priv->rx_replenish);
5019
5020         /* If we've added more space for the firmware to place data, tell it */
5021         if (write != rxq->write)
5022                 ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write);
5023 }
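
/*
 * A worked example of the restock loop above, assuming RX_QUEUE_SIZE is 32:
 * with write = 30, processed = 2 and five buffers on rx_free, the loop stocks
 * slots 30, 31, 0 and 1, stops once write catches up with processed, and then
 * announces the new buffers by writing the advanced index to
 * IPW_RX_WRITE_INDEX.
 */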
5024
5025 /*
5026  * Move all used packet from rx_used to rx_free, allocating a new SKB for each.
5027  * Also restock the Rx queue via ipw_rx_queue_restock.
5028  *
5029  * This is called as a scheduled work item (except during initialization)
5030  */
5031 static void ipw_rx_queue_replenish(void *data)
5032 {
5033         struct ipw_priv *priv = data;
5034         struct ipw_rx_queue *rxq = priv->rxq;
5035         struct list_head *element;
5036         struct ipw_rx_mem_buffer *rxb;
5037         unsigned long flags;
5038
5039         spin_lock_irqsave(&rxq->lock, flags);
5040         while (!list_empty(&rxq->rx_used)) {
5041                 element = rxq->rx_used.next;
5042                 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5043                 rxb->skb = alloc_skb(IPW_RX_BUF_SIZE, GFP_ATOMIC);
5044                 if (!rxb->skb) {
5045                         printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n",
5046                                priv->net_dev->name);
5047                         /* We don't reschedule replenish work here -- we will
5048                          * call the restock method and if it still needs
5049                          * more buffers it will schedule replenish */
5050                         break;
5051                 }
5052                 list_del(element);
5053
5054                 rxb->dma_addr =
5055                     pci_map_single(priv->pci_dev, rxb->skb->data,
5056                                    IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5057
5058                 list_add_tail(&rxb->list, &rxq->rx_free);
5059                 rxq->free_count++;
5060         }
5061         spin_unlock_irqrestore(&rxq->lock, flags);
5062
5063         ipw_rx_queue_restock(priv);
5064 }
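
/*
 * Note on the allocation above: GFP_ATOMIC is required because rxq->lock is
 * held (with interrupts disabled) across alloc_skb().  At run time this
 * routine is reached through the rx_replenish work item below, which also
 * takes priv->mutex; during initialization it is called directly.
 */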
5065
5066 static void ipw_bg_rx_queue_replenish(struct work_struct *work)
5067 {
5068         struct ipw_priv *priv =
5069                 container_of(work, struct ipw_priv, rx_replenish);
5070         mutex_lock(&priv->mutex);
5071         ipw_rx_queue_replenish(priv);
5072         mutex_unlock(&priv->mutex);
5073 }
5074
5075 /* Assumes that the skb field of the buffers in 'pool' is kept accurate.
5076  * If an SKB has been detached, the pool entry must have its skb set to NULL.
5077  * This free routine walks the list of pool entries and, for any entry whose
5078  * skb is still non-NULL, unmaps the buffer and frees the SKB.
5079  */
5080 static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
5081 {
5082         int i;
5083
5084         if (!rxq)
5085                 return;
5086
5087         for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
5088                 if (rxq->pool[i].skb != NULL) {
5089                         pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
5090                                          IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5091                         dev_kfree_skb(rxq->pool[i].skb);
5092                 }
5093         }
5094
5095         kfree(rxq);
5096 }
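
/*
 * A minimal sketch (not compiled) of the detach convention the comment above
 * relies on: whoever hands an SKB up the stack must also clear the pool
 * entry, so this routine will not unmap or free it a second time.
 */
#if 0
static void example_detach_rx_skb(struct ipw_rx_mem_buffer *rxb)
{
	/* ... rxb->skb is handed to the stack here (the Rx path uses
	 * ieee80211_rx()) ... */
	rxb->skb = NULL;	/* mark the pool entry as detached */
}
#endif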
5097
5098 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
5099 {
5100         struct ipw_rx_queue *rxq;
5101         int i;
5102
5103         rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
5104         if (unlikely(!rxq)) {
5105                 IPW_ERROR("memory allocation failed\n");
5106                 return NULL;
5107         }
5108         spin_lock_init(&rxq->lock);
5109         INIT_LIST_HEAD(&rxq->rx_free);
5110         INIT_LIST_HEAD(&rxq->rx_used);
5111
5112         /* Fill the rx_used queue with _all_ of the Rx buffers */
5113         for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
5114                 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
5115
5116         /* Set the indexes so that we appear to have processed and used all
5117          * buffers, but have not yet restocked the Rx queue with fresh buffers */
5118         rxq->read = rxq->write = 0;
5119         rxq->processed = RX_QUEUE_SIZE - 1;
5120         rxq->free_count = 0;
5121
5122         return rxq;
5123 }
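
/*
 * On the initial state chosen above: with read = write = 0,
 * processed = RX_QUEUE_SIZE - 1 and free_count = 0, the restock loop sees
 * room for RX_QUEUE_SIZE - 1 buffers but nothing yet on rx_free, so the first
 * ipw_rx_queue_replenish() call allocates the SKBs and the restock that
 * follows hands slots 0 through RX_QUEUE_SIZE - 2 to the firmware.
 */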
5124
5125 static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate)
5126 {
5127         rate &= ~IEEE80211_BASIC_RATE_MASK;
5128         if (ieee_mode == IEEE_A) {
5129                 switch (rate) {
5130                 case IEEE80211_OFDM_RATE_6MB:
5131                         return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ?
5132                             1 : 0;
5133                 case IEEE80211_OFDM_RATE_9MB:
5134                         return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ?
5135                             1 : 0;
5136                 case IEEE80211_OFDM_RATE_12MB:
5137                         return priv->rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ?
5138                             1 : 0;
5139                 case IEEE80211_OFDM_RATE_18MB:
5140                         return priv->rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ?
5141                             1 : 0;
5142                 case IEEE80211_OFDM_RATE_24MB:
5143                         return priv->rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ?
5144                             1 : 0;
5145                 case IEEE80211_OFDM_RATE_36MB:
5146                         return priv->rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ?
5147                             1 : 0;
5148                 case IEEE80211_OFDM_RATE_48MB:
5149                         return priv->rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ?
5150                             1 : 0;
5151                 case IEEE80211_OFDM_RATE_54MB:
5152                         return priv->rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ?
5153                             1 : 0;
5154                 default:
5155                         return 0;
5156                 }
5157         }
5158
5159         /* B and G mixed */
5160         switch (rate) {
5161         case IEEE80211_CCK_RATE_1MB:
5162                 return priv->rates_mask & IEEE80211_CCK_RATE_1MB_MASK ? 1 : 0;
5163         case IEEE80211_CCK_RATE_2MB:
5164                 return priv->rates_mask & IEEE80211_CCK_RATE_2MB_MASK ? 1 : 0;
5165         case IEEE80211_CCK_RATE_5MB:
5166                 return priv->rates_mask & IEEE80211_CCK_RATE_5MB_MASK ? 1 : 0;
5167         case IEEE80211_CCK_RATE_11MB:
5168                 return priv->rates_mask & IEEE80211_CCK_RATE_11MB_MASK ? 1 : 0;
5169         }
5170
5171         /* If we are limited to B modulations, bail at this point */
5172         if (ieee_mode == IEEE_B)
5173                 return 0;
5174
5175         /* G */
5176         switch (rate) {
5177         case IEEE80211_OFDM_RATE_6MB:
5178                 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ? 1 : 0;
5179         case IEEE80211_OFDM_RATE_9MB:
5180                 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ? 1 : 0;
5181         case IEEE80211_OFDM_RATE_12MB:
5182                 return priv->rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
5183         case IEEE80211_OFDM_RATE_18MB:
5184                 return priv->rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
5185         case IEEE80211_OFDM_RATE_24MB:
5186                 return priv->rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
5187         case IEEE80211_OFDM_RATE_36MB:
5188                 return priv->rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
5189         case IEEE80211_OFDM_RATE_48MB:
5190                 return priv->rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
5191         case IEEE80211_OFDM_RATE_54MB:
5192                 return priv->rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
5193         }
5194
5195         return 0;
5196 }
5197
5198 static int ipw_compatible_rates(struct ipw_priv *priv,
5199                                 const struct ieee80211_network *network,
5200                                 struct ipw_supported_rates *rates)
5201 {
5202         int num_rates, i;
5203
5204         memset(rates, 0, sizeof(*rates));
5205         num_rates = min(network->rates_len, (u8) IPW_MAX_RATES);
5206         rates->num_rates = 0;
5207         for (i = 0; i < num_rates; i++) {
5208                 if (!ipw_is_rate_in_mask(priv, network->mode,
5209                                          network->rates[i])) {
5210
5211                         if (network->rates[i] & IEEE80211_BASIC_RATE_MASK) {
5212                                 IPW_DEBUG_SCAN("Adding masked mandatory "
5213                                                "rate %02X\n",
5214                                                network->rates[i]);
5215                                 rates->supported_rates[rates->num_rates++] =
5216                                     network->rates[i];
5217                                 continue;
5218                         }
5219
5220                         IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5221                                        network->rates[i], priv->rates_mask);
5222                         continue;
5223                 }
5224
5225                 rates->supported_rates[rates->num_rates++] = network->rates[i];
5226         }
5227
5228         num_rates = min(network->rates_ex_len,
5229                         (u8) (IPW_MAX_RATES - num_rates));
5230         for (i = 0; i < num_rates; i++) {
5231                 if (!ipw_is_rate_in_mask(priv, network->mode,
5232                                          network->rates_ex[i])) {
5233                         if (network->rates_ex[i] & IEEE80211_BASIC_RATE_MASK) {
5234                                 IPW_DEBUG_SCAN("Adding masked mandatory "
5235                                                "rate %02X\n",
5236                                                network->rates_ex[i]);
5237                                 rates->supported_rates[rates->num_rates++] =
5238                                     network->rates_ex[i];
5239                                 continue;
5240                         }
5241
5242                         IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5243                                        network->rates_ex[i], priv->rates_mask);
5244                         continue;
5245                 }
5246
5247                 rates->supported_rates[rates->num_rates++] =
5248                     network->rates_ex[i];
5249         }
5250
5251         return 1;
5252 }
5253
5254 static void ipw_copy_rates(struct ipw_supported_rates *dest,
5255                                   const struct ipw_supported_rates *src)
5256 {
5257         u8 i;
5258         for (i = 0; i < src->num_rates; i++)
5259                 dest->supported_rates[i] = src->supported_rates[i];
5260         dest->num_rates = src->num_rates;
5261 }
5262
5263 /* TODO: Look at sniffed packets in the air to determine if the basic rate
5264  * mask should ever be used -- right now every caller adds the scan rates with
5265  * modulation = CCK, so BASIC_RATE_MASK is never set (see the sketch below)... */
5266 static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates,
5267                                    u8 modulation, u32 rate_mask)
5268 {
5269         u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
5270             IEEE80211_BASIC_RATE_MASK : 0;
5271
5272         if (rate_mask & IEEE80211_CCK_RATE_1MB_MASK)
5273                 rates->supported_rates[rates->num_rates++] =
5274                     IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
5275
5276         if (rate_mask & IEEE80211_CCK_RATE_2MB_MASK)
5277                 rates->supported_rates[rates->num_rates++] =
5278                     IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
5279
5280         if (rate_mask & IEEE80211_CCK_RATE_5MB_MASK)
5281                 rates->supported_rates[rates->num_rates++] = basic_mask |
5282                     IEEE80211_CCK_RATE_5MB;
5283
5284         if (rate_mask & IEEE80211_CCK_RATE_11MB_MASK)
5285                 rates->supported_rates[rates->num_rates++] = basic_mask |
5286                     IEEE80211_CCK_RATE_11MB;
5287 }
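
/*
 * Illustrative sketch (not compiled) of the supported-rate byte encoding that
 * the CCK/OFDM scan-rate helpers and ipw_is_rate_in_mask() rely on: rates are
 * expressed in units of 500 kb/s, and the top bit (IEEE80211_BASIC_RATE_MASK)
 * flags a basic/mandatory rate.
 */
#if 0
static void example_basic_rate_encoding(void)
{
	/* 11 Mb/s = 22 * 500 kb/s = 0x16; flag it as a basic rate (0x96) */
	u8 rate = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_11MB;

	/* strip the flag before comparing against priv->rates_mask, exactly
	 * as ipw_is_rate_in_mask() does */
	rate &= ~IEEE80211_BASIC_RATE_MASK;	/* back to 0x16 */
}
#endif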
5288
5289 static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates,
5290                                     u8 modulation, u32 rate_mask)
5291 {
5292         u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
5293             IEEE80211_BASIC_RATE_MASK : 0;
5294
5295         if (rate_mask & IEEE80211_OFDM_RATE_6MB_MASK)
5296                 rates->supported_rates[rates->num_rates++] = basic_mask |
5297                     IEEE80211_OFDM_RATE_6MB;
5298
5299         if (rate_mask & IEEE80211_OFDM_RATE_9MB_MASK)
5300                 rates->supported_rates[rates->num_rates++] =
5301                     IEEE80211_OFDM_RATE_9MB;
5302
5303         if (rate_mask & IEEE80211_OFDM_RATE_12MB_MASK)
5304                 rates->supported_rates[rates->num_rates++] = basic_mask |
5305                     IEEE80211_OFDM_RATE_12MB;
5306
5307         if (rate_mask & IEEE80211_OFDM_RATE_18MB_MASK)
5308                 rates->supported_rates[rates->num_rates++] =
5309                     IEEE80211_OFDM_RATE_18MB;
5310
5311         if (rate_mask & IEEE80211_OFDM_RATE_24MB_MASK)
5312                 rates->supported_rates[rates->num_rates++] = basic_mask |
5313                     IEEE80211_OFDM_RATE_24MB;
5314
5315         if (rate_mask & IEEE80211_OFDM_RATE_36MB_MASK)
5316                 rates->supported_rates[rates->num_rates++] =
5317                     IEEE80211_OFDM_RATE_36MB;
5318
5319         if (rate_mask & IEEE80211_OFDM_RATE_48MB_MASK)
5320                 rates->supported_rates[rates->num_rates++] =
5321                     IEEE80211_OFDM_RATE_48MB;
5322
5323         if (rate_mask & IEEE80211_OFDM_RATE_54MB_MASK)
5324                 rates->supported_rates[rates->num_rates++] =
5325                     IEEE80211_OFDM_RATE_54MB;
5326 }
5327
5328 struct ipw_network_match {
5329         struct ieee80211_network *network;
5330         struct ipw_supported_rates rates;
5331 };
5332
5333 static int ipw_find_adhoc_network(struct ipw_priv *priv,
5334                                   struct ipw_network_match *match,
5335                                   struct ieee80211_network *network,
5336                                   int roaming)
5337 {
5338         struct ipw_supported_rates rates;
5339
5340         /* Verify that this network's capability is compatible with the
5341          * current mode (AdHoc or Infrastructure) */
5342         if ((priv->ieee->iw_mode == IW_MODE_ADHOC &&
5343              !(network->capability & WLAN_CAPABILITY_IBSS))) {
5344                 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded due to "
5345                                 "capability mismatch.\n",
5346                                 escape_essid(network->ssid, network->ssid_len),
5347                                 MAC_ARG(network->bssid));
5348                 return 0;
5349         }
5350
5351         /* If we do not have an ESSID for this AP, we can not associate with
5352          * it */
5353         if (network->flags & NETWORK_EMPTY_ESSID) {
5354                 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5355                                 "because of hidden ESSID.\n",
5356                                 escape_essid(network->ssid, network->ssid_len),
5357                                 MAC_ARG(network->bssid));
5358                 return 0;
5359         }
5360
5361         if (unlikely(roaming)) {
5362                 /* If we are roaming, check whether this is a valid
5363                  * network to try to roam to */
5364                 if ((network->ssid_len != match->network->ssid_len) ||
5365                     memcmp(network->ssid, match->network->ssid,
5366                            network->ssid_len)) {
5367                         IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5368                                         "because of non-network ESSID.\n",
5369                                         escape_essid(network->ssid,
5370                                                      network->ssid_len),
5371                                         MAC_ARG(network->bssid));
5372                         return 0;
5373                 }
5374         } else {
5375                 /* If an ESSID has been configured then compare the broadcast
5376                  * ESSID to ours */
5377                 if ((priv->config & CFG_STATIC_ESSID) &&
5378                     ((network->ssid_len != priv->essid_len) ||
5379                      memcmp(network->ssid, priv->essid,
5380                             min(network->ssid_len, priv->essid_len)))) {
5381                         char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5382
5383                         strncpy(escaped,
5384                                 escape_essid(network->ssid, network->ssid_len),
5385                                 sizeof(escaped));
5386                         IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5387                                         "because of ESSID mismatch: '%s'.\n",
5388                                         escaped, MAC_ARG(network->bssid),
5389                                         escape_essid(priv->essid,
5390                                                      priv->essid_len));
5391                         return 0;
5392                 }
5393         }
5394
5395         /* If the current network's TSF timestamp is newer than this one's,
5396          * don't bother testing everything else. */
5397
5398         if (network->time_stamp[0] < match->network->time_stamp[0]) {
5399                 IPW_DEBUG_MERGE("Network '%s' excluded because newer than "
5400                                 "current network.\n",
5401                                 escape_essid(match->network->ssid,
5402                                              match->network->ssid_len));
5403                 return 0;
5404         } else if (network->time_stamp[1] < match->network->time_stamp[1]) {
5405                 IPW_DEBUG_MERGE("Network '%s' excluded because newer than "
5406                                 "current network.\n",
5407                                 escape_essid(match->network->ssid,
5408                                              match->network->ssid_len));
5409                 return 0;
5410         }
5411
5412         /* Now go through and see if the requested network is valid... */
5413         if (priv->ieee->scan_age != 0 &&
5414             time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5415                 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5416                                 "because of age: %ums.\n",
5417                                 escape_essid(network->ssid, network->ssid_len),
5418                                 MAC_ARG(network->bssid),
5419                                 jiffies_to_msecs(jiffies -
5420                                                  network->last_scanned));
5421                 return 0;
5422         }
5423
5424         if ((priv->config & CFG_STATIC_CHANNEL) &&
5425             (network->channel != priv->channel)) {
5426                 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5427                                 "because of channel mismatch: %d != %d.\n",
5428                                 escape_essid(network->ssid, network->ssid_len),
5429                                 MAC_ARG(network->bssid),
5430                                 network->channel, priv->channel);
5431                 return 0;
5432         }
5433
5434         /* Verify privacy compatibility */
5435         if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5436             ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5437                 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5438                                 "because of privacy mismatch: %s != %s.\n",
5439                                 escape_essid(network->ssid, network->ssid_len),
5440                                 MAC_ARG(network->bssid),
5441                                 priv->capability & CAP_PRIVACY_ON ?
5442                                 "on" : "off",
5443                                 network->capability & WLAN_CAPABILITY_PRIVACY ?
5444                                 "on" : "off");
5446                 return 0;
5447         }
5448
5449         if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5450                 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5451                                 "because of the same BSSID match: " MAC_FMT
5452                                 ".\n", escape_essid(network->ssid,
5453                                                     network->ssid_len),
5454                                 MAC_ARG(network->bssid), MAC_ARG(priv->bssid));
5455                 return 0;
5456         }
5457
5458         /* Filter out any incompatible freq / mode combinations */
5459         if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
5460                 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5461                                 "because of invalid frequency/mode "
5462                                 "combination.\n",
5463                                 escape_essid(network->ssid, network->ssid_len),
5464                                 MAC_ARG(network->bssid));
5465                 return 0;
5466         }
5467
5468         /* Ensure that the rates supported by the driver are compatible with
5469          * this AP, including verification of basic rates (mandatory) */
5470         if (!ipw_compatible_rates(priv, network, &rates)) {
5471                 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5472                                 "because configured rate mask excludes "
5473                                 "AP mandatory rate.\n",
5474                                 escape_essid(network->ssid, network->ssid_len),
5475                                 MAC_ARG(network->bssid));
5476                 return 0;
5477         }
5478
5479         if (rates.num_rates == 0) {
5480                 IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' excluded "
5481                                 "because of no compatible rates.\n",
5482                                 escape_essid(network->ssid, network->ssid_len),
5483                                 MAC_ARG(network->bssid));
5484                 return 0;
5485         }
5486
5487         /* TODO: Perform any further minimal comparative tests.  We do not
5488          * want to put too much policy logic here; intelligent scan selection
5489          * should occur within a generic IEEE 802.11 user space tool.  */
5490
5491         /* Set up 'new' AP to this network */
5492         ipw_copy_rates(&match->rates, &rates);
5493         match->network = network;
5494         IPW_DEBUG_MERGE("Network '%s (" MAC_FMT ")' is a viable match.\n",
5495                         escape_essid(network->ssid, network->ssid_len),
5496                         MAC_ARG(network->bssid));
5497
5498         return 1;
5499 }
5500
5501 static void ipw_merge_adhoc_network(struct work_struct *work)
5502 {
5503         struct ipw_priv *priv =
5504                 container_of(work, struct ipw_priv, merge_networks);
5505         struct ieee80211_network *network = NULL;
5506         struct ipw_network_match match = {
5507                 .network = priv->assoc_network
5508         };
5509
5510         if ((priv->status & STATUS_ASSOCIATED) &&
5511             (priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5512                 /* First pass through ROAM process -- look for a better
5513                  * network */
5514                 unsigned long flags;
5515
5516                 spin_lock_irqsave(&priv->ieee->lock, flags);
5517                 list_for_each_entry(network, &priv->ieee->network_list, list) {
5518                         if (network != priv->assoc_network)
5519                                 ipw_find_adhoc_network(priv, &match, network,
5520                                                        1);
5521                 }
5522                 spin_unlock_irqrestore(&priv->ieee->lock, flags);
5523
5524                 if (match.network == priv->assoc_network) {
5525                         IPW_DEBUG_MERGE("No better ad-hoc network to "
5526                                         "merge to.\n");
5527                         return;
5528                 }
5529
5530                 mutex_lock(&priv->mutex);
5531                 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
5532                         IPW_DEBUG_MERGE("remove network %s\n",
5533                                         escape_essid(priv->essid,
5534                                                      priv->essid_len));
5535                         ipw_remove_current_network(priv);
5536                 }
5537
5538                 ipw_disassociate(priv);
5539                 priv->assoc_network = match.network;
5540                 mutex_unlock(&priv->mutex);
5541                 return;
5542         }
5543 }
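
/*
 * Added illustrative note (not part of the original source):
 * ipw_merge_adhoc_network() is a workqueue handler, so elsewhere in the
 * driver it would be bound to priv->merge_networks with INIT_WORK() and
 * kicked with schedule_work() once a candidate IBSS shows up in a scan.
 * The call sites below are assumptions; only the
 * container_of(work, struct ipw_priv, merge_networks) pattern above
 * implies this wiring.
 *
 *	INIT_WORK(&priv->merge_networks, ipw_merge_adhoc_network);
 *	...
 *	schedule_work(&priv->merge_networks);
 */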
5544
5545 static int ipw_best_network(struct ipw_priv *priv,
5546                             struct ipw_network_match *match,
5547                             struct ieee80211_network *network, int roaming)
5548 {
5549         struct ipw_supported_rates rates;
5550
5551         /* Verify that this network's capability is compatible with the
5552          * current mode (AdHoc or Infrastructure) */
5553         if ((priv->ieee->iw_mode == IW_MODE_INFRA &&
5554              !(network->capability & WLAN_CAPABILITY_ESS)) ||
5555             (priv->ieee->iw_mode == IW_MODE_ADHOC &&
5556              !(network->capability & WLAN_CAPABILITY_IBSS))) {
5557                 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded due to "
5558                                 "capability mismatch.\n",
5559                                 escape_essid(network->ssid, network->ssid_len),
5560                                 MAC_ARG(network->bssid));
5561                 return 0;
5562         }
5563
5564         /* If we do not have an ESSID for this AP, we cannot associate with
5565          * it */
5566         if (network->flags & NETWORK_EMPTY_ESSID) {
5567                 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5568                                 "because of hidden ESSID.\n",
5569                                 escape_essid(network->ssid, network->ssid_len),
5570                                 MAC_ARG(network->bssid));
5571                 return 0;
5572         }
5573
5574         if (unlikely(roaming)) {
5575                 /* If we are roaming, then check that this is a valid
5576                  * network to try to roam to */
5577                 if ((network->ssid_len != match->network->ssid_len) ||
5578                     memcmp(network->ssid, match->network->ssid,
5579                            network->ssid_len)) {
5580                         IPW_DEBUG_ASSOC("Netowrk '%s (" MAC_FMT ")' excluded "
5581                                         "because of non-network ESSID.\n",
5582                                         escape_essid(network->ssid,
5583                                                      network->ssid_len),
5584                                         MAC_ARG(network->bssid));
5585                         return 0;
5586                 }
5587         } else {
5588                 /* If an ESSID has been configured then compare the broadcast
5589                  * ESSID to ours */
5590                 if ((priv->config & CFG_STATIC_ESSID) &&
5591                     ((network->ssid_len != priv->essid_len) ||
5592                      memcmp(network->ssid, priv->essid,
5593                             min(network->ssid_len, priv->essid_len)))) {
5594                         char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5595                         strncpy(escaped,
5596                                 escape_essid(network->ssid, network->ssid_len),
5597                                 sizeof(escaped));
5598                         IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5599                                         "because of ESSID mismatch: '%s'.\n",
5600                                         escaped, MAC_ARG(network->bssid),
5601                                         escape_essid(priv->essid,
5602                                                      priv->essid_len));
5603                         return 0;
5604                 }
5605         }
5606
5607         /* If the previously matched network has a stronger signal than this
5608          * one, don't bother testing everything else. */
5609         if (match->network && match->network->stats.rssi > network->stats.rssi) {
5610                 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5611                 strncpy(escaped,
5612                         escape_essid(network->ssid, network->ssid_len),
5613                         sizeof(escaped));
5614                 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded because "
5615                                 "'%s (" MAC_FMT ")' has a stronger signal.\n",
5616                                 escaped, MAC_ARG(network->bssid),
5617                                 escape_essid(match->network->ssid,
5618                                              match->network->ssid_len),
5619                                 MAC_ARG(match->network->bssid));
5620                 return 0;
5621         }
5622
5623         /* If this network has already had an association attempt within the
5624          * last 3 seconds, do not try and associate again... */
5625         if (network->last_associate &&
5626             time_after(network->last_associate + (HZ * 3UL), jiffies)) {
5627                 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5628                                 "because of storming (%ums since last "
5629                                 "assoc attempt).\n",
5630                                 escape_essid(network->ssid, network->ssid_len),
5631                                 MAC_ARG(network->bssid),
5632                                 jiffies_to_msecs(jiffies -
5633                                                  network->last_associate));
5634                 return 0;
5635         }
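
        /*
         * Added worked example (hypothetical values): with HZ == 250,
         * "HZ * 3UL" is 750 ticks.  If the last attempt was 500 ticks ago,
         * time_after(network->last_associate + 750, jiffies) is true and the
         * network is skipped; jiffies_to_msecs(500) reports 2000 ms in the
         * debug message above.
         */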
5636
5637         /* Skip entries whose scan result is older than the configured scan age... */
5638         if (priv->ieee->scan_age != 0 &&
5639             time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5640                 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5641                                 "because of age: %ums.\n",
5642                                 escape_essid(network->ssid, network->ssid_len),
5643                                 MAC_ARG(network->bssid),
5644                                 jiffies_to_msecs(jiffies -
5645                                                  network->last_scanned));
5646                 return 0;
5647         }
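
        /*
         * Added example (hypothetical values): if priv->ieee->scan_age is
         * 30 * HZ and this entry was last seen 45 seconds ago, the
         * time_after() test above fires and the entry is excluded until a
         * new scan refreshes network->last_scanned.
         */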
5648
5649         if ((priv->config & CFG_STATIC_CHANNEL) &&
5650             (network->channel != priv->channel)) {
5651                 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5652                                 "because of channel mismatch: %d != %d.\n",
5653                                 escape_essid(network->ssid, network->ssid_len),
5654                                 MAC_ARG(network->bssid),
5655                                 network->channel, priv->channel);
5656                 return 0;
5657         }
5658
5659         /* Verify privacy compatibility */
5660         if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5661             ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5662                 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5663                                 "because of privacy mismatch: %s != %s.\n",
5664                                 escape_essid(network->ssid, network->ssid_len),
5665                                 MAC_ARG(network->bssid),
5666                                 priv->capability & CAP_PRIVACY_ON ? "on" :
5667                                 "off",
5668                                 network->capability &
5669                                 WLAN_CAPABILITY_PRIVACY ? "on" : "off");
5670                 return 0;
5671         }
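
        /*
         * Added note: the "? 1 : 0" normalisation above is what makes the
         * two tests comparable as booleans.  CAP_PRIVACY_ON (a driver flag)
         * and WLAN_CAPABILITY_PRIVACY (an 802.11 capability bit) are not
         * assumed to occupy the same bit position, so comparing the raw
         * masked values directly could report a mismatch even when both
         * sides have privacy enabled.
         */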
5672
5673         if ((priv->config & CFG_STATIC_BSSID) &&
5674             memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5675                 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5676                                 "because of BSSID mismatch: " MAC_FMT ".\n",
5677                                 escape_essid(network->ssid, network->ssid_len),
5678                                 MAC_ARG(network->bssid), MAC_ARG(priv->bssid));
5679                 return 0;
5680         }
5681
5682         /* Filter out any incompatible freq / mode combinations */
5683         if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
5684                 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5685                                 "because of invalid frequency/mode "
5686                                 "combination.\n",
5687                                 escape_essid(network->ssid, network->ssid_len),
5688                                 MAC_ARG(network->bssid));
5689                 return 0;
5690         }
5691
5692         /* Filter out invalid channel in current GEO */
5693         if (!ieee80211_is_valid_channel(priv->ieee, network->channel)) {
5694                 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5695                                 "because of invalid channel in current GEO\n",
5696                                 escape_essid(network->ssid, network->ssid_len),
5697                                 MAC_ARG(network->bssid));
5698                 return 0;
5699         }
5700
5701         /* Ensure that the rates supported by the driver are compatible with
5702          * this AP, including verification of basic rates (mandatory) */
5703         if (!ipw_compatible_rates(priv, network, &rates)) {
5704                 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5705                                 "because configured rate mask excludes "
5706                                 "AP mandatory rate.\n",
5707                                 escape_essid(network->ssid, network->ssid_len),
5708                                 MAC_ARG(network->bssid));
5709                 return 0;
5710         }
5711
5712         if (rates.num_rates == 0) {
5713                 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
5714                                 "because of no compatible rates.\n",
5715                                 escape_essid(network->ssid, network->ssid_len),
5716                                 MAC_ARG(network->bssid));
5717                 return 0;
5718         }
5719
5720         /* TODO: Perform any further minimal comparative tests.  We do not
5721          * want to put too much policy logic here; intelligent scan selection
5722          * should occur within a generic IEEE 802.11 user space tool.  */
5723
5724         /* Set up 'new' AP to this network */
5725         ipw_copy_rates(&match->rates, &rates);
5726         match->network = network;
5727
5728         IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' is a viable match.\n",
5729                         escape_essid(network->ssid, network->ssid_len),
5730                         MAC_ARG(network->bssid));
5731
5732         return 1;
5733 }
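
/*
 * Added illustrative caller sketch (an assumption, modelled on the ad-hoc
 * merge loop earlier in this file rather than quoting the real association
 * path): the association logic would walk priv->ieee->network_list under
 * priv->ieee->lock and let ipw_best_network() progressively refine the match.
 *
 *	struct ipw_network_match match = { .network = NULL };
 *	struct ieee80211_network *network;
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&priv->ieee->lock, flags);
 *	list_for_each_entry(network, &priv->ieee->network_list, list)
 *		ipw_best_network(priv, &match, network, 0);
 *	spin_unlock_irqrestore(&priv->ieee->lock, flags);
 */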
5734
5735 static void ipw_adhoc_create(struct ipw_priv *priv,
5736                              struct ieee80211_network *network)
5737 {
5738         const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
5739         int i;
5740
5741         /*
5742          * For the purposes of scanning, we can set our wireless mode
5743          * to trigger scans across combinations of bands, but when it
5744          * comes to creating a new ad-hoc network, we have to tell the FW
5745          * exactly which band to use.
5746          *
5747          * We also have the possibility of an invalid channel for the
5748          * chosen band.  Attempting to create a new ad-hoc network
5749          * with an invalid channel for wireless mode will trigger a
5750          * FW fatal error.
5751          *
5752          */
5753         switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
5754         case IEEE80211_52GHZ_BAND:
5755                 network->mode = IEEE_A;
5756