1 /******************************************************************************
2  *
3  * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved.
4  *
5  * Portions of this file are derived from the ipw3945 project, as well
6  * as portions of the ieee80211 subsystem header files.
7  *
8  * This program is free software; you can redistribute it and/or modify it
9  * under the terms of version 2 of the GNU General Public License as
10  * published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but WITHOUT
13  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
15  * more details.
16  *
17  * You should have received a copy of the GNU General Public License along with
18  * this program; if not, write to the Free Software Foundation, Inc.,
19  * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
20  *
21  * The full GNU General Public License is included in this distribution in the
22  * file called LICENSE.
23  *
24  * Contact Information:
25  * James P. Ketrenos <ipw2100-admin@linux.intel.com>
26  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27  *
28  *****************************************************************************/
29
30 /*
31  * NOTE:  This file (iwl-base.c) is used to build multiple hardware targets
32  * by defining IWL to either 3945 or 4965.  The Makefile used when building
33  * the base targets will create base-3945.o and base-4965.o
34  *
35  * The eventual goal is to move as many of the #if IWL / #endif blocks out of
36  * this file and into the hardware specific implementation files (iwl-XXXX.c)
37  * and leave only the common (non #ifdef sprinkled) code in this file
38  */
39
40 #include <linux/kernel.h>
41 #include <linux/module.h>
42 #include <linux/version.h>
43 #include <linux/init.h>
44 #include <linux/pci.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/delay.h>
47 #include <linux/skbuff.h>
48 #include <linux/netdevice.h>
49 #include <linux/wireless.h>
50 #include <linux/firmware.h>
53 #include <linux/etherdevice.h>
54 #include <linux/if_arp.h>
55
56 #include <net/ieee80211_radiotap.h>
57 #include <net/mac80211.h>
58
59 #include <asm/div64.h>
60
61 #define IWL 3945
62
63 #include "iwlwifi.h"
64 #include "iwl-3945.h"
65 #include "iwl-helpers.h"
66
67 #ifdef CONFIG_IWLWIFI_DEBUG
68 u32 iwl_debug_level;
69 #endif
70
71 /******************************************************************************
72  *
73  * module boilerplate
74  *
75  ******************************************************************************/
76
77 /* module parameters */
78 int iwl_param_disable_hw_scan;
79 int iwl_param_debug;
80 int iwl_param_disable;      /* def: enable radio */
81 int iwl_param_antenna;      /* def: 0 = both antennas (use diversity) */
82 int iwl_param_hwcrypto;     /* def: using software encryption */
83 int iwl_param_qos_enable = 1;
84 int iwl_param_queues_num = IWL_MAX_NUM_QUEUES;
85
86 /*
87  * module name, copyright, version, etc.
88  * NOTE: DRV_NAME is defined in iwlwifi.h for use by iwl-debug.h and printk
89  */
90
91 #define DRV_DESCRIPTION \
92 "Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux"
93
94 #ifdef CONFIG_IWLWIFI_DEBUG
95 #define VD "d"
96 #else
97 #define VD
98 #endif
99
100 #ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT
101 #define VS "s"
102 #else
103 #define VS
104 #endif
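/* VD and VS append "d" and "s" to the version string when debugging and
 * spectrum measurement support are compiled in, e.g. "1.1.17kds" when both
 * options are enabled. */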
105
106 #define IWLWIFI_VERSION "1.1.17k" VD VS
107 #define DRV_COPYRIGHT   "Copyright(c) 2003-2007 Intel Corporation"
108 #define DRV_VERSION     IWLWIFI_VERSION
109
110 /* Change firmware file name, using "-" and incrementing number,
111  *   *only* when uCode interface or architecture changes so that it
112  *   is not compatible with earlier drivers.
113  * This number will also appear in the << 8 position of the 1st dword of the uCode file */
114 #define IWL3945_UCODE_API "-1"
115
116 MODULE_DESCRIPTION(DRV_DESCRIPTION);
117 MODULE_VERSION(DRV_VERSION);
118 MODULE_AUTHOR(DRV_COPYRIGHT);
119 MODULE_LICENSE("GPL");
120
121 __le16 *ieee80211_get_qos_ctrl(struct ieee80211_hdr *hdr)
122 {
123         u16 fc = le16_to_cpu(hdr->frame_control);
124         int hdr_len = ieee80211_get_hdrlen(fc);
125
126         if ((fc & 0x00cc) == (IEEE80211_STYPE_QOS_DATA | IEEE80211_FTYPE_DATA))
127                 return (__le16 *) ((u8 *) hdr + hdr_len - QOS_CONTROL_LEN);
128         return NULL;
129 }
130
131 static const struct ieee80211_hw_mode *iwl_get_hw_mode(
132                 struct iwl_priv *priv, int mode)
133 {
134         int i;
135
136         for (i = 0; i < 3; i++)
137                 if (priv->modes[i].mode == mode)
138                         return &priv->modes[i];
139
140         return NULL;
141 }
142
143 static int iwl_is_empty_essid(const char *essid, int essid_len)
144 {
145         /* Single white space is for Linksys APs */
146         if (essid_len == 1 && essid[0] == ' ')
147                 return 1;
148
149         /* Otherwise, if the entire essid is 0, we assume it is hidden */
150         while (essid_len) {
151                 essid_len--;
152                 if (essid[essid_len] != '\0')
153                         return 0;
154         }
155
156         return 1;
157 }
158
159 static const char *iwl_escape_essid(const char *essid, u8 essid_len)
160 {
161         static char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
162         const char *s = essid;
163         char *d = escaped;
164
165         if (iwl_is_empty_essid(essid, essid_len)) {
166                 memcpy(escaped, "<hidden>", sizeof("<hidden>"));
167                 return escaped;
168         }
169
170         essid_len = min(essid_len, (u8) IW_ESSID_MAX_SIZE);
171         while (essid_len--) {
172                 if (*s == '\0') {
173                         *d++ = '\\';
174                         *d++ = '0';
175                         s++;
176                 } else
177                         *d++ = *s++;
178         }
179         *d = '\0';
180         return escaped;
181 }
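/* For example, an ESSID containing an embedded NUL byte has that byte
 * rendered as the two characters '\' '0', while an all-zero or single-space
 * ESSID is reported as "<hidden>". */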
182
183 static void iwl_print_hex_dump(int level, void *p, u32 len)
184 {
185 #ifdef CONFIG_IWLWIFI_DEBUG
186         if (!(iwl_debug_level & level))
187                 return;
188
189         print_hex_dump(KERN_DEBUG, "iwl data: ", DUMP_PREFIX_OFFSET, 16, 1,
190                         p, len, 1);
191 #endif
192 }
193
194 /*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
195  * DMA services
196  *
197  * Theory of operation
198  *
199  * A queue is a circular buffer with 'Read' and 'Write' pointers.
200  * Two empty entries are always kept in the buffer to protect from overflow.
201  *
202  * For the Tx queue, there are low mark and high mark limits.  If, after
203  * queuing a packet for Tx, the free space becomes < low mark, the Tx queue
204  * is stopped.  When reclaiming packets (on the 'tx done' IRQ), if the free
205  * space becomes > high mark, the Tx queue is resumed.
206  *
207  * The IWL operates with six queues, one receive queue in the device's
208  * sram, one transmit queue for sending commands to the device firmware,
209  * and four transmit queues for data.
210  ***************************************************/
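/*
 * Illustration (not driver code): in a 256-entry Tx queue with the write
 * pointer at 250 and the read pointer at 10, descriptors 10..249 are owned
 * by the hardware and 16 slots are free; with the two-entry reserve the
 * driver reports only 14 as available.  If that figure drops below the low
 * mark the Tx queue is stopped until reclaim brings it back above the high
 * mark.
 */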
211
212 static int iwl_queue_space(const struct iwl_queue *q)
213 {
214         int s = q->last_used - q->first_empty;
215
216         if (q->last_used > q->first_empty)
217                 s -= q->n_bd;
218
219         if (s <= 0)
220                 s += q->n_window;
221         /* keep some reserve to not confuse empty and full situations */
222         s -= 2;
223         if (s < 0)
224                 s = 0;
225         return s;
226 }
227
228 /* XXX: n_bd must be power-of-two size */
229 static inline int iwl_queue_inc_wrap(int index, int n_bd)
230 {
231         return ++index & (n_bd - 1);
232 }
233
234 /* XXX: n_bd must be power-of-two size */
235 static inline int iwl_queue_dec_wrap(int index, int n_bd)
236 {
237         return --index & (n_bd - 1);
238 }
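/* Because n_bd is a power of two, the wrap above is a simple mask: with
 * n_bd = 256 the mask is 0xff, so incrementing index 255 yields 0 and
 * decrementing index 0 yields 255, with no branch or modulo needed. */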
239
240 static inline int x2_queue_used(const struct iwl_queue *q, int i)
241 {
242         return q->first_empty > q->last_used ?
243                 (i >= q->last_used && i < q->first_empty) :
244                 !(i < q->last_used && i >= q->first_empty);
245 }
246
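/* Regular commands are mapped to one of the n_window slots; a 'huge'
 * command (CMD_SIZE_HUGE, i.e. the large scan command) is mapped to the
 * single extra slot past n_window -- see iwl_tx_queue_init(), which adds
 * IWL_MAX_SCAN_SIZE to the command buffer of the command queue. */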
247 static inline u8 get_cmd_index(struct iwl_queue *q, u32 index, int is_huge)
248 {
249         if (is_huge)
250                 return q->n_window;
251
252         return index & (q->n_window - 1);
253 }
254
255 static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
256                           int count, int slots_num, u32 id)
257 {
258         q->n_bd = count;
259         q->n_window = slots_num;
260         q->id = id;
261
262         /* count must be power-of-two size, otherwise iwl_queue_inc_wrap
263          * and iwl_queue_dec_wrap are broken. */
264         BUG_ON(!is_power_of_2(count));
265
266         /* slots_num must be power-of-two size, otherwise
267          * get_cmd_index is broken. */
268         BUG_ON(!is_power_of_2(slots_num));
269
270         q->low_mark = q->n_window / 4;
271         if (q->low_mark < 4)
272                 q->low_mark = 4;
273
274         q->high_mark = q->n_window / 8;
275         if (q->high_mark < 2)
276                 q->high_mark = 2;
277
278         q->first_empty = q->last_used = 0;
279
280         return 0;
281 }
282
283 static int iwl_tx_queue_alloc(struct iwl_priv *priv,
284                               struct iwl_tx_queue *txq, u32 id)
285 {
286         struct pci_dev *dev = priv->pci_dev;
287
288         if (id != IWL_CMD_QUEUE_NUM) {
289                 txq->txb = kmalloc(sizeof(txq->txb[0]) *
290                                    TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
291                 if (!txq->txb) {
292                         IWL_ERROR("kmalloc for auxiliary BD "
293                                   "structures failed\n");
294                         goto error;
295                 }
296         } else
297                 txq->txb = NULL;
298
299         txq->bd = pci_alloc_consistent(dev,
300                         sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX,
301                         &txq->q.dma_addr);
302
303         if (!txq->bd) {
304                 IWL_ERROR("pci_alloc_consistent(%zd) failed\n",
305                           sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX);
306                 goto error;
307         }
308         txq->q.id = id;
309
310         return 0;
311
312  error:
313         if (txq->txb) {
314                 kfree(txq->txb);
315                 txq->txb = NULL;
316         }
317
318         return -ENOMEM;
319 }
320
321 int iwl_tx_queue_init(struct iwl_priv *priv,
322                       struct iwl_tx_queue *txq, int slots_num, u32 txq_id)
323 {
324         struct pci_dev *dev = priv->pci_dev;
325         int len;
326         int rc = 0;
327
328         /* Allocate command space plus one big command for scan, since the
329          * scan command is very large and the system will not have two scans
330          * in flight at the same time */
331         len = sizeof(struct iwl_cmd) * slots_num;
332         if (txq_id == IWL_CMD_QUEUE_NUM)
333                 len +=  IWL_MAX_SCAN_SIZE;
334         txq->cmd = pci_alloc_consistent(dev, len, &txq->dma_addr_cmd);
335         if (!txq->cmd)
336                 return -ENOMEM;
337
338         rc = iwl_tx_queue_alloc(priv, txq, txq_id);
339         if (rc) {
340                 pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);
341
342                 return -ENOMEM;
343         }
344         txq->need_update = 0;
345
346         /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
347          * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
348         BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
349         iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
350
351         iwl_hw_tx_queue_init(priv, txq);
352
353         return 0;
354 }
355
356 /**
357  * iwl_tx_queue_free - Deallocate DMA queue.
358  * @txq: Transmit queue to deallocate.
359  *
360  * Empty queue by removing and destroying all BD's.
361  * Free all buffers.  txq itself is not freed.
362  *
363  */
364 void iwl_tx_queue_free(struct iwl_priv *priv, struct iwl_tx_queue *txq)
365 {
366         struct iwl_queue *q = &txq->q;
367         struct pci_dev *dev = priv->pci_dev;
368         int len;
369
370         if (q->n_bd == 0)
371                 return;
372
373         /* first, empty all BD's */
374         for (; q->first_empty != q->last_used;
375              q->last_used = iwl_queue_inc_wrap(q->last_used, q->n_bd))
376                 iwl_hw_txq_free_tfd(priv, txq);
377
378         len = sizeof(struct iwl_cmd) * q->n_window;
379         if (q->id == IWL_CMD_QUEUE_NUM)
380                 len += IWL_MAX_SCAN_SIZE;
381
382         pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);
383
384         /* free buffers belonging to queue itself */
385         if (txq->q.n_bd)
386                 pci_free_consistent(dev, sizeof(struct iwl_tfd_frame) *
387                                     txq->q.n_bd, txq->bd, txq->q.dma_addr);
388
389         if (txq->txb) {
390                 kfree(txq->txb);
391                 txq->txb = NULL;
392         }
393
394         /* 0 fill whole structure */
395         memset(txq, 0, sizeof(*txq));
396 }
397
398 const u8 BROADCAST_ADDR[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
399
400 /*************** STATION TABLE MANAGEMENT ****
401  *
402  * NOTE:  This needs to be overhauled to better reconcile how iwl-4965.c
403  * and iwl-3945.c use iwl_hw_find_station
404  *
405  * mac80211 should also be examined to determine if sta_info is duplicating
406  * the functionality provided here
407  */
408
409 /**************************************************************/
410 #if 0 /* temporarily disabled till we add real station removal */
411 static u8 iwl_remove_station(struct iwl_priv *priv, const u8 *addr, int is_ap)
412 {
413         int index = IWL_INVALID_STATION;
414         int i;
415         unsigned long flags;
416
417         spin_lock_irqsave(&priv->sta_lock, flags);
418
419         if (is_ap)
420                 index = IWL_AP_ID;
421         else if (is_broadcast_ether_addr(addr))
422                 index = priv->hw_setting.bcast_sta_id;
423         else
424                 for (i = IWL_STA_ID; i < priv->hw_setting.max_stations; i++)
425                         if (priv->stations[i].used &&
426                             !compare_ether_addr(priv->stations[i].sta.sta.addr,
427                                                 addr)) {
428                                 index = i;
429                                 break;
430                         }
431
432         if (unlikely(index == IWL_INVALID_STATION))
433                 goto out;
434
435         if (priv->stations[index].used) {
436                 priv->stations[index].used = 0;
437                 priv->num_stations--;
438         }
439
440         BUG_ON(priv->num_stations < 0);
441
442 out:
443         spin_unlock_irqrestore(&priv->sta_lock, flags);
444         return 0;
445 }
446 #endif
447 static void iwl_clear_stations_table(struct iwl_priv *priv)
448 {
449         unsigned long flags;
450
451         spin_lock_irqsave(&priv->sta_lock, flags);
452
453         priv->num_stations = 0;
454         memset(priv->stations, 0, sizeof(priv->stations));
455
456         spin_unlock_irqrestore(&priv->sta_lock, flags);
457 }
458
459
460 u8 iwl_add_station(struct iwl_priv *priv, const u8 *addr, int is_ap, u8 flags)
461 {
462         int i;
463         int index = IWL_INVALID_STATION;
464         struct iwl_station_entry *station;
465         unsigned long flags_spin;
466         DECLARE_MAC_BUF(mac);
467         u8 rate;
468
469         spin_lock_irqsave(&priv->sta_lock, flags_spin);
470         if (is_ap)
471                 index = IWL_AP_ID;
472         else if (is_broadcast_ether_addr(addr))
473                 index = priv->hw_setting.bcast_sta_id;
474         else
475                 for (i = IWL_STA_ID; i < priv->hw_setting.max_stations; i++) {
476                         if (!compare_ether_addr(priv->stations[i].sta.sta.addr,
477                                                 addr)) {
478                                 index = i;
479                                 break;
480                         }
481
482                         if (!priv->stations[i].used &&
483                             index == IWL_INVALID_STATION)
484                                 index = i;
485                 }
486
487         /* These two conditions have the same outcome but keep them separate
488          * since they have different meanings */
489         if (unlikely(index == IWL_INVALID_STATION)) {
490                 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
491                 return index;
492         }
493
494         if (priv->stations[index].used &&
495            !compare_ether_addr(priv->stations[index].sta.sta.addr, addr)) {
496                 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
497                 return index;
498         }
499
500         IWL_DEBUG_ASSOC("Add STA ID %d: %s\n", index, print_mac(mac, addr));
501         station = &priv->stations[index];
502         station->used = 1;
503         priv->num_stations++;
504
505         memset(&station->sta, 0, sizeof(struct iwl_addsta_cmd));
506         memcpy(station->sta.sta.addr, addr, ETH_ALEN);
507         station->sta.mode = 0;
508         station->sta.sta.sta_id = index;
509         station->sta.station_flags = 0;
510
511         rate = (priv->phymode == MODE_IEEE80211A) ? IWL_RATE_6M_PLCP :
512                                 IWL_RATE_1M_PLCP | priv->hw_setting.cck_flag;
513
514         /* Turn on both antennas for the station... */
515         station->sta.rate_n_flags =
516                         iwl_hw_set_rate_n_flags(rate, RATE_MCS_ANT_AB_MSK);
517         station->current_rate.rate_n_flags =
518                         le16_to_cpu(station->sta.rate_n_flags);
519
520         spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
521         iwl_send_add_station(priv, &station->sta, flags);
522         return index;
523
524 }
525
526 /*************** DRIVER STATUS FUNCTIONS   *****/
527
528 static inline int iwl_is_ready(struct iwl_priv *priv)
529 {
530         /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
531          * set but EXIT_PENDING is not */
532         return test_bit(STATUS_READY, &priv->status) &&
533                test_bit(STATUS_GEO_CONFIGURED, &priv->status) &&
534                !test_bit(STATUS_EXIT_PENDING, &priv->status);
535 }
536
537 static inline int iwl_is_alive(struct iwl_priv *priv)
538 {
539         return test_bit(STATUS_ALIVE, &priv->status);
540 }
541
542 static inline int iwl_is_init(struct iwl_priv *priv)
543 {
544         return test_bit(STATUS_INIT, &priv->status);
545 }
546
547 static inline int iwl_is_rfkill(struct iwl_priv *priv)
548 {
549         return test_bit(STATUS_RF_KILL_HW, &priv->status) ||
550                test_bit(STATUS_RF_KILL_SW, &priv->status);
551 }
552
553 static inline int iwl_is_ready_rf(struct iwl_priv *priv)
554 {
555
556         if (iwl_is_rfkill(priv))
557                 return 0;
558
559         return iwl_is_ready(priv);
560 }
561
562 /*************** HOST COMMAND QUEUE FUNCTIONS   *****/
563
564 #define IWL_CMD(x) case x : return #x
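/* IWL_CMD() stringifies the command ID, e.g. IWL_CMD(REPLY_ALIVE) expands
 * to: case REPLY_ALIVE : return "REPLY_ALIVE"; */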
565
566 static const char *get_cmd_string(u8 cmd)
567 {
568         switch (cmd) {
569                 IWL_CMD(REPLY_ALIVE);
570                 IWL_CMD(REPLY_ERROR);
571                 IWL_CMD(REPLY_RXON);
572                 IWL_CMD(REPLY_RXON_ASSOC);
573                 IWL_CMD(REPLY_QOS_PARAM);
574                 IWL_CMD(REPLY_RXON_TIMING);
575                 IWL_CMD(REPLY_ADD_STA);
576                 IWL_CMD(REPLY_REMOVE_STA);
577                 IWL_CMD(REPLY_REMOVE_ALL_STA);
578                 IWL_CMD(REPLY_3945_RX);
579                 IWL_CMD(REPLY_TX);
580                 IWL_CMD(REPLY_RATE_SCALE);
581                 IWL_CMD(REPLY_LEDS_CMD);
582                 IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
583                 IWL_CMD(RADAR_NOTIFICATION);
584                 IWL_CMD(REPLY_QUIET_CMD);
585                 IWL_CMD(REPLY_CHANNEL_SWITCH);
586                 IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
587                 IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD);
588                 IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION);
589                 IWL_CMD(POWER_TABLE_CMD);
590                 IWL_CMD(PM_SLEEP_NOTIFICATION);
591                 IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC);
592                 IWL_CMD(REPLY_SCAN_CMD);
593                 IWL_CMD(REPLY_SCAN_ABORT_CMD);
594                 IWL_CMD(SCAN_START_NOTIFICATION);
595                 IWL_CMD(SCAN_RESULTS_NOTIFICATION);
596                 IWL_CMD(SCAN_COMPLETE_NOTIFICATION);
597                 IWL_CMD(BEACON_NOTIFICATION);
598                 IWL_CMD(REPLY_TX_BEACON);
599                 IWL_CMD(WHO_IS_AWAKE_NOTIFICATION);
600                 IWL_CMD(QUIET_NOTIFICATION);
601                 IWL_CMD(REPLY_TX_PWR_TABLE_CMD);
602                 IWL_CMD(MEASURE_ABORT_NOTIFICATION);
603                 IWL_CMD(REPLY_BT_CONFIG);
604                 IWL_CMD(REPLY_STATISTICS_CMD);
605                 IWL_CMD(STATISTICS_NOTIFICATION);
606                 IWL_CMD(REPLY_CARD_STATE_CMD);
607                 IWL_CMD(CARD_STATE_NOTIFICATION);
608                 IWL_CMD(MISSED_BEACONS_NOTIFICATION);
609         default:
610                 return "UNKNOWN";
611
612         }
613 }
614
615 #define HOST_COMPLETE_TIMEOUT (HZ / 2)
616
617 /**
618  * iwl_enqueue_hcmd - enqueue a uCode command
619  * @priv: device private data pointer
620  * @cmd: a pointer to the uCode command structure
621  *
622  * The function returns a value < 0 to indicate that the operation
623  * failed. On success, it returns the index of the command in the
624  * command queue.
625  */
626 static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
627 {
628         struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
629         struct iwl_queue *q = &txq->q;
630         struct iwl_tfd_frame *tfd;
631         u32 *control_flags;
632         struct iwl_cmd *out_cmd;
633         u32 idx;
634         u16 fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
635         dma_addr_t phys_addr;
636         int pad;
637         u16 count;
638         int ret;
639         unsigned long flags;
640
641         /* If any of the command structures ends up being larger than
642          * TFD_MAX_PAYLOAD_SIZE and is sent as a 'small' command, then
643          * we will need to increase the size of the TFD entries */
644         BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
645                !(cmd->meta.flags & CMD_SIZE_HUGE));
646
647         if (iwl_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) {
648                 IWL_ERROR("No space for Tx\n");
649                 return -ENOSPC;
650         }
651
652         spin_lock_irqsave(&priv->hcmd_lock, flags);
653
654         tfd = &txq->bd[q->first_empty];
655         memset(tfd, 0, sizeof(*tfd));
656
657         control_flags = (u32 *) tfd;
658
659         idx = get_cmd_index(q, q->first_empty, cmd->meta.flags & CMD_SIZE_HUGE);
660         out_cmd = &txq->cmd[idx];
661
662         out_cmd->hdr.cmd = cmd->id;
663         memcpy(&out_cmd->meta, &cmd->meta, sizeof(cmd->meta));
664         memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
665
666         /* At this point, the out_cmd now has all of the incoming cmd
667          * information */
668
669         out_cmd->hdr.flags = 0;
670         out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
671                         INDEX_TO_SEQ(q->first_empty));
672         if (out_cmd->meta.flags & CMD_SIZE_HUGE)
673                 out_cmd->hdr.sequence |= cpu_to_le16(SEQ_HUGE_FRAME);
674
675         phys_addr = txq->dma_addr_cmd + sizeof(txq->cmd[0]) * idx +
676                         offsetof(struct iwl_cmd, hdr);
677         iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size);
678
679         pad = U32_PAD(cmd->len);
680         count = TFD_CTL_COUNT_GET(*control_flags);
681         *control_flags = TFD_CTL_COUNT_SET(count) | TFD_CTL_PAD_SET(pad);
682
683         IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, "
684                      "%d bytes at %d[%d]:%d\n",
685                      get_cmd_string(out_cmd->hdr.cmd),
686                      out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
687                      fix_size, q->first_empty, idx, IWL_CMD_QUEUE_NUM);
688
689         txq->need_update = 1;
690         q->first_empty = iwl_queue_inc_wrap(q->first_empty, q->n_bd);
691         ret = iwl_tx_queue_update_write_ptr(priv, txq);
692
693         spin_unlock_irqrestore(&priv->hcmd_lock, flags);
694         return ret ? ret : idx;
695 }
696
697 int iwl_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
698 {
699         int ret;
700
701         BUG_ON(!(cmd->meta.flags & CMD_ASYNC));
702
703         /* An asynchronous command can not expect an SKB to be set. */
704         BUG_ON(cmd->meta.flags & CMD_WANT_SKB);
705
706         /* An asynchronous command MUST have a callback. */
707         BUG_ON(!cmd->meta.u.callback);
708
709         if (test_bit(STATUS_EXIT_PENDING, &priv->status))
710                 return -EBUSY;
711
712         ret = iwl_enqueue_hcmd(priv, cmd);
713         if (ret < 0) {
714                 IWL_ERROR("Error sending %s: iwl_enqueue_hcmd failed: %d\n",
715                           get_cmd_string(cmd->id), ret);
716                 return ret;
717         }
718         return 0;
719 }
720
721 int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
722 {
723         int cmd_idx;
724         int ret;
725         static atomic_t entry = ATOMIC_INIT(0); /* reentrance protection */
726
727         BUG_ON(cmd->meta.flags & CMD_ASYNC);
728
729          /* A synchronous command can not have a callback set. */
730         BUG_ON(cmd->meta.u.callback != NULL);
731
732         if (atomic_xchg(&entry, 1)) {
733                 IWL_ERROR("Error sending %s: Already sending a host command\n",
734                           get_cmd_string(cmd->id));
735                 return -EBUSY;
736         }
737
738         set_bit(STATUS_HCMD_ACTIVE, &priv->status);
739
740         if (cmd->meta.flags & CMD_WANT_SKB)
741                 cmd->meta.source = &cmd->meta;
742
743         cmd_idx = iwl_enqueue_hcmd(priv, cmd);
744         if (cmd_idx < 0) {
745                 ret = cmd_idx;
746                 IWL_ERROR("Error sending %s: iwl_enqueue_hcmd failed: %d\n",
747                           get_cmd_string(cmd->id), ret);
748                 goto out;
749         }
750
751         ret = wait_event_interruptible_timeout(priv->wait_command_queue,
752                         !test_bit(STATUS_HCMD_ACTIVE, &priv->status),
753                         HOST_COMPLETE_TIMEOUT);
754         if (!ret) {
755                 if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) {
756                         IWL_ERROR("Error sending %s: time out after %dms.\n",
757                                   get_cmd_string(cmd->id),
758                                   jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
759
760                         clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
761                         ret = -ETIMEDOUT;
762                         goto cancel;
763                 }
764         }
765
766         if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
767                 IWL_DEBUG_INFO("Command %s aborted: RF KILL Switch\n",
768                                get_cmd_string(cmd->id));
769                 ret = -ECANCELED;
770                 goto fail;
771         }
772         if (test_bit(STATUS_FW_ERROR, &priv->status)) {
773                 IWL_DEBUG_INFO("Command %s failed: FW Error\n",
774                                get_cmd_string(cmd->id));
775                 ret = -EIO;
776                 goto fail;
777         }
778         if ((cmd->meta.flags & CMD_WANT_SKB) && !cmd->meta.u.skb) {
779                 IWL_ERROR("Error: Response NULL in '%s'\n",
780                           get_cmd_string(cmd->id));
781                 ret = -EIO;
782                 goto out;
783         }
784
785         ret = 0;
786         goto out;
787
788 cancel:
789         if (cmd->meta.flags & CMD_WANT_SKB) {
790                 struct iwl_cmd *qcmd;
791
792                 /* Cancel the CMD_WANT_SKB flag for the cmd in the
793                  * TX cmd queue. Otherwise in case the cmd comes
794                  * in later, it will possibly set an invalid
795                  * address (cmd->meta.source). */
796                 qcmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_idx];
797                 qcmd->meta.flags &= ~CMD_WANT_SKB;
798         }
799 fail:
800         if (cmd->meta.u.skb) {
801                 dev_kfree_skb_any(cmd->meta.u.skb);
802                 cmd->meta.u.skb = NULL;
803         }
804 out:
805         atomic_set(&entry, 0);
806         return ret;
807 }
808
809 int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
810 {
811         /* A command can not be asynchronous AND expect an SKB to be set. */
812         BUG_ON((cmd->meta.flags & CMD_ASYNC) &&
813                (cmd->meta.flags & CMD_WANT_SKB));
814
815         if (cmd->meta.flags & CMD_ASYNC)
816                 return iwl_send_cmd_async(priv, cmd);
817
818         return iwl_send_cmd_sync(priv, cmd);
819 }
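/*
 * Example usage (illustrative only, not part of the driver): a caller fills
 * in a struct iwl_host_cmd and hands it to iwl_send_cmd().  A synchronous
 * command blocks in iwl_send_cmd_sync() until the uCode response arrives or
 * HOST_COMPLETE_TIMEOUT expires:
 *
 *	struct iwl_bt_cmd bt_cmd = { .flags = 3 };
 *	struct iwl_host_cmd cmd = {
 *		.id = REPLY_BT_CONFIG,
 *		.len = sizeof(bt_cmd),
 *		.data = &bt_cmd,
 *	};
 *	int rc = iwl_send_cmd(priv, &cmd);
 *
 * An asynchronous caller instead sets CMD_ASYNC in cmd.meta.flags and must
 * supply a completion callback in cmd.meta.u.callback (see
 * iwl_send_card_state() further down for a real example).
 */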
820
821 int iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data)
822 {
823         struct iwl_host_cmd cmd = {
824                 .id = id,
825                 .len = len,
826                 .data = data,
827         };
828
829         return iwl_send_cmd_sync(priv, &cmd);
830 }
831
832 static int __must_check iwl_send_cmd_u32(struct iwl_priv *priv, u8 id, u32 val)
833 {
834         struct iwl_host_cmd cmd = {
835                 .id = id,
836                 .len = sizeof(val),
837                 .data = &val,
838         };
839
840         return iwl_send_cmd_sync(priv, &cmd);
841 }
842
843 int iwl_send_statistics_request(struct iwl_priv *priv)
844 {
845         return iwl_send_cmd_u32(priv, REPLY_STATISTICS_CMD, 0);
846 }
847
848 /**
849  * iwl_set_rxon_channel - Set the phymode and channel values in staging RXON
850  * @phymode: MODE_IEEE80211A selects the 5.2GHz band; all else selects 2.4GHz
851  * @channel: Any channel valid for the requested phymode
852  *
853  * In addition to setting the staging RXON, priv->phymode is also set.
854  *
855  * NOTE:  Does not commit to the hardware; it sets appropriate bit fields
856  * in the staging RXON flag structure based on the phymode
857  */
858 static int iwl_set_rxon_channel(struct iwl_priv *priv, u8 phymode, u16 channel)
859 {
860         if (!iwl_get_channel_info(priv, phymode, channel)) {
861                 IWL_DEBUG_INFO("Could not set channel to %d [%d]\n",
862                                channel, phymode);
863                 return -EINVAL;
864         }
865
866         if ((le16_to_cpu(priv->staging_rxon.channel) == channel) &&
867             (priv->phymode == phymode))
868                 return 0;
869
870         priv->staging_rxon.channel = cpu_to_le16(channel);
871         if (phymode == MODE_IEEE80211A)
872                 priv->staging_rxon.flags &= ~RXON_FLG_BAND_24G_MSK;
873         else
874                 priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
875
876         priv->phymode = phymode;
877
878         IWL_DEBUG_INFO("Staging channel set to %d [%d]\n", channel, phymode);
879
880         return 0;
881 }
882
883 /**
884  * iwl_check_rxon_cmd - validate that the RXON structure is valid
885  *
886  * NOTE:  This is really only useful during development and can eventually
887  * be #ifdef'd out once the driver is stable and folks aren't actively
888  * making changes
889  */
890 static int iwl_check_rxon_cmd(struct iwl_rxon_cmd *rxon)
891 {
892         int error = 0;
893         int counter = 1;
894
895         if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
896                 error |= le32_to_cpu(rxon->flags &
897                                 (RXON_FLG_TGJ_NARROW_BAND_MSK |
898                                  RXON_FLG_RADAR_DETECT_MSK));
899                 if (error)
900                         IWL_WARNING("check 24G fields %d | %d\n",
901                                     counter++, error);
902         } else {
903                 error |= (rxon->flags & RXON_FLG_SHORT_SLOT_MSK) ?
904                                 0 : le32_to_cpu(RXON_FLG_SHORT_SLOT_MSK);
905                 if (error)
906                         IWL_WARNING("check 52 fields %d | %d\n",
907                                     counter++, error);
908                 error |= le32_to_cpu(rxon->flags & RXON_FLG_CCK_MSK);
909                 if (error)
910                         IWL_WARNING("check 52 CCK %d | %d\n",
911                                     counter++, error);
912         }
913         error |= (rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1;
914         if (error)
915                 IWL_WARNING("check mac addr %d | %d\n", counter++, error);
916
917         /* make sure basic rates 6Mbps and 1Mbps are supported */
918         error |= (((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0) &&
919                   ((rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0));
920         if (error)
921                 IWL_WARNING("check basic rate %d | %d\n", counter++, error);
922
923         error |= (le16_to_cpu(rxon->assoc_id) > 2007);
924         if (error)
925                 IWL_WARNING("check assoc id %d | %d\n", counter++, error);
926
927         error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
928                         == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK));
929         if (error)
930                 IWL_WARNING("check CCK and short slot %d | %d\n",
931                             counter++, error);
932
933         error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
934                         == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK));
935         if (error)
936                 IWL_WARNING("check CCK & auto detect %d | %d\n",
937                             counter++, error);
938
939         error |= ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
940                         RXON_FLG_TGG_PROTECT_MSK)) == RXON_FLG_TGG_PROTECT_MSK);
941         if (error)
942                 IWL_WARNING("check TGG and auto detect %d | %d\n",
943                             counter++, error);
944
945         if ((rxon->flags & RXON_FLG_DIS_DIV_MSK))
946                 error |= ((rxon->flags & (RXON_FLG_ANT_B_MSK |
947                                 RXON_FLG_ANT_A_MSK)) == 0);
948         if (error)
949                 IWL_WARNING("check antenna %d %d\n", counter++, error);
950
951         if (error)
952                 IWL_WARNING("Tuning to channel %d\n",
953                             le16_to_cpu(rxon->channel));
954
955         if (error) {
956                 IWL_ERROR("Invalid iwl_rxon_assoc_cmd field values\n");
957                 return -1;
958         }
959         return 0;
960 }
961
962 /**
963  * iwl_full_rxon_required - determine if RXON_ASSOC can be used in RXON commit
964  * @priv: staging_rxon is compared to active_rxon
965  *
966  * If the RXON structure is changing sufficiently to require a new
967  * tune, or to require clearing and resetting RXON_FILTER_ASSOC_MSK,
968  * return 1 to indicate that a new tune is required.
969  */
970 static int iwl_full_rxon_required(struct iwl_priv *priv)
971 {
972
973         /* These items are only settable from the full RXON command */
974         if (!(priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) ||
975             compare_ether_addr(priv->staging_rxon.bssid_addr,
976                                priv->active_rxon.bssid_addr) ||
977             compare_ether_addr(priv->staging_rxon.node_addr,
978                                priv->active_rxon.node_addr) ||
979             compare_ether_addr(priv->staging_rxon.wlap_bssid_addr,
980                                priv->active_rxon.wlap_bssid_addr) ||
981             (priv->staging_rxon.dev_type != priv->active_rxon.dev_type) ||
982             (priv->staging_rxon.channel != priv->active_rxon.channel) ||
983             (priv->staging_rxon.air_propagation !=
984              priv->active_rxon.air_propagation) ||
985             (priv->staging_rxon.assoc_id != priv->active_rxon.assoc_id))
986                 return 1;
987
988         /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
989          * be updated with the RXON_ASSOC command -- however only some
990          * flag transitions are allowed using RXON_ASSOC */
991
992         /* Check if we are not switching bands */
993         if ((priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) !=
994             (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK))
995                 return 1;
996
997         /* Check if we are switching association toggle */
998         if ((priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) !=
999                 (priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK))
1000                 return 1;
1001
1002         return 0;
1003 }
1004
1005 static int iwl_send_rxon_assoc(struct iwl_priv *priv)
1006 {
1007         int rc = 0;
1008         struct iwl_rx_packet *res = NULL;
1009         struct iwl_rxon_assoc_cmd rxon_assoc;
1010         struct iwl_host_cmd cmd = {
1011                 .id = REPLY_RXON_ASSOC,
1012                 .len = sizeof(rxon_assoc),
1013                 .meta.flags = CMD_WANT_SKB,
1014                 .data = &rxon_assoc,
1015         };
1016         const struct iwl_rxon_cmd *rxon1 = &priv->staging_rxon;
1017         const struct iwl_rxon_cmd *rxon2 = &priv->active_rxon;
1018
1019         if ((rxon1->flags == rxon2->flags) &&
1020             (rxon1->filter_flags == rxon2->filter_flags) &&
1021             (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
1022             (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
1023                 IWL_DEBUG_INFO("Using current RXON_ASSOC.  Not resending.\n");
1024                 return 0;
1025         }
1026
1027         rxon_assoc.flags = priv->staging_rxon.flags;
1028         rxon_assoc.filter_flags = priv->staging_rxon.filter_flags;
1029         rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates;
1030         rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates;
1031         rxon_assoc.reserved = 0;
1032
1033         rc = iwl_send_cmd_sync(priv, &cmd);
1034         if (rc)
1035                 return rc;
1036
1037         res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
1038         if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
1039                 IWL_ERROR("Bad return from REPLY_RXON_ASSOC command\n");
1040                 rc = -EIO;
1041         }
1042
1043         priv->alloc_rxb_skb--;
1044         dev_kfree_skb_any(cmd.meta.u.skb);
1045
1046         return rc;
1047 }
1048
1049 /**
1050  * iwl_commit_rxon - commit staging_rxon to hardware
1051  *
1052  * The RXON command in staging_rxon is committed to the hardware and
1053  * the active_rxon structure is updated with the new data.  This
1054  * function correctly transitions out of the RXON_ASSOC_MSK state if
1055  * a HW tune is required based on the RXON structure changes.
1056  */
1057 static int iwl_commit_rxon(struct iwl_priv *priv)
1058 {
1059         /* cast away the const for active_rxon in this function */
1060         struct iwl_rxon_cmd *active_rxon = (void *)&priv->active_rxon;
1061         int rc = 0;
1062         DECLARE_MAC_BUF(mac);
1063
1064         if (!iwl_is_alive(priv))
1065                 return -1;
1066
1067         /* always get timestamp with Rx frame */
1068         priv->staging_rxon.flags |= RXON_FLG_TSF2HOST_MSK;
1069
1070         /* select antenna */
1071         priv->staging_rxon.flags &=
1072             ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK);
1073         priv->staging_rxon.flags |= iwl3945_get_antenna_flags(priv);
1074
1075         rc = iwl_check_rxon_cmd(&priv->staging_rxon);
1076         if (rc) {
1077                 IWL_ERROR("Invalid RXON configuration.  Not committing.\n");
1078                 return -EINVAL;
1079         }
1080
1081         /* If we don't need to send a full RXON, we can use
1082          * iwl_rxon_assoc_cmd which is used to reconfigure filter
1083          * and other flags for the current radio configuration. */
1084         if (!iwl_full_rxon_required(priv)) {
1085                 rc = iwl_send_rxon_assoc(priv);
1086                 if (rc) {
1087                         IWL_ERROR("Error setting RXON_ASSOC "
1088                                   "configuration (%d).\n", rc);
1089                         return rc;
1090                 }
1091
1092                 memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));
1093
1094                 return 0;
1095         }
1096
1097         /* If we are currently associated and the new config requires
1098          * a full RXON with the associated mask enabled, we must clear
1099          * the association from the active configuration before we apply
1100          * the new config */
1101         if (iwl_is_associated(priv) &&
1102             (priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK)) {
1103                 IWL_DEBUG_INFO("Toggling associated bit on current RXON\n");
1104                 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
1105
1106                 rc = iwl_send_cmd_pdu(priv, REPLY_RXON,
1107                                       sizeof(struct iwl_rxon_cmd),
1108                                       &priv->active_rxon);
1109
1110                 /* If the mask clearing failed then we set
1111                  * active_rxon back to what it was previously */
1112                 if (rc) {
1113                         active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
1114                         IWL_ERROR("Error clearing ASSOC_MSK on current "
1115                                   "configuration (%d).\n", rc);
1116                         return rc;
1117                 }
1118         }
1119
1120         IWL_DEBUG_INFO("Sending RXON\n"
1121                        "* with%s RXON_FILTER_ASSOC_MSK\n"
1122                        "* channel = %d\n"
1123                        "* bssid = %s\n",
1124                        ((priv->staging_rxon.filter_flags &
1125                          RXON_FILTER_ASSOC_MSK) ? "" : "out"),
1126                        le16_to_cpu(priv->staging_rxon.channel),
1127                        print_mac(mac, priv->staging_rxon.bssid_addr));
1128
1129         /* Apply the new configuration */
1130         rc = iwl_send_cmd_pdu(priv, REPLY_RXON,
1131                               sizeof(struct iwl_rxon_cmd), &priv->staging_rxon);
1132         if (rc) {
1133                 IWL_ERROR("Error setting new configuration (%d).\n", rc);
1134                 return rc;
1135         }
1136
1137         memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));
1138
1139         iwl_clear_stations_table(priv);
1140
1141         /* If we issue a new RXON command which requires a tune then we must
1142          * send a new TXPOWER command or we won't be able to Tx any frames */
1143         rc = iwl_hw_reg_send_txpower(priv);
1144         if (rc) {
1145                 IWL_ERROR("Error setting Tx power (%d).\n", rc);
1146                 return rc;
1147         }
1148
1149         /* Add the broadcast address so we can send broadcast frames */
1150         if (iwl_add_station(priv, BROADCAST_ADDR, 0, 0) ==
1151             IWL_INVALID_STATION) {
1152                 IWL_ERROR("Error adding BROADCAST address for transmit.\n");
1153                 return -EIO;
1154         }
1155
1156         /* If we have set the ASSOC_MSK and we are in BSS mode then
1157          * add the IWL_AP_ID to the station rate table */
1158         if (iwl_is_associated(priv) &&
1159             (priv->iw_mode == IEEE80211_IF_TYPE_STA))
1160                 if (iwl_add_station(priv, priv->active_rxon.bssid_addr, 1, 0)
1161                     == IWL_INVALID_STATION) {
1162                         IWL_ERROR("Error adding AP address for transmit.\n");
1163                         return -EIO;
1164                 }
1165
1166         /* Init the hardware's rate fallback order based on the
1167          * phymode */
1168         rc = iwl3945_init_hw_rate_table(priv);
1169         if (rc) {
1170                 IWL_ERROR("Error setting HW rate table: %02X\n", rc);
1171                 return -EIO;
1172         }
1173
1174         return 0;
1175 }
1176
1177 static int iwl_send_bt_config(struct iwl_priv *priv)
1178 {
1179         struct iwl_bt_cmd bt_cmd = {
1180                 .flags = 3,
1181                 .lead_time = 0xAA,
1182                 .max_kill = 1,
1183                 .kill_ack_mask = 0,
1184                 .kill_cts_mask = 0,
1185         };
1186
1187         return iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
1188                                 sizeof(struct iwl_bt_cmd), &bt_cmd);
1189 }
1190
1191 static int iwl_send_scan_abort(struct iwl_priv *priv)
1192 {
1193         int rc = 0;
1194         struct iwl_rx_packet *res;
1195         struct iwl_host_cmd cmd = {
1196                 .id = REPLY_SCAN_ABORT_CMD,
1197                 .meta.flags = CMD_WANT_SKB,
1198         };
1199
1200         /* If there isn't a scan actively going on in the hardware
1201          * then we are in between scan bands and not actually
1202          * actively scanning, so don't send the abort command */
1203         if (!test_bit(STATUS_SCAN_HW, &priv->status)) {
1204                 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
1205                 return 0;
1206         }
1207
1208         rc = iwl_send_cmd_sync(priv, &cmd);
1209         if (rc) {
1210                 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
1211                 return rc;
1212         }
1213
1214         res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
1215         if (res->u.status != CAN_ABORT_STATUS) {
1216                 /* The scan abort will return 1 for success or
1217                  * 2 for "failure".  A failure condition can be
1218                  * due to simply not being in an active scan, which
1219                  * can occur if we send the scan abort before the
1220                  * microcode has notified us that a scan is
1221                  * completed. */
1222                 IWL_DEBUG_INFO("SCAN_ABORT returned %d.\n", res->u.status);
1223                 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
1224                 clear_bit(STATUS_SCAN_HW, &priv->status);
1225         }
1226
1227         dev_kfree_skb_any(cmd.meta.u.skb);
1228
1229         return rc;
1230 }
1231
1232 static int iwl_card_state_sync_callback(struct iwl_priv *priv,
1233                                         struct iwl_cmd *cmd,
1234                                         struct sk_buff *skb)
1235 {
1236         return 1;
1237 }
1238
1239 /*
1240  * CARD_STATE_CMD
1241  *
1242  * Use: Sets the internal card state to enable, disable, or halt
1243  *
1244  * When in the 'enable' state the card operates as normal.
1245  * When in the 'disable' state, the card enters into a low power mode.
1246  * When in the 'halt' state, the card is shut down and must be fully
1247  * restarted to come back on.
1248  */
1249 static int iwl_send_card_state(struct iwl_priv *priv, u32 flags, u8 meta_flag)
1250 {
1251         struct iwl_host_cmd cmd = {
1252                 .id = REPLY_CARD_STATE_CMD,
1253                 .len = sizeof(u32),
1254                 .data = &flags,
1255                 .meta.flags = meta_flag,
1256         };
1257
1258         if (meta_flag & CMD_ASYNC)
1259                 cmd.meta.u.callback = iwl_card_state_sync_callback;
1260
1261         return iwl_send_cmd(priv, &cmd);
1262 }
1263
1264 static int iwl_add_sta_sync_callback(struct iwl_priv *priv,
1265                                      struct iwl_cmd *cmd, struct sk_buff *skb)
1266 {
1267         struct iwl_rx_packet *res = NULL;
1268
1269         if (!skb) {
1270                 IWL_ERROR("Error: Response NULL in REPLY_ADD_STA.\n");
1271                 return 1;
1272         }
1273
1274         res = (struct iwl_rx_packet *)skb->data;
1275         if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
1276                 IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n",
1277                           res->hdr.flags);
1278                 return 1;
1279         }
1280
1281         switch (res->u.add_sta.status) {
1282         case ADD_STA_SUCCESS_MSK:
1283                 break;
1284         default:
1285                 break;
1286         }
1287
1288         /* We didn't cache the SKB; let the caller free it */
1289         return 1;
1290 }
1291
1292 int iwl_send_add_station(struct iwl_priv *priv,
1293                          struct iwl_addsta_cmd *sta, u8 flags)
1294 {
1295         struct iwl_rx_packet *res = NULL;
1296         int rc = 0;
1297         struct iwl_host_cmd cmd = {
1298                 .id = REPLY_ADD_STA,
1299                 .len = sizeof(struct iwl_addsta_cmd),
1300                 .meta.flags = flags,
1301                 .data = sta,
1302         };
1303
1304         if (flags & CMD_ASYNC)
1305                 cmd.meta.u.callback = iwl_add_sta_sync_callback;
1306         else
1307                 cmd.meta.flags |= CMD_WANT_SKB;
1308
1309         rc = iwl_send_cmd(priv, &cmd);
1310
1311         if (rc || (flags & CMD_ASYNC))
1312                 return rc;
1313
1314         res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
1315         if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
1316                 IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n",
1317                           res->hdr.flags);
1318                 rc = -EIO;
1319         }
1320
1321         if (rc == 0) {
1322                 switch (res->u.add_sta.status) {
1323                 case ADD_STA_SUCCESS_MSK:
1324                         IWL_DEBUG_INFO("REPLY_ADD_STA PASSED\n");
1325                         break;
1326                 default:
1327                         rc = -EIO;
1328                         IWL_WARNING("REPLY_ADD_STA failed\n");
1329                         break;
1330                 }
1331         }
1332
1333         priv->alloc_rxb_skb--;
1334         dev_kfree_skb_any(cmd.meta.u.skb);
1335
1336         return rc;
1337 }
1338
1339 static int iwl_update_sta_key_info(struct iwl_priv *priv,
1340                                    struct ieee80211_key_conf *keyconf,
1341                                    u8 sta_id)
1342 {
1343         unsigned long flags;
1344         __le16 key_flags = 0;
1345
1346         switch (keyconf->alg) {
1347         case ALG_CCMP:
1348                 key_flags |= STA_KEY_FLG_CCMP;
1349                 key_flags |= cpu_to_le16(
1350                                 keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
1351                 key_flags &= ~STA_KEY_FLG_INVALID;
1352                 break;
1353         case ALG_TKIP:
1354         case ALG_WEP:
1355                 return -EINVAL;
1356         default:
1357                 return -EINVAL;
1358         }
1359         spin_lock_irqsave(&priv->sta_lock, flags);
1360         priv->stations[sta_id].keyinfo.alg = keyconf->alg;
1361         priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
1362         memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
1363                keyconf->keylen);
1364
1365         memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
1366                keyconf->keylen);
1367         priv->stations[sta_id].sta.key.key_flags = key_flags;
1368         priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
1369         priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
1370
1371         spin_unlock_irqrestore(&priv->sta_lock, flags);
1372
1373         IWL_DEBUG_INFO("hwcrypto: modify ucode station key info\n");
1374         iwl_send_add_station(priv, &priv->stations[sta_id].sta, 0);
1375         return 0;
1376 }
1377
1378 static int iwl_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id)
1379 {
1380         unsigned long flags;
1381
1382         spin_lock_irqsave(&priv->sta_lock, flags);
1383         memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl_hw_key));
1384         memset(&priv->stations[sta_id].sta.key, 0, sizeof(struct iwl_keyinfo));
1385         priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
1386         priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
1387         priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
1388         spin_unlock_irqrestore(&priv->sta_lock, flags);
1389
1390         IWL_DEBUG_INFO("hwcrypto: clear ucode station key info\n");
1391         iwl_send_add_station(priv, &priv->stations[sta_id].sta, 0);
1392         return 0;
1393 }
1394
1395 static void iwl_clear_free_frames(struct iwl_priv *priv)
1396 {
1397         struct list_head *element;
1398
1399         IWL_DEBUG_INFO("%d frames on pre-allocated heap on clear.\n",
1400                        priv->frames_count);
1401
1402         while (!list_empty(&priv->free_frames)) {
1403                 element = priv->free_frames.next;
1404                 list_del(element);
1405                 kfree(list_entry(element, struct iwl_frame, list));
1406                 priv->frames_count--;
1407         }
1408
1409         if (priv->frames_count) {
1410                 IWL_WARNING("%d frames still in use.  Did we lose one?\n",
1411                             priv->frames_count);
1412                 priv->frames_count = 0;
1413         }
1414 }
1415
1416 static struct iwl_frame *iwl_get_free_frame(struct iwl_priv *priv)
1417 {
1418         struct iwl_frame *frame;
1419         struct list_head *element;
1420         if (list_empty(&priv->free_frames)) {
1421                 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
1422                 if (!frame) {
1423                         IWL_ERROR("Could not allocate frame!\n");
1424                         return NULL;
1425                 }
1426
1427                 priv->frames_count++;
1428                 return frame;
1429         }
1430
1431         element = priv->free_frames.next;
1432         list_del(element);
1433         return list_entry(element, struct iwl_frame, list);
1434 }
1435
1436 static void iwl_free_frame(struct iwl_priv *priv, struct iwl_frame *frame)
1437 {
1438         memset(frame, 0, sizeof(*frame));
1439         list_add(&frame->list, &priv->free_frames);
1440 }
1441
1442 unsigned int iwl_fill_beacon_frame(struct iwl_priv *priv,
1443                                 struct ieee80211_hdr *hdr,
1444                                 const u8 *dest, int left)
1445 {
1446
1447         if (!iwl_is_associated(priv) || !priv->ibss_beacon ||
1448             ((priv->iw_mode != IEEE80211_IF_TYPE_IBSS) &&
1449              (priv->iw_mode != IEEE80211_IF_TYPE_AP)))
1450                 return 0;
1451
1452         if (priv->ibss_beacon->len > left)
1453                 return 0;
1454
1455         memcpy(hdr, priv->ibss_beacon->data, priv->ibss_beacon->len);
1456
1457         return priv->ibss_beacon->len;
1458 }
1459
1460 static int iwl_rate_index_from_plcp(int plcp)
1461 {
1462         int i = 0;
1463
1464         for (i = 0; i < IWL_RATE_COUNT; i++)
1465                 if (iwl_rates[i].plcp == plcp)
1466                         return i;
1467         return -1;
1468 }
1469
1470 static u8 iwl_rate_get_lowest_plcp(int rate_mask)
1471 {
1472         u8 i;
1473
1474         for (i = IWL_RATE_1M_INDEX; i != IWL_RATE_INVALID;
1475              i = iwl_rates[i].next_ieee) {
1476                 if (rate_mask & (1 << i))
1477                         return iwl_rates[i].plcp;
1478         }
1479
1480         return IWL_RATE_INVALID;
1481 }
1482
1483 static int iwl_send_beacon_cmd(struct iwl_priv *priv)
1484 {
1485         struct iwl_frame *frame;
1486         unsigned int frame_size;
1487         int rc;
1488         u8 rate;
1489
1490         frame = iwl_get_free_frame(priv);
1491
1492         if (!frame) {
1493                 IWL_ERROR("Could not obtain free frame buffer for beacon "
1494                           "command.\n");
1495                 return -ENOMEM;
1496         }
1497
1498         if (!(priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)) {
1499                 rate = iwl_rate_get_lowest_plcp(priv->active_rate_basic &
1500                                                 0xFF0);
1501                 if (rate == IWL_INVALID_RATE)
1502                         rate = IWL_RATE_6M_PLCP;
1503         } else {
1504                 rate = iwl_rate_get_lowest_plcp(priv->active_rate_basic & 0xF);
1505                 if (rate == IWL_INVALID_RATE)
1506                         rate = IWL_RATE_1M_PLCP;
1507         }
1508
1509         frame_size = iwl_hw_get_beacon_cmd(priv, frame, rate);
1510
1511         rc = iwl_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
1512                               &frame->u.cmd[0]);
1513
1514         iwl_free_frame(priv, frame);
1515
1516         return rc;
1517 }
1518
1519 /******************************************************************************
1520  *
1521  * EEPROM related functions
1522  *
1523  ******************************************************************************/
1524
1525 static void get_eeprom_mac(struct iwl_priv *priv, u8 *mac)
1526 {
1527         memcpy(mac, priv->eeprom.mac_address, 6);
1528 }
1529
1530 /**
1531  * iwl_eeprom_init - read EEPROM contents
1532  *
1533  * Load the EEPROM from adapter into priv->eeprom
1534  *
1535  * NOTE:  This routine uses the non-debug IO access functions.
1536  */
1537 int iwl_eeprom_init(struct iwl_priv *priv)
1538 {
1539         u16 *e = (u16 *)&priv->eeprom;
1540         u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
1541         u32 r;
1542         int sz = sizeof(priv->eeprom);
1543         int rc;
1544         int i;
1545         u16 addr;
1546
1547         /* The EEPROM structure has several padding buffers within it,
1548          * so adding new EEPROM maps is prone to programmer errors that
1549          * can be very difficult to identify without explicitly checking
1550          * the resulting size of the EEPROM map. */
1551         BUILD_BUG_ON(sizeof(priv->eeprom) != IWL_EEPROM_IMAGE_SIZE);
1552
1553         if ((gp & CSR_EEPROM_GP_VALID_MSK) == CSR_EEPROM_GP_BAD_SIGNATURE) {
1554                 IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x\n", gp);
1555                 return -ENOENT;
1556         }
1557
1558         rc = iwl_eeprom_aqcuire_semaphore(priv);
1559         if (rc < 0) {
1560                 IWL_ERROR("Failed to acquire EEPROM semaphore.\n");
1561                 return -ENOENT;
1562         }
1563
1564         /* eeprom is an array of 16bit values */
1565         for (addr = 0; addr < sz; addr += sizeof(u16)) {
1566                 _iwl_write32(priv, CSR_EEPROM_REG, addr << 1);
1567                 _iwl_clear_bit(priv, CSR_EEPROM_REG, CSR_EEPROM_REG_BIT_CMD);
1568
1569                 for (i = 0; i < IWL_EEPROM_ACCESS_TIMEOUT;
1570                                         i += IWL_EEPROM_ACCESS_DELAY) {
1571                         r = _iwl_read_restricted(priv, CSR_EEPROM_REG);
1572                         if (r & CSR_EEPROM_REG_READ_VALID_MSK)
1573                                 break;
1574                         udelay(IWL_EEPROM_ACCESS_DELAY);
1575                 }
1576
1577                 if (!(r & CSR_EEPROM_REG_READ_VALID_MSK)) {
1578                         IWL_ERROR("Timed out reading EEPROM[%d]\n", addr);
1579                         return -ETIMEDOUT;
1580                 }
1581                 e[addr / 2] = le16_to_cpu(r >> 16);
1582         }
1583
1584         return 0;
1585 }
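
/*
 * Summary of the per-word handshake in the loop above: the byte address
 * (addr << 1) is written to CSR_EEPROM_REG, the command bit is cleared,
 * the register is polled until CSR_EEPROM_REG_READ_VALID_MSK is set
 * (for at most IWL_EEPROM_ACCESS_TIMEOUT, in IWL_EEPROM_ACCESS_DELAY
 * steps), and the 16-bit word is then taken from the upper half of the
 * register value.
 */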
1586
1587 /******************************************************************************
1588  *
1589  * Misc. internal state and helper functions
1590  *
1591  ******************************************************************************/
1592 #ifdef CONFIG_IWLWIFI_DEBUG
1593
1594 /**
1595  * iwl_report_frame - dump frame to syslog during debug sessions
1596  *
1597  * hack this function to show different aspects of received frames,
1598  * including selective frame dumps.
1599  * group100 parameter selects whether to show 1 out of 100 good frames.
1600  *
1601  * TODO:  ieee80211_hdr stuff is common to 3945 and 4965, so frame type
1602  *        info output is okay, but some of this stuff (e.g. iwl_rx_frame_stats)
1603  *        is 3945-specific and gives bad output for 4965.  Need to split the
1604  *        functionality, keep common stuff here.
1605  */
1606 void iwl_report_frame(struct iwl_priv *priv,
1607                       struct iwl_rx_packet *pkt,
1608                       struct ieee80211_hdr *header, int group100)
1609 {
1610         u32 to_us;
1611         u32 print_summary = 0;
1612         u32 print_dump = 0;     /* set to 1 to dump all frames' contents */
1613         u32 hundred = 0;
1614         u32 dataframe = 0;
1615         u16 fc;
1616         u16 seq_ctl;
1617         u16 channel;
1618         u16 phy_flags;
1619         int rate_sym;
1620         u16 length;
1621         u16 status;
1622         u16 bcn_tmr;
1623         u32 tsf_low;
1624         u64 tsf;
1625         u8 rssi;
1626         u8 agc;
1627         u16 sig_avg;
1628         u16 noise_diff;
1629         struct iwl_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
1630         struct iwl_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
1631         struct iwl_rx_frame_end *rx_end = IWL_RX_END(pkt);
1632         u8 *data = IWL_RX_DATA(pkt);
1633
1634         /* MAC header */
1635         fc = le16_to_cpu(header->frame_control);
1636         seq_ctl = le16_to_cpu(header->seq_ctrl);
1637
1638         /* metadata */
1639         channel = le16_to_cpu(rx_hdr->channel);
1640         phy_flags = le16_to_cpu(rx_hdr->phy_flags);
1641         rate_sym = rx_hdr->rate;
1642         length = le16_to_cpu(rx_hdr->len);
1643
1644         /* end-of-frame status and timestamp */
1645         status = le32_to_cpu(rx_end->status);
1646         bcn_tmr = le32_to_cpu(rx_end->beacon_timestamp);
1647         tsf_low = le64_to_cpu(rx_end->timestamp) & 0x0ffffffff;
1648         tsf = le64_to_cpu(rx_end->timestamp);
1649
1650         /* signal statistics */
1651         rssi = rx_stats->rssi;
1652         agc = rx_stats->agc;
1653         sig_avg = le16_to_cpu(rx_stats->sig_avg);
1654         noise_diff = le16_to_cpu(rx_stats->noise_diff);
1655
1656         to_us = !compare_ether_addr(header->addr1, priv->mac_addr);
1657
1658         /* if data frame is to us and all is good,
1659          *   (optionally) print summary for only 1 out of every 100 */
1660         if (to_us && (fc & ~IEEE80211_FCTL_PROTECTED) ==
1661             (IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) {
1662                 dataframe = 1;
1663                 if (!group100)
1664                         print_summary = 1;      /* print each frame */
1665                 else if (priv->framecnt_to_us < 100) {
1666                         priv->framecnt_to_us++;
1667                         print_summary = 0;
1668                 } else {
1669                         priv->framecnt_to_us = 0;
1670                         print_summary = 1;
1671                         hundred = 1;
1672                 }
1673         } else {
1674                 /* print summary for all other frames */
1675                 print_summary = 1;
1676         }
1677
1678         if (print_summary) {
1679                 char *title;
1680                 u32 rate;
1681
1682                 if (hundred)
1683                         title = "100Frames";
1684                 else if (fc & IEEE80211_FCTL_RETRY)
1685                         title = "Retry";
1686                 else if (ieee80211_is_assoc_response(fc))
1687                         title = "AscRsp";
1688                 else if (ieee80211_is_reassoc_response(fc))
1689                         title = "RasRsp";
1690                 else if (ieee80211_is_probe_response(fc)) {
1691                         title = "PrbRsp";
1692                         print_dump = 1; /* dump frame contents */
1693                 } else if (ieee80211_is_beacon(fc)) {
1694                         title = "Beacon";
1695                         print_dump = 1; /* dump frame contents */
1696                 } else if (ieee80211_is_atim(fc))
1697                         title = "ATIM";
1698                 else if (ieee80211_is_auth(fc))
1699                         title = "Auth";
1700                 else if (ieee80211_is_deauth(fc))
1701                         title = "DeAuth";
1702                 else if (ieee80211_is_disassoc(fc))
1703                         title = "DisAssoc";
1704                 else
1705                         title = "Frame";
1706
1707                 rate = iwl_rate_index_from_plcp(rate_sym);
1708                 if (rate == -1)
1709                         rate = 0;
1710                 else
1711                         rate = iwl_rates[rate].ieee / 2;
1712
1713                 /* print frame summary.
1714                  * MAC addresses show just the last byte (for brevity),
1715                  *    but you can hack it to show more, if you'd like to. */
1716                 if (dataframe)
1717                         IWL_DEBUG_RX("%s: mhd=0x%04x, dst=0x%02x, "
1718                                      "len=%u, rssi=%d, chnl=%d, rate=%u, \n",
1719                                      title, fc, header->addr1[5],
1720                                      length, rssi, channel, rate);
1721                 else {
1722                         /* src/dst addresses assume managed mode */
1723                         IWL_DEBUG_RX("%s: 0x%04x, dst=0x%02x, "
1724                                      "src=0x%02x, rssi=%u, tim=%lu usec, "
1725                                      "phy=0x%02x, chnl=%d\n",
1726                                      title, fc, header->addr1[5],
1727                                      header->addr3[5], rssi,
1728                                      tsf_low - priv->scan_start_tsf,
1729                                      phy_flags, channel);
1730                 }
1731         }
1732         if (print_dump)
1733                 iwl_print_hex_dump(IWL_DL_RX, data, length);
1734 }
1735 #endif
1736
1737 static void iwl_unset_hw_setting(struct iwl_priv *priv)
1738 {
1739         if (priv->hw_setting.shared_virt)
1740                 pci_free_consistent(priv->pci_dev,
1741                                     sizeof(struct iwl_shared),
1742                                     priv->hw_setting.shared_virt,
1743                                     priv->hw_setting.shared_phys);
1744 }
1745
1746 /**
1747  * iwl_supported_rate_to_ie - fill in the supported rate in IE field
1748  *
1749  * return: bitmask of the rates that were inserted into the IE
1750  */
1751 static u16 iwl_supported_rate_to_ie(u8 *ie, u16 supported_rate,
1752                                     u16 basic_rate, int max_count)
1753 {
1754         u16 ret_rates = 0, bit;
1755         int i;
1756         u8 *rates;
1757
1758         rates = &(ie[1]);
1759
1760         for (bit = 1, i = 0; i < IWL_RATE_COUNT; i++, bit <<= 1) {
1761                 if (bit & supported_rate) {
1762                         ret_rates |= bit;
1763                         rates[*ie] = iwl_rates[i].ieee |
1764                             ((bit & basic_rate) ? 0x80 : 0x00);
1765                         *ie = *ie + 1;
1766                         if (*ie >= max_count)
1767                                 break;
1768                 }
1769         }
1770
1771         return ret_rates;
1772 }
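
/*
 * Editorial example, assuming the first two iwl_rates entries are 1 and
 * 2 Mbps: 'ie' points at the IE length byte and ie[1..] receive the rate
 * bytes, each being the rate in 500 kbps units with 0x80 set for basic
 * rates.  With supported_rate = 0x3 and basic_rate = 0x1 the function
 * writes:
 *
 *   ie[0] = 2       (length, incremented once per rate)
 *   ie[1] = 0x82    (1 Mbps, basic)
 *   ie[2] = 0x04    (2 Mbps)
 *
 * and returns 0x3, the mask of rates actually inserted.
 */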
1773
1774 /**
1775  * iwl_fill_probe_req - fill in all required fields and IE for probe request
1776  */
1777 static u16 iwl_fill_probe_req(struct iwl_priv *priv,
1778                               struct ieee80211_mgmt *frame,
1779                               int left, int is_direct)
1780 {
1781         int len = 0;
1782         u8 *pos = NULL;
1783         u16 ret_rates;
1784
1785         /* Make sure there is enough space for the probe request,
1786          * two mandatory IEs and the data */
1787         left -= 24;
1788         if (left < 0)
1789                 return 0;
1790         len += 24;
1791
1792         frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
1793         memcpy(frame->da, BROADCAST_ADDR, ETH_ALEN);
1794         memcpy(frame->sa, priv->mac_addr, ETH_ALEN);
1795         memcpy(frame->bssid, BROADCAST_ADDR, ETH_ALEN);
1796         frame->seq_ctrl = 0;
1797
1798         /* fill in our indirect SSID IE */
1799         /* ...next IE... */
1800
1801         left -= 2;
1802         if (left < 0)
1803                 return 0;
1804         len += 2;
1805         pos = &(frame->u.probe_req.variable[0]);
1806         *pos++ = WLAN_EID_SSID;
1807         *pos++ = 0;
1808
1809         /* fill in our direct SSID IE... */
1810         if (is_direct) {
1811                 /* ...next IE... */
1812                 left -= 2 + priv->essid_len;
1813                 if (left < 0)
1814                         return 0;
1815                 /* ... fill it in... */
1816                 *pos++ = WLAN_EID_SSID;
1817                 *pos++ = priv->essid_len;
1818                 memcpy(pos, priv->essid, priv->essid_len);
1819                 pos += priv->essid_len;
1820                 len += 2 + priv->essid_len;
1821         }
1822
1823         /* fill in supported rate */
1824         /* ...next IE... */
1825         left -= 2;
1826         if (left < 0)
1827                 return 0;
1828         /* ... fill it in... */
1829         *pos++ = WLAN_EID_SUPP_RATES;
1830         *pos = 0;
1831         ret_rates = priv->active_rate = priv->rates_mask;
1832         priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK;
1833
1834         iwl_supported_rate_to_ie(pos, priv->active_rate,
1835                                  priv->active_rate_basic, left);
1836         len += 2 + *pos;
1837         pos += (*pos) + 1;
1838         ret_rates = ~ret_rates & priv->active_rate;
1839
1840         if (ret_rates == 0)
1841                 goto fill_end;
1842
1843         /* fill in supported extended rate */
1844         /* ...next IE... */
1845         left -= 2;
1846         if (left < 0)
1847                 return 0;
1848         /* ... fill it in... */
1849         *pos++ = WLAN_EID_EXT_SUPP_RATES;
1850         *pos = 0;
1851         iwl_supported_rate_to_ie(pos, ret_rates, priv->active_rate_basic, left);
1852         if (*pos > 0)
1853                 len += 2 + *pos;
1854
1855  fill_end:
1856         return (u16)len;
1857 }
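
/*
 * Editorial summary of the probe request built above:
 *
 *   [24-byte mgmt header]      broadcast DA and BSSID, our SA
 *   [SSID IE, length 0]        wildcard SSID, always present
 *   [SSID IE, length N]        directed SSID, only when is_direct is set
 *   [Supported Rates IE]       from active_rate / active_rate_basic
 *   [Ext Supported Rates IE]   only for rates that did not fit above
 *
 * The return value is the total number of bytes written, or 0 if 'left'
 * indicates the buffer is too small at any step.
 */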
1858
1859 /*
1860  * QoS support
1861  */
1862 #ifdef CONFIG_IWLWIFI_QOS
1863 static int iwl_send_qos_params_command(struct iwl_priv *priv,
1864                                        struct iwl_qosparam_cmd *qos)
1865 {
1866
1867         return iwl_send_cmd_pdu(priv, REPLY_QOS_PARAM,
1868                                 sizeof(struct iwl_qosparam_cmd), qos);
1869 }
1870
1871 static void iwl_reset_qos(struct iwl_priv *priv)
1872 {
1873         u16 cw_min = 15;
1874         u16 cw_max = 1023;
1875         u8 aifs = 2;
1876         u8 is_legacy = 0;
1877         unsigned long flags;
1878         int i;
1879
1880         spin_lock_irqsave(&priv->lock, flags);
1881         priv->qos_data.qos_active = 0;
1882
1883         if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) {
1884                 if (priv->qos_data.qos_enable)
1885                         priv->qos_data.qos_active = 1;
1886                 if (!(priv->active_rate & 0xfff0)) {
1887                         cw_min = 31;
1888                         is_legacy = 1;
1889                 }
1890         } else if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
1891                 if (priv->qos_data.qos_enable)
1892                         priv->qos_data.qos_active = 1;
1893         } else if (!(priv->staging_rxon.flags & RXON_FLG_SHORT_SLOT_MSK)) {
1894                 cw_min = 31;
1895                 is_legacy = 1;
1896         }
1897
1898         if (priv->qos_data.qos_active)
1899                 aifs = 3;
1900
1901         priv->qos_data.def_qos_parm.ac[0].cw_min = cpu_to_le16(cw_min);
1902         priv->qos_data.def_qos_parm.ac[0].cw_max = cpu_to_le16(cw_max);
1903         priv->qos_data.def_qos_parm.ac[0].aifsn = aifs;
1904         priv->qos_data.def_qos_parm.ac[0].edca_txop = 0;
1905         priv->qos_data.def_qos_parm.ac[0].reserved1 = 0;
1906
1907         if (priv->qos_data.qos_active) {
1908                 i = 1;
1909                 priv->qos_data.def_qos_parm.ac[i].cw_min = cpu_to_le16(cw_min);
1910                 priv->qos_data.def_qos_parm.ac[i].cw_max = cpu_to_le16(cw_max);
1911                 priv->qos_data.def_qos_parm.ac[i].aifsn = 7;
1912                 priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
1913                 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
1914
1915                 i = 2;
1916                 priv->qos_data.def_qos_parm.ac[i].cw_min =
1917                         cpu_to_le16((cw_min + 1) / 2 - 1);
1918                 priv->qos_data.def_qos_parm.ac[i].cw_max =
1919                         cpu_to_le16(cw_max);
1920                 priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
1921                 if (is_legacy)
1922                         priv->qos_data.def_qos_parm.ac[i].edca_txop =
1923                                 cpu_to_le16(6016);
1924                 else
1925                         priv->qos_data.def_qos_parm.ac[i].edca_txop =
1926                                 cpu_to_le16(3008);
1927                 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
1928
1929                 i = 3;
1930                 priv->qos_data.def_qos_parm.ac[i].cw_min =
1931                         cpu_to_le16((cw_min + 1) / 4 - 1);
1932                 priv->qos_data.def_qos_parm.ac[i].cw_max =
1933                         cpu_to_le16((cw_max + 1) / 2 - 1);
1934                 priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
1935                 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
1936                 if (is_legacy)
1937                         priv->qos_data.def_qos_parm.ac[i].edca_txop =
1938                                 cpu_to_le16(3264);
1939                 else
1940                         priv->qos_data.def_qos_parm.ac[i].edca_txop =
1941                                 cpu_to_le16(1504);
1942         } else {
1943                 for (i = 1; i < 4; i++) {
1944                         priv->qos_data.def_qos_parm.ac[i].cw_min =
1945                                 cpu_to_le16(cw_min);
1946                         priv->qos_data.def_qos_parm.ac[i].cw_max =
1947                                 cpu_to_le16(cw_max);
1948                         priv->qos_data.def_qos_parm.ac[i].aifsn = aifs;
1949                         priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
1950                         priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
1951                 }
1952         }
1953         IWL_DEBUG_QOS("set QoS to default \n");
1954
1955         spin_unlock_irqrestore(&priv->lock, flags);
1956 }
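
/*
 * For reference (editorial sketch), the defaults programmed above with
 * QoS active and is_legacy == 0 (cw_min 15, cw_max 1023) work out to:
 *
 *   ac[0]: cw_min 15, cw_max 1023, aifsn 3, txop    0
 *   ac[1]: cw_min 15, cw_max 1023, aifsn 7, txop    0
 *   ac[2]: cw_min  7, cw_max 1023, aifsn 2, txop 3008
 *   ac[3]: cw_min  3, cw_max  511, aifsn 2, txop 1504
 *
 * In the legacy (CCK-only) cases cw_min starts at 31 and ac[2]/ac[3] get
 * the longer 6016/3264 usec TXOP values instead.
 */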
1957
1958 static void iwl_activate_qos(struct iwl_priv *priv, u8 force)
1959 {
1960         unsigned long flags;
1961
1962         if (priv == NULL)
1963                 return;
1964
1965         if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1966                 return;
1967
1968         if (!priv->qos_data.qos_enable)
1969                 return;
1970
1971         spin_lock_irqsave(&priv->lock, flags);
1972         priv->qos_data.def_qos_parm.qos_flags = 0;
1973
1974         if (priv->qos_data.qos_cap.q_AP.queue_request &&
1975             !priv->qos_data.qos_cap.q_AP.txop_request)
1976                 priv->qos_data.def_qos_parm.qos_flags |=
1977                         QOS_PARAM_FLG_TXOP_TYPE_MSK;
1978
1979         if (priv->qos_data.qos_active)
1980                 priv->qos_data.def_qos_parm.qos_flags |=
1981                         QOS_PARAM_FLG_UPDATE_EDCA_MSK;
1982
1983         spin_unlock_irqrestore(&priv->lock, flags);
1984
1985         if (force || iwl_is_associated(priv)) {
1986                 IWL_DEBUG_QOS("send QoS cmd with Qos active %d \n",
1987                               priv->qos_data.qos_active);
1988
1989                 iwl_send_qos_params_command(priv,
1990                                 &(priv->qos_data.def_qos_parm));
1991         }
1992 }
1993
1994 #endif /* CONFIG_IWLWIFI_QOS */
1995 /*
1996  * Power management (not Tx power!) functions
1997  */
1998 #define MSEC_TO_USEC 1024
1999
2000 #define NOSLP __constant_cpu_to_le32(0)
2001 #define SLP IWL_POWER_DRIVER_ALLOW_SLEEP_MSK
2002 #define SLP_TIMEOUT(T) __constant_cpu_to_le32((T) * MSEC_TO_USEC)
2003 #define SLP_VEC(X0, X1, X2, X3, X4) {__constant_cpu_to_le32(X0), \
2004                                      __constant_cpu_to_le32(X1), \
2005                                      __constant_cpu_to_le32(X2), \
2006                                      __constant_cpu_to_le32(X3), \
2007                                      __constant_cpu_to_le32(X4)}
2008
2009
2010 /* default power management (not Tx power) table values */
2011 /* for tim  0-10 */
2012 static struct iwl_power_vec_entry range_0[IWL_POWER_AC] = {
2013         {{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
2014         {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0},
2015         {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(300), SLP_VEC(2, 4, 6, 7, 7)}, 0},
2016         {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(100), SLP_VEC(2, 6, 9, 9, 10)}, 0},
2017         {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(25), SLP_VEC(2, 7, 9, 9, 10)}, 1},
2018         {{SLP, SLP_TIMEOUT(25), SLP_TIMEOUT(25), SLP_VEC(4, 7, 10, 10, 10)}, 1}
2019 };
2020
2021 /* for tim > 10 */
2022 static struct iwl_power_vec_entry range_1[IWL_POWER_AC] = {
2023         {{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
2024         {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500),
2025                  SLP_VEC(1, 2, 3, 4, 0xFF)}, 0},
2026         {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(300),
2027                  SLP_VEC(2, 4, 6, 7, 0xFF)}, 0},
2028         {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(100),
2029                  SLP_VEC(2, 6, 9, 9, 0xFF)}, 0},
2030         {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(25), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0},
2031         {{SLP, SLP_TIMEOUT(25), SLP_TIMEOUT(25),
2032                  SLP_VEC(4, 7, 10, 10, 0xFF)}, 0}
2033 };
2034
2035 int iwl_power_init_handle(struct iwl_priv *priv)
2036 {
2037         int rc = 0, i;
2038         struct iwl_power_mgr *pow_data;
2039         int size = sizeof(struct iwl_power_vec_entry) * IWL_POWER_AC;
2040         u16 pci_pm;
2041
2042         IWL_DEBUG_POWER("Initialize power \n");
2043
2044         pow_data = &(priv->power_data);
2045
2046         memset(pow_data, 0, sizeof(*pow_data));
2047
2048         pow_data->active_index = IWL_POWER_RANGE_0;
2049         pow_data->dtim_val = 0xffff;
2050
2051         memcpy(&pow_data->pwr_range_0[0], &range_0[0], size);
2052         memcpy(&pow_data->pwr_range_1[0], &range_1[0], size);
2053
2054         rc = pci_read_config_word(priv->pci_dev, PCI_LINK_CTRL, &pci_pm);
2055         if (rc != 0)
2056                 return 0;
2057         else {
2058                 struct iwl_powertable_cmd *cmd;
2059
2060                 IWL_DEBUG_POWER("adjust power command flags\n");
2061
2062                 for (i = 0; i < IWL_POWER_AC; i++) {
2063                         cmd = &pow_data->pwr_range_0[i].cmd;
2064
2065                         if (pci_pm & 0x1)
2066                                 cmd->flags &= ~IWL_POWER_PCI_PM_MSK;
2067                         else
2068                                 cmd->flags |= IWL_POWER_PCI_PM_MSK;
2069                 }
2070         }
2071         return rc;
2072 }
2073
2074 static int iwl_update_power_cmd(struct iwl_priv *priv,
2075                                 struct iwl_powertable_cmd *cmd, u32 mode)
2076 {
2077         int rc = 0, i;
2078         u8 skip;
2079         u32 max_sleep = 0;
2080         struct iwl_power_vec_entry *range;
2081         u8 period = 0;
2082         struct iwl_power_mgr *pow_data;
2083
2084         if (mode > IWL_POWER_INDEX_5) {
2085                 IWL_DEBUG_POWER("Error: invalid power mode\n");
2086                 return -1;
2087         }
2088         pow_data = &(priv->power_data);
2089
2090         if (pow_data->active_index == IWL_POWER_RANGE_0)
2091                 range = &pow_data->pwr_range_0[0];
2092         else
2093                 range = &pow_data->pwr_range_1[0];
2094
2095         memcpy(cmd, &range[mode].cmd, sizeof(struct iwl_powertable_cmd));
2096
2097 #ifdef IWL_MAC80211_DISABLE
2098         if (priv->assoc_network != NULL) {
2099                 unsigned long flags;
2100
2101                 period = priv->assoc_network->tim.tim_period;
2102         }
2103 #endif  /*IWL_MAC80211_DISABLE */
2104         skip = range[mode].no_dtim;
2105
2106         if (period == 0) {
2107                 period = 1;
2108                 skip = 0;
2109         }
2110
2111         if (skip == 0) {
2112                 max_sleep = period;
2113                 cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;
2114         } else {
2115                 __le32 slp_itrvl = cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1];
2116                 max_sleep = (le32_to_cpu(slp_itrvl) / period) * period;
2117                 cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK;
2118         }
2119
2120         for (i = 0; i < IWL_POWER_VEC_SIZE; i++) {
2121                 if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep)
2122                         cmd->sleep_interval[i] = cpu_to_le32(max_sleep);
2123         }
2124
2125         IWL_DEBUG_POWER("Flags value = 0x%08X\n", cmd->flags);
2126         IWL_DEBUG_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
2127         IWL_DEBUG_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
2128         IWL_DEBUG_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n",
2129                         le32_to_cpu(cmd->sleep_interval[0]),
2130                         le32_to_cpu(cmd->sleep_interval[1]),
2131                         le32_to_cpu(cmd->sleep_interval[2]),
2132                         le32_to_cpu(cmd->sleep_interval[3]),
2133                         le32_to_cpu(cmd->sleep_interval[4]));
2134
2135         return rc;
2136 }
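
/*
 * Illustration of the clamping above (editorial sketch): if the DTIM
 * period were 3 (it currently defaults to 1 because the TIM period is
 * only fetched inside the IWL_MAC80211_DISABLE block), range_0 mode
 * IWL_POWER_INDEX_5 has vector {4, 7, 10, 10, 10} and no_dtim = 1, so
 * max_sleep = (10 / 3) * 3 = 9 and the vector sent to the uCode becomes
 * {4, 7, 9, 9, 9} with IWL_POWER_SLEEP_OVER_DTIM_MSK set.  With
 * no_dtim = 0, entries are simply capped at the DTIM period.
 */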
2137
2138 static int iwl_send_power_mode(struct iwl_priv *priv, u32 mode)
2139 {
2140         u32 final_mode = mode;
2141         int rc;
2142         struct iwl_powertable_cmd cmd;
2143
2144         /* If on battery, set to 3;
2145          * if plugged into AC power, set to CAM ("continuously aware mode");
2146          * otherwise use the requested user level */
2147         switch (mode) {
2148         case IWL_POWER_BATTERY:
2149                 final_mode = IWL_POWER_INDEX_3;
2150                 break;
2151         case IWL_POWER_AC:
2152                 final_mode = IWL_POWER_MODE_CAM;
2153                 break;
2154         default:
2155                 final_mode = mode;
2156                 break;
2157         }
2158
2159         iwl_update_power_cmd(priv, &cmd, final_mode);
2160
2161         rc = iwl_send_cmd_pdu(priv, POWER_TABLE_CMD, sizeof(cmd), &cmd);
2162
2163         if (final_mode == IWL_POWER_MODE_CAM)
2164                 clear_bit(STATUS_POWER_PMI, &priv->status);
2165         else
2166                 set_bit(STATUS_POWER_PMI, &priv->status);
2167
2168         return rc;
2169 }
2170
2171 int iwl_is_network_packet(struct iwl_priv *priv, struct ieee80211_hdr *header)
2172 {
2173         /* Filter incoming packets to determine if they are targeted toward
2174          * this network, discarding packets coming from ourselves */
2175         switch (priv->iw_mode) {
2176         case IEEE80211_IF_TYPE_IBSS: /* Header: Dest. | Source    | BSSID */
2177                 /* packets from our adapter are dropped (echo) */
2178                 if (!compare_ether_addr(header->addr2, priv->mac_addr))
2179                         return 0;
2180                 /* {broad,multi}cast packets to our IBSS go through */
2181                 if (is_multicast_ether_addr(header->addr1))
2182                         return !compare_ether_addr(header->addr3, priv->bssid);
2183                 /* packets to our adapter go through */
2184                 return !compare_ether_addr(header->addr1, priv->mac_addr);
2185         case IEEE80211_IF_TYPE_STA: /* Header: Dest. | AP{BSSID} | Source */
2186                 /* packets from our adapter are dropped (echo) */
2187                 if (!compare_ether_addr(header->addr3, priv->mac_addr))
2188                         return 0;
2189                 /* {broad,multi}cast packets to our BSS go through */
2190                 if (is_multicast_ether_addr(header->addr1))
2191                         return !compare_ether_addr(header->addr2, priv->bssid);
2192                 /* packets to our adapter go through */
2193                 return !compare_ether_addr(header->addr1, priv->mac_addr);
2194         }
2195
2196         return 1;
2197 }
2198
2199 #define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
2200
2201 const char *iwl_get_tx_fail_reason(u32 status)
2202 {
2203         switch (status & TX_STATUS_MSK) {
2204         case TX_STATUS_SUCCESS:
2205                 return "SUCCESS";
2206                 TX_STATUS_ENTRY(SHORT_LIMIT);
2207                 TX_STATUS_ENTRY(LONG_LIMIT);
2208                 TX_STATUS_ENTRY(FIFO_UNDERRUN);
2209                 TX_STATUS_ENTRY(MGMNT_ABORT);
2210                 TX_STATUS_ENTRY(NEXT_FRAG);
2211                 TX_STATUS_ENTRY(LIFE_EXPIRE);
2212                 TX_STATUS_ENTRY(DEST_PS);
2213                 TX_STATUS_ENTRY(ABORTED);
2214                 TX_STATUS_ENTRY(BT_RETRY);
2215                 TX_STATUS_ENTRY(STA_INVALID);
2216                 TX_STATUS_ENTRY(FRAG_DROPPED);
2217                 TX_STATUS_ENTRY(TID_DISABLE);
2218                 TX_STATUS_ENTRY(FRAME_FLUSHED);
2219                 TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
2220                 TX_STATUS_ENTRY(TX_LOCKED);
2221                 TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
2222         }
2223
2224         return "UNKNOWN";
2225 }
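
/*
 * TX_STATUS_ENTRY() relies on token pasting and stringification, e.g.
 * TX_STATUS_ENTRY(ABORTED) expands to:
 *
 *   case TX_STATUS_FAIL_ABORTED: return "ABORTED";
 *
 * which keeps the status-to-string mapping in sync with the constant
 * names.
 */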
2226
2227 /**
2228  * iwl_scan_cancel - Cancel any currently executing HW scan
2229  *
2230  * NOTE: priv->mutex is not required before calling this function
2231  */
2232 static int iwl_scan_cancel(struct iwl_priv *priv)
2233 {
2234         if (!test_bit(STATUS_SCAN_HW, &priv->status)) {
2235                 clear_bit(STATUS_SCANNING, &priv->status);
2236                 return 0;
2237         }
2238
2239         if (test_bit(STATUS_SCANNING, &priv->status)) {
2240                 if (!test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
2241                         IWL_DEBUG_SCAN("Queuing scan abort.\n");
2242                         set_bit(STATUS_SCAN_ABORTING, &priv->status);
2243                         queue_work(priv->workqueue, &priv->abort_scan);
2244
2245                 } else
2246                         IWL_DEBUG_SCAN("Scan abort already in progress.\n");
2247
2248                 return test_bit(STATUS_SCANNING, &priv->status);
2249         }
2250
2251         return 0;
2252 }
2253
2254 /**
2255  * iwl_scan_cancel_timeout - Cancel any currently executing HW scan
2256  * @ms: amount of time to wait (in milliseconds) for scan to abort
2257  *
2258  * NOTE: priv->mutex must be held before calling this function
2259  */
2260 static int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
2261 {
2262         unsigned long now = jiffies;
2263         int ret;
2264
2265         ret = iwl_scan_cancel(priv);
2266         if (ret && ms) {
2267                 mutex_unlock(&priv->mutex);
2268                 while (!time_after(jiffies, now + msecs_to_jiffies(ms)) &&
2269                                 test_bit(STATUS_SCANNING, &priv->status))
2270                         msleep(1);
2271                 mutex_lock(&priv->mutex);
2272
2273                 return test_bit(STATUS_SCANNING, &priv->status);
2274         }
2275
2276         return ret;
2277 }
2278
2279 static void iwl_sequence_reset(struct iwl_priv *priv)
2280 {
2281         /* Reset ieee stats */
2282
2283         /* We don't reset the net_device_stats (ieee->stats) on
2284          * re-association */
2285
2286         priv->last_seq_num = -1;
2287         priv->last_frag_num = -1;
2288         priv->last_packet_time = 0;
2289
2290         iwl_scan_cancel(priv);
2291 }
2292
2293 #define MAX_UCODE_BEACON_INTERVAL       1024
2294 #define INTEL_CONN_LISTEN_INTERVAL      __constant_cpu_to_le16(0xA)
2295
2296 static __le16 iwl_adjust_beacon_interval(u16 beacon_val)
2297 {
2298         u16 new_val = 0;
2299         u16 beacon_factor = 0;
2300
2301         beacon_factor =
2302             (beacon_val + MAX_UCODE_BEACON_INTERVAL)
2303                 / MAX_UCODE_BEACON_INTERVAL;
2304         new_val = beacon_val / beacon_factor;
2305
2306         return cpu_to_le16(new_val);
2307 }
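
/*
 * Worked example: the uCode handles beacon intervals only up to
 * MAX_UCODE_BEACON_INTERVAL (1024, apparently in TU), so an interval of
 * 3000 gives beacon_factor = (3000 + 1024) / 1024 = 3 and a programmed
 * value of 3000 / 3 = 1000, while a typical interval of 100 passes
 * through unchanged (factor 1).
 */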
2308
2309 static void iwl_setup_rxon_timing(struct iwl_priv *priv)
2310 {
2311         u64 interval_tm_unit;
2312         u64 tsf, result;
2313         unsigned long flags;
2314         struct ieee80211_conf *conf = NULL;
2315         u16 beacon_int = 0;
2316
2317         conf = ieee80211_get_hw_conf(priv->hw);
2318
2319         spin_lock_irqsave(&priv->lock, flags);
2320         priv->rxon_timing.timestamp.dw[1] = cpu_to_le32(priv->timestamp1);
2321         priv->rxon_timing.timestamp.dw[0] = cpu_to_le32(priv->timestamp0);
2322
2323         priv->rxon_timing.listen_interval = INTEL_CONN_LISTEN_INTERVAL;
2324
2325         tsf = priv->timestamp1;
2326         tsf = ((tsf << 32) | priv->timestamp0);
2327
2328         beacon_int = priv->beacon_int;
2329         spin_unlock_irqrestore(&priv->lock, flags);
2330
2331         if (priv->iw_mode == IEEE80211_IF_TYPE_STA) {
2332                 if (beacon_int == 0) {
2333                         priv->rxon_timing.beacon_interval = cpu_to_le16(100);
2334                         priv->rxon_timing.beacon_init_val = cpu_to_le32(102400);
2335                 } else {
2336                         priv->rxon_timing.beacon_interval =
2337                                 cpu_to_le16(beacon_int);
2338                         priv->rxon_timing.beacon_interval =
2339                             iwl_adjust_beacon_interval(
2340                                 le16_to_cpu(priv->rxon_timing.beacon_interval));
2341                 }
2342
2343                 priv->rxon_timing.atim_window = 0;
2344         } else {
2345                 priv->rxon_timing.beacon_interval =
2346                         iwl_adjust_beacon_interval(conf->beacon_int);
2347                 /* TODO: we need to get atim_window from upper stack
2348                  * for now we set to 0 */
2349                 priv->rxon_timing.atim_window = 0;
2350         }
2351
2352         interval_tm_unit =
2353                 (le16_to_cpu(priv->rxon_timing.beacon_interval) * 1024);
2354         result = do_div(tsf, interval_tm_unit);
2355         priv->rxon_timing.beacon_init_val =
2356             cpu_to_le32((u32) ((u64) interval_tm_unit - result));
2357
2358         IWL_DEBUG_ASSOC
2359             ("beacon interval %d beacon timer %d beacon tim %d\n",
2360                 le16_to_cpu(priv->rxon_timing.beacon_interval),
2361                 le32_to_cpu(priv->rxon_timing.beacon_init_val),
2362                 le16_to_cpu(priv->rxon_timing.atim_window));
2363 }
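
/*
 * beacon_init_val above is the time remaining until the next beacon
 * boundary: interval_tm_unit is the beacon interval in usec (TU * 1024),
 * do_div() leaves the TSF offset into the current interval in 'result',
 * and the difference is what gets programmed.  For example (editorial
 * arithmetic only), with a 100 TU interval (102400 usec) and
 * tsf % 102400 == 30000, beacon_init_val = 102400 - 30000 = 72400 usec.
 */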
2364
2365 static int iwl_scan_initiate(struct iwl_priv *priv)
2366 {
2367         if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
2368                 IWL_ERROR("APs don't scan.\n");
2369                 return 0;
2370         }
2371
2372         if (!iwl_is_ready_rf(priv)) {
2373                 IWL_DEBUG_SCAN("Aborting scan due to not ready.\n");
2374                 return -EIO;
2375         }
2376
2377         if (test_bit(STATUS_SCANNING, &priv->status)) {
2378                 IWL_DEBUG_SCAN("Scan already in progress.\n");
2379                 return -EAGAIN;
2380         }
2381
2382         if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
2383                 IWL_DEBUG_SCAN("Scan request while abort pending.  "
2384                                "Queuing.\n");
2385                 return -EAGAIN;
2386         }
2387
2388         IWL_DEBUG_INFO("Starting scan...\n");
2389         priv->scan_bands = 2;
2390         set_bit(STATUS_SCANNING, &priv->status);
2391         priv->scan_start = jiffies;
2392         priv->scan_pass_start = priv->scan_start;
2393
2394         queue_work(priv->workqueue, &priv->request_scan);
2395
2396         return 0;
2397 }
2398
2399 static int iwl_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt)
2400 {
2401         struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
2402
2403         if (hw_decrypt)
2404                 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
2405         else
2406                 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
2407
2408         return 0;
2409 }
2410
2411 static void iwl_set_flags_for_phymode(struct iwl_priv *priv, u8 phymode)
2412 {
2413         if (phymode == MODE_IEEE80211A) {
2414                 priv->staging_rxon.flags &=
2415                     ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
2416                       | RXON_FLG_CCK_MSK);
2417                 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
2418         } else {
2419                 /* Copied from iwl_bg_post_associate() */
2420                 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
2421                         priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
2422                 else
2423                         priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
2424
2425                 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
2426                         priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
2427
2428                 priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
2429                 priv->staging_rxon.flags |= RXON_FLG_AUTO_DETECT_MSK;
2430                 priv->staging_rxon.flags &= ~RXON_FLG_CCK_MSK;
2431         }
2432 }
2433
2434 /*
2435  * initialize rxon structure with default values from eeprom
2436  */
2437 static void iwl_connection_init_rx_config(struct iwl_priv *priv)
2438 {
2439         const struct iwl_channel_info *ch_info;
2440
2441         memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon));
2442
2443         switch (priv->iw_mode) {
2444         case IEEE80211_IF_TYPE_AP:
2445                 priv->staging_rxon.dev_type = RXON_DEV_TYPE_AP;
2446                 break;
2447
2448         case IEEE80211_IF_TYPE_STA:
2449                 priv->staging_rxon.dev_type = RXON_DEV_TYPE_ESS;
2450                 priv->staging_rxon.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
2451                 break;
2452
2453         case IEEE80211_IF_TYPE_IBSS:
2454                 priv->staging_rxon.dev_type = RXON_DEV_TYPE_IBSS;
2455                 priv->staging_rxon.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
2456                 priv->staging_rxon.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
2457                                                   RXON_FILTER_ACCEPT_GRP_MSK;
2458                 break;
2459
2460         case IEEE80211_IF_TYPE_MNTR:
2461                 priv->staging_rxon.dev_type = RXON_DEV_TYPE_SNIFFER;
2462                 priv->staging_rxon.filter_flags = RXON_FILTER_PROMISC_MSK |
2463                     RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_ACCEPT_GRP_MSK;
2464                 break;
2465         }
2466
2467 #if 0
2468         /* TODO:  Figure out when short_preamble would be set and cache from
2469          * that */
2470         if (!hw_to_local(priv->hw)->short_preamble)
2471                 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
2472         else
2473                 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
2474 #endif
2475
2476         ch_info = iwl_get_channel_info(priv, priv->phymode,
2477                                        le16_to_cpu(priv->staging_rxon.channel));
2478
2479         if (!ch_info)
2480                 ch_info = &priv->channel_info[0];
2481
2482         /*
2483          * in some cases the A-band channels are all non-IBSS;
2484          * in that case force a B/G channel
2485          */
2486         if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) &&
2487             !(is_channel_ibss(ch_info)))
2488                 ch_info = &priv->channel_info[0];
2489
2490         priv->staging_rxon.channel = cpu_to_le16(ch_info->channel);
2491         if (is_channel_a_band(ch_info))
2492                 priv->phymode = MODE_IEEE80211A;
2493         else
2494                 priv->phymode = MODE_IEEE80211G;
2495
2496         iwl_set_flags_for_phymode(priv, priv->phymode);
2497
2498         priv->staging_rxon.ofdm_basic_rates =
2499             (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
2500         priv->staging_rxon.cck_basic_rates =
2501             (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
2502 }
2503
2504 static int iwl_set_mode(struct iwl_priv *priv, int mode)
2505 {
2506         if (!iwl_is_ready_rf(priv))
2507                 return -EAGAIN;
2508
2509         if (mode == IEEE80211_IF_TYPE_IBSS) {
2510                 const struct iwl_channel_info *ch_info;
2511
2512                 ch_info = iwl_get_channel_info(priv,
2513                         priv->phymode,
2514                         le16_to_cpu(priv->staging_rxon.channel));
2515
2516                 if (!ch_info || !is_channel_ibss(ch_info)) {
2517                         IWL_ERROR("channel %d not IBSS channel\n",
2518                                   le16_to_cpu(priv->staging_rxon.channel));
2519                         return -EINVAL;
2520                 }
2521         }
2522
2523         cancel_delayed_work(&priv->scan_check);
2524         if (iwl_scan_cancel_timeout(priv, 100)) {
2525                 IWL_WARNING("Aborted scan still in progress after 100ms\n");
2526                 IWL_DEBUG_MAC80211("leaving - scan abort failed.\n");
2527                 return -EAGAIN;
2528         }
2529
2530         priv->iw_mode = mode;
2531
2532         iwl_connection_init_rx_config(priv);
2533         memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
2534
2535         iwl_clear_stations_table(priv);
2536
2537         iwl_commit_rxon(priv);
2538
2539         return 0;
2540 }
2541
2542 static void iwl_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
2543                                       struct ieee80211_tx_control *ctl,
2544                                       struct iwl_cmd *cmd,
2545                                       struct sk_buff *skb_frag,
2546                                       int last_frag)
2547 {
2548         struct iwl_hw_key *keyinfo = &priv->stations[ctl->key_idx].keyinfo;
2549
2550         switch (keyinfo->alg) {
2551         case ALG_CCMP:
2552                 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_CCM;
2553                 memcpy(cmd->cmd.tx.key, keyinfo->key, keyinfo->keylen);
2554                 IWL_DEBUG_TX("tx_cmd with aes hwcrypto\n");
2555                 break;
2556
2557         case ALG_TKIP:
2558 #if 0
2559                 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_TKIP;
2560
2561                 if (last_frag)
2562                         memcpy(cmd->cmd.tx.tkip_mic.byte, skb_frag->tail - 8,
2563                                8);
2564                 else
2565                         memset(cmd->cmd.tx.tkip_mic.byte, 0, 8);
2566 #endif
2567                 break;
2568
2569         case ALG_WEP:
2570                 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_WEP |
2571                     (ctl->key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT;
2572
2573                 if (keyinfo->keylen == 13)
2574                         cmd->cmd.tx.sec_ctl |= TX_CMD_SEC_KEY128;
2575
2576                 memcpy(&cmd->cmd.tx.key[3], keyinfo->key, keyinfo->keylen);
2577
2578                 IWL_DEBUG_TX("Configuring packet for WEP encryption "
2579                              "with key %d\n", ctl->key_idx);
2580                 break;
2581
2582         case ALG_NONE:
2583                 IWL_DEBUG_TX("Tx packet in the clear (encrypt requested).\n");
2584                 break;
2585
2586         default:
2587                 printk(KERN_ERR "Unknown encode alg %d\n", keyinfo->alg);
2588                 break;
2589         }
2590 }
2591
2592 /*
2593  * handle build REPLY_TX command notification.
2594  */
2595 static void iwl_build_tx_cmd_basic(struct iwl_priv *priv,
2596                                   struct iwl_cmd *cmd,
2597                                   struct ieee80211_tx_control *ctrl,
2598                                   struct ieee80211_hdr *hdr,
2599                                   int is_unicast, u8 std_id)
2600 {
2601         __le16 *qc;
2602         u16 fc = le16_to_cpu(hdr->frame_control);
2603         __le32 tx_flags = cmd->cmd.tx.tx_flags;
2604
2605         cmd->cmd.tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
2606         if (!(ctrl->flags & IEEE80211_TXCTL_NO_ACK)) {
2607                 tx_flags |= TX_CMD_FLG_ACK_MSK;
2608                 if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
2609                         tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2610                 if (ieee80211_is_probe_response(fc) &&
2611                     !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
2612                         tx_flags |= TX_CMD_FLG_TSF_MSK;
2613         } else {
2614                 tx_flags &= (~TX_CMD_FLG_ACK_MSK);
2615                 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2616         }
2617
2618         cmd->cmd.tx.sta_id = std_id;
2619         if (ieee80211_get_morefrag(hdr))
2620                 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
2621
2622         qc = ieee80211_get_qos_ctrl(hdr);
2623         if (qc) {
2624                 cmd->cmd.tx.tid_tspec = (u8) (le16_to_cpu(*qc) & 0xf);
2625                 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
2626         } else
2627                 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2628
2629         if (ctrl->flags & IEEE80211_TXCTL_USE_RTS_CTS) {
2630                 tx_flags |= TX_CMD_FLG_RTS_MSK;
2631                 tx_flags &= ~TX_CMD_FLG_CTS_MSK;
2632         } else if (ctrl->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) {
2633                 tx_flags &= ~TX_CMD_FLG_RTS_MSK;
2634                 tx_flags |= TX_CMD_FLG_CTS_MSK;
2635         }
2636
2637         if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
2638                 tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
2639
2640         tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
2641         if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) {
2642                 if ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ ||
2643                     (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
2644                         cmd->cmd.tx.timeout.pm_frame_timeout =
2645                                 cpu_to_le16(3);
2646                 else
2647                         cmd->cmd.tx.timeout.pm_frame_timeout =
2648                                 cpu_to_le16(2);
2649         } else
2650                 cmd->cmd.tx.timeout.pm_frame_timeout = 0;
2651
2652         cmd->cmd.tx.driver_txop = 0;
2653         cmd->cmd.tx.tx_flags = tx_flags;
2654         cmd->cmd.tx.next_frame_len = 0;
2655 }
2656
2657 static int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
2658 {
2659         int sta_id;
2660         u16 fc = le16_to_cpu(hdr->frame_control);
2661
2662         /* If this frame is broadcast or not data then use the broadcast
2663          * station id */
2664         if (((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) ||
2665             is_multicast_ether_addr(hdr->addr1))
2666                 return priv->hw_setting.bcast_sta_id;
2667
2668         switch (priv->iw_mode) {
2669
2670         /* If this frame is part of a BSS network (we're a station), then
2671          * we use the AP's station id */
2672         case IEEE80211_IF_TYPE_STA:
2673                 return IWL_AP_ID;
2674
2675         /* If we are an AP, then find the station, or use BCAST */
2676         case IEEE80211_IF_TYPE_AP:
2677                 sta_id = iwl_hw_find_station(priv, hdr->addr1);
2678                 if (sta_id != IWL_INVALID_STATION)
2679                         return sta_id;
2680                 return priv->hw_setting.bcast_sta_id;
2681
2682         /* If this frame is part of a IBSS network, then we use the
2683          * target specific station id */
2684         case IEEE80211_IF_TYPE_IBSS: {
2685                 DECLARE_MAC_BUF(mac);
2686
2687                 sta_id = iwl_hw_find_station(priv, hdr->addr1);
2688                 if (sta_id != IWL_INVALID_STATION)
2689                         return sta_id;
2690
2691                 sta_id = iwl_add_station(priv, hdr->addr1, 0, CMD_ASYNC);
2692
2693                 if (sta_id != IWL_INVALID_STATION)
2694                         return sta_id;
2695
2696                 IWL_DEBUG_DROP("Station %s not in station map. "
2697                                "Defaulting to broadcast...\n",
2698                                print_mac(mac, hdr->addr1));
2699                 iwl_print_hex_dump(IWL_DL_DROP, (u8 *) hdr, sizeof(*hdr));
2700                 return priv->hw_setting.bcast_sta_id;
2701         }
2702         default:
2703                 IWL_WARNING("Unknown mode of operation: %d\n", priv->iw_mode);
2704                 return priv->hw_setting.bcast_sta_id;
2705         }
2706 }
2707
2708 /*
2709  * start REPLY_TX command process
2710  */
2711 static int iwl_tx_skb(struct iwl_priv *priv,
2712                       struct sk_buff *skb, struct ieee80211_tx_control *ctl)
2713 {
2714         struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2715         struct iwl_tfd_frame *tfd;
2716         u32 *control_flags;
2717         int txq_id = ctl->queue;
2718         struct iwl_tx_queue *txq = NULL;
2719         struct iwl_queue *q = NULL;
2720         dma_addr_t phys_addr;
2721         dma_addr_t txcmd_phys;
2722         struct iwl_cmd *out_cmd = NULL;
2723         u16 len, idx, len_org;
2724         u8 id, hdr_len, unicast;
2725         u8 sta_id;
2726         u16 seq_number = 0;
2727         u16 fc;
2728         __le16 *qc;
2729         u8 wait_write_ptr = 0;
2730         unsigned long flags;
2731         int rc;
2732
2733         spin_lock_irqsave(&priv->lock, flags);
2734         if (iwl_is_rfkill(priv)) {
2735                 IWL_DEBUG_DROP("Dropping - RF KILL\n");
2736                 goto drop_unlock;
2737         }
2738
2739         if (!priv->interface_id) {
2740                 IWL_DEBUG_DROP("Dropping - !priv->interface_id\n");
2741                 goto drop_unlock;
2742         }
2743
2744         if ((ctl->tx_rate & 0xFF) == IWL_INVALID_RATE) {
2745                 IWL_ERROR("ERROR: No TX rate available.\n");
2746                 goto drop_unlock;
2747         }
2748
2749         unicast = !is_multicast_ether_addr(hdr->addr1);
2750         id = 0;
2751
2752         fc = le16_to_cpu(hdr->frame_control);
2753
2754 #ifdef CONFIG_IWLWIFI_DEBUG
2755         if (ieee80211_is_auth(fc))
2756                 IWL_DEBUG_TX("Sending AUTH frame\n");
2757         else if (ieee80211_is_assoc_request(fc))
2758                 IWL_DEBUG_TX("Sending ASSOC frame\n");
2759         else if (ieee80211_is_reassoc_request(fc))
2760                 IWL_DEBUG_TX("Sending REASSOC frame\n");
2761 #endif
2762
2763         if (!iwl_is_associated(priv) &&
2764             ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)) {
2765                 IWL_DEBUG_DROP("Dropping - !iwl_is_associated\n");
2766                 goto drop_unlock;
2767         }
2768
2769         spin_unlock_irqrestore(&priv->lock, flags);
2770
2771         hdr_len = ieee80211_get_hdrlen(fc);
2772         sta_id = iwl_get_sta_id(priv, hdr);
2773         if (sta_id == IWL_INVALID_STATION) {
2774                 DECLARE_MAC_BUF(mac);
2775
2776                 IWL_DEBUG_DROP("Dropping - INVALID STATION: %s\n",
2777                                print_mac(mac, hdr->addr1));
2778                 goto drop;
2779         }
2780
2781         IWL_DEBUG_RATE("station Id %d\n", sta_id);
2782
2783         qc = ieee80211_get_qos_ctrl(hdr);
2784         if (qc) {
2785                 u8 tid = (u8)(le16_to_cpu(*qc) & 0xf);
2786                 seq_number = priv->stations[sta_id].tid[tid].seq_number &
2787                                 IEEE80211_SCTL_SEQ;
2788                 hdr->seq_ctrl = cpu_to_le16(seq_number) |
2789                         (hdr->seq_ctrl &
2790                                 __constant_cpu_to_le16(IEEE80211_SCTL_FRAG));
2791                 seq_number += 0x10;
2792         }
2793         txq = &priv->txq[txq_id];
2794         q = &txq->q;
2795
2796         spin_lock_irqsave(&priv->lock, flags);
2797
2798         tfd = &txq->bd[q->first_empty];
2799         memset(tfd, 0, sizeof(*tfd));
2800         control_flags = (u32 *) tfd;
2801         idx = get_cmd_index(q, q->first_empty, 0);
2802
2803         memset(&(txq->txb[q->first_empty]), 0, sizeof(struct iwl_tx_info));
2804         txq->txb[q->first_empty].skb[0] = skb;
2805         memcpy(&(txq->txb[q->first_empty].status.control),
2806                ctl, sizeof(struct ieee80211_tx_control));
2807         out_cmd = &txq->cmd[idx];
2808         memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
2809         memset(&out_cmd->cmd.tx, 0, sizeof(out_cmd->cmd.tx));
2810         out_cmd->hdr.cmd = REPLY_TX;
2811         out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
2812                                 INDEX_TO_SEQ(q->first_empty)));
2813         /* copy frags header */
2814         memcpy(out_cmd->cmd.tx.hdr, hdr, hdr_len);
2815
2816         /* hdr = (struct ieee80211_hdr *)out_cmd->cmd.tx.hdr; */
2817         len = priv->hw_setting.tx_cmd_len +
2818                 sizeof(struct iwl_cmd_header) + hdr_len;
2819
2820         len_org = len;
2821         len = (len + 3) & ~3;
2822
2823         if (len_org != len)
2824                 len_org = 1;
2825         else
2826                 len_org = 0;
2827
2828         txcmd_phys = txq->dma_addr_cmd + sizeof(struct iwl_cmd) * idx +
2829                      offsetof(struct iwl_cmd, hdr);
2830
2831         iwl_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len);
2832
2833         if (!(ctl->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT))
2834                 iwl_build_tx_cmd_hwcrypto(priv, ctl, out_cmd, skb, 0);
2835
2836         /* 802.11 null functions have no payload... */
2837         len = skb->len - hdr_len;
2838         if (len) {
2839                 phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
2840                                            len, PCI_DMA_TODEVICE);
2841                 iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, len);
2842         }
2843
2844         /* If there is no payload, then only one TFD is used */
2845         if (!len)
2846                 *control_flags = TFD_CTL_COUNT_SET(1);
2847         else
2848                 *control_flags = TFD_CTL_COUNT_SET(2) |
2849                         TFD_CTL_PAD_SET(U32_PAD(len));
2850
2851         len = (u16)skb->len;
2852         out_cmd->cmd.tx.len = cpu_to_le16(len);
2853
2854         /* TODO need this for burst mode later on */
2855         iwl_build_tx_cmd_basic(priv, out_cmd, ctl, hdr, unicast, sta_id);
2856
2857         /* set is_hcca to 0; it probably will never be implemented */
2858         iwl_hw_build_tx_cmd_rate(priv, out_cmd, ctl, hdr, sta_id, 0);
2859
2860         out_cmd->cmd.tx.tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
2861         out_cmd->cmd.tx.tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
2862
2863         if (!ieee80211_get_morefrag(hdr)) {
2864                 txq->need_update = 1;
2865                 if (qc) {
2866                         u8 tid = (u8)(le16_to_cpu(*qc) & 0xf);
2867                         priv->stations[sta_id].tid[tid].seq_number = seq_number;
2868                 }
2869         } else {
2870                 wait_write_ptr = 1;
2871                 txq->need_update = 0;
2872         }
2873
2874         iwl_print_hex_dump(IWL_DL_TX, out_cmd->cmd.payload,
2875                            sizeof(out_cmd->cmd.tx));
2876
2877         iwl_print_hex_dump(IWL_DL_TX, (u8 *)out_cmd->cmd.tx.hdr,
2878                            ieee80211_get_hdrlen(fc));
2879
2880         q->first_empty = iwl_queue_inc_wrap(q->first_empty, q->n_bd);
2881         rc = iwl_tx_queue_update_write_ptr(priv, txq);
2882         spin_unlock_irqrestore(&priv->lock, flags);
2883
2884         if (rc)
2885                 return rc;
2886
2887         if ((iwl_queue_space(q) < q->high_mark)
2888             && priv->mac80211_registered) {
2889                 if (wait_write_ptr) {
2890                         spin_lock_irqsave(&priv->lock, flags);
2891                         txq->need_update = 1;
2892                         iwl_tx_queue_update_write_ptr(priv, txq);
2893                         spin_unlock_irqrestore(&priv->lock, flags);
2894                 }
2895
2896                 ieee80211_stop_queue(priv->hw, ctl->queue);
2897         }
2898
2899         return 0;
2900
2901 drop_unlock:
2902         spin_unlock_irqrestore(&priv->lock, flags);
2903 drop:
2904         return -1;
2905 }
2906
2907 static void iwl_set_rate(struct iwl_priv *priv)
2908 {
2909         const struct ieee80211_hw_mode *hw = NULL;
2910         struct ieee80211_rate *rate;
2911         int i;
2912
2913         hw = iwl_get_hw_mode(priv, priv->phymode);
2914
2915         priv->active_rate = 0;
2916         priv->active_rate_basic = 0;
2917
2918         IWL_DEBUG_RATE("Setting rates for 802.11%c\n",
2919                        hw->mode == MODE_IEEE80211A ?
2920                        'a' : ((hw->mode == MODE_IEEE80211B) ? 'b' : 'g'));
2921
2922         for (i = 0; i < hw->num_rates; i++) {
2923                 rate = &(hw->rates[i]);
2924                 if ((rate->val < IWL_RATE_COUNT) &&
2925                     (rate->flags & IEEE80211_RATE_SUPPORTED)) {
2926                         IWL_DEBUG_RATE("Adding rate index %d (plcp %d)%s\n",
2927                                        rate->val, iwl_rates[rate->val].plcp,
2928                                        (rate->flags & IEEE80211_RATE_BASIC) ?
2929                                        "*" : "");
2930                         priv->active_rate |= (1 << rate->val);
2931                         if (rate->flags & IEEE80211_RATE_BASIC)
2932                                 priv->active_rate_basic |= (1 << rate->val);
2933                 } else
2934                         IWL_DEBUG_RATE("Not adding rate %d (plcp %d)\n",
2935                                        rate->val, iwl_rates[rate->val].plcp);
2936         }
2937
2938         IWL_DEBUG_RATE("Set active_rate = %0x, active_rate_basic = %0x\n",
2939                        priv->active_rate, priv->active_rate_basic);
2940
2941         /*
2942          * If a basic rate is configured, use it (the OFDM mask below always
2943          * lets IWL_RATE_6M_MASK through); otherwise fall back to the default
2944          * of all CCK rates and 6, 12, 24 Mbps for OFDM.
2945          */
2946         if (priv->active_rate_basic & IWL_CCK_BASIC_RATES_MASK)
2947                 priv->staging_rxon.cck_basic_rates =
2948                     ((priv->active_rate_basic &
2949                       IWL_CCK_RATES_MASK) >> IWL_FIRST_CCK_RATE) & 0xF;
2950         else
2951                 priv->staging_rxon.cck_basic_rates =
2952                     (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
2953
2954         if (priv->active_rate_basic & IWL_OFDM_BASIC_RATES_MASK)
2955                 priv->staging_rxon.ofdm_basic_rates =
2956                     ((priv->active_rate_basic &
2957                       (IWL_OFDM_BASIC_RATES_MASK | IWL_RATE_6M_MASK)) >>
2958                       IWL_FIRST_OFDM_RATE) & 0xFF;
2959         else
2960                 priv->staging_rxon.ofdm_basic_rates =
2961                    (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
2962 }
2963
2964 static void iwl_radio_kill_sw(struct iwl_priv *priv, int disable_radio)
2965 {
2966         unsigned long flags;
2967
2968         if (!!disable_radio == test_bit(STATUS_RF_KILL_SW, &priv->status))
2969                 return;
2970
2971         IWL_DEBUG_RF_KILL("Manual SW RF KILL set to: RADIO %s\n",
2972                           disable_radio ? "OFF" : "ON");
2973
2974         if (disable_radio) {
2975                 iwl_scan_cancel(priv);
2976                 /* FIXME: This is a workaround for AP */
2977                 if (priv->iw_mode != IEEE80211_IF_TYPE_AP) {
2978                         spin_lock_irqsave(&priv->lock, flags);
2979                         iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
2980                                     CSR_UCODE_SW_BIT_RFKILL);
2981                         spin_unlock_irqrestore(&priv->lock, flags);
2982                         iwl_send_card_state(priv, CARD_STATE_CMD_DISABLE, 0);
2983                         set_bit(STATUS_RF_KILL_SW, &priv->status);
2984                 }
2985                 return;
2986         }
2987
2988         spin_lock_irqsave(&priv->lock, flags);
2989         iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
2990
2991         clear_bit(STATUS_RF_KILL_SW, &priv->status);
2992         spin_unlock_irqrestore(&priv->lock, flags);
2993
2994         /* wake up ucode */
2995         msleep(10);
2996
2997         spin_lock_irqsave(&priv->lock, flags);
2998         iwl_read32(priv, CSR_UCODE_DRV_GP1);
2999         if (!iwl_grab_restricted_access(priv))
3000                 iwl_release_restricted_access(priv);
3001         spin_unlock_irqrestore(&priv->lock, flags);
3002
3003         if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
3004                 IWL_DEBUG_RF_KILL("Can not turn radio back on - "
3005                                   "disabled by HW switch\n");
3006                 return;
3007         }
3008
3009         queue_work(priv->workqueue, &priv->restart);
3010         return;
3011 }
3012
3013 void iwl_set_decrypted_flag(struct iwl_priv *priv, struct sk_buff *skb,
3014                             u32 decrypt_res, struct ieee80211_rx_status *stats)
3015 {
3016         u16 fc =
3017             le16_to_cpu(((struct ieee80211_hdr *)skb->data)->frame_control);
3018
3019         if (priv->active_rxon.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
3020                 return;
3021
3022         if (!(fc & IEEE80211_FCTL_PROTECTED))
3023                 return;
3024
3025         IWL_DEBUG_RX("decrypt_res:0x%x\n", decrypt_res);
3026         switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
3027         case RX_RES_STATUS_SEC_TYPE_TKIP:
3028                 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
3029                     RX_RES_STATUS_BAD_ICV_MIC)
3030                         stats->flag |= RX_FLAG_MMIC_ERROR;
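                /* fall through: TKIP frames are also checked below for a
                 * successful decrypt status */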
3031         case RX_RES_STATUS_SEC_TYPE_WEP:
3032         case RX_RES_STATUS_SEC_TYPE_CCMP:
3033                 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
3034                     RX_RES_STATUS_DECRYPT_OK) {
3035                         IWL_DEBUG_RX("hw decrypt successfully!!!\n");
3036                         stats->flag |= RX_FLAG_DECRYPTED;
3037                 }
3038                 break;
3039
3040         default:
3041                 break;
3042         }
3043 }
3044
3045 void iwl_handle_data_packet_monitor(struct iwl_priv *priv,
3046                                     struct iwl_rx_mem_buffer *rxb,
3047                                     void *data, short len,
3048                                     struct ieee80211_rx_status *stats,
3049                                     u16 phy_flags)
3050 {
3051         struct iwl_rt_rx_hdr *iwl_rt;
3052
3053         /* First cache any information we need before we overwrite
3054          * the information provided in the skb from the hardware */
3055         s8 signal = stats->ssi;
3056         s8 noise = 0;
3057         int rate = stats->rate;
3058         u64 tsf = stats->mactime;
3059         __le16 phy_flags_hw = cpu_to_le16(phy_flags);
3060
3061         /* Make sure the frame plus the radiotap header fits in the Rx buffer */
3062         if (len > IWL_RX_BUF_SIZE - sizeof(*iwl_rt)) {
3063                 IWL_DEBUG_DROP("Dropping too large packet in monitor\n");
3064                 return;
3065         }
3066
3067         /* copy the frame data so it sits just after the radiotap header */
3068         iwl_rt = (void *)rxb->skb->data;
3069         memmove(iwl_rt->payload, data, len);
3070
3071         iwl_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
3072         iwl_rt->rt_hdr.it_pad = 0; /* always good to zero */
3073
3074         /* total header + data */
3075         iwl_rt->rt_hdr.it_len = cpu_to_le16(sizeof(*iwl_rt));
3076
3077         /* Set the size of the skb to the size of the frame */
3078         skb_put(rxb->skb, sizeof(*iwl_rt) + len);
3079
3080         /* Big bitfield of all the fields we provide in radiotap */
3081         iwl_rt->rt_hdr.it_present =
3082             cpu_to_le32((1 << IEEE80211_RADIOTAP_TSFT) |
3083                         (1 << IEEE80211_RADIOTAP_FLAGS) |
3084                         (1 << IEEE80211_RADIOTAP_RATE) |
3085                         (1 << IEEE80211_RADIOTAP_CHANNEL) |
3086                         (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
3087                         (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
3088                         (1 << IEEE80211_RADIOTAP_ANTENNA));
3089
3090         /* Zero the flags, we'll add to them as we go */
3091         iwl_rt->rt_flags = 0;
3092
3093         iwl_rt->rt_tsf = cpu_to_le64(tsf);
3094
3095         /* Convert to dBm */
3096         iwl_rt->rt_dbmsignal = signal;
3097         iwl_rt->rt_dbmnoise = noise;
3098
3099         /* Convert the channel frequency and set the flags */
3100         iwl_rt->rt_channelMHz = cpu_to_le16(stats->freq);
3101         if (!(phy_flags_hw & RX_RES_PHY_FLAGS_BAND_24_MSK))
3102                 iwl_rt->rt_chbitmask =
3103                     cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
3104         else if (phy_flags_hw & RX_RES_PHY_FLAGS_MOD_CCK_MSK)
3105                 iwl_rt->rt_chbitmask =
3106                     cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
3107         else    /* 802.11g */
3108                 iwl_rt->rt_chbitmask =
3109                     cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ));
3110
3111         rate = iwl_rate_index_from_plcp(rate);
3112         if (rate == -1)
3113                 iwl_rt->rt_rate = 0;
3114         else
3115                 iwl_rt->rt_rate = iwl_rates[rate].ieee;
3116
3117         /* antenna number */
3118         iwl_rt->rt_antenna =
3119                 le16_to_cpu(phy_flags_hw & RX_RES_PHY_FLAGS_ANTENNA_MSK) >> 4;
3120
3121         /* set the preamble flag if we have it */
3122         if (phy_flags_hw & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
3123                 iwl_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3124
3125         IWL_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
3126
3127         stats->flag |= RX_FLAG_RADIOTAP;
3128         ieee80211_rx_irqsafe(priv->hw, rxb->skb, stats);
3129         rxb->skb = NULL;
3130 }
3131
3132
3133 #define IWL_PACKET_RETRY_TIME HZ
3134
3135 int is_duplicate_packet(struct iwl_priv *priv, struct ieee80211_hdr *header)
3136 {
3137         u16 sc = le16_to_cpu(header->seq_ctrl);
3138         u16 seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
3139         u16 frag = sc & IEEE80211_SCTL_FRAG;
3140         u16 *last_seq, *last_frag;
3141         unsigned long *last_time;
3142
3143         switch (priv->iw_mode) {
3144         case IEEE80211_IF_TYPE_IBSS:{
3145                 struct list_head *p;
3146                 struct iwl_ibss_seq *entry = NULL;
3147                 u8 *mac = header->addr2;
3148                 int index = mac[5] & (IWL_IBSS_MAC_HASH_SIZE - 1);
3149
3150                 __list_for_each(p, &priv->ibss_mac_hash[index]) {
3151                         entry =
3152                                 list_entry(p, struct iwl_ibss_seq, list);
3153                         if (!compare_ether_addr(entry->mac, mac))
3154                                 break;
3155                 }
3156                 if (p == &priv->ibss_mac_hash[index]) {
3157                         entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
3158                         if (!entry) {
3159                                 IWL_ERROR
3160                                         ("Cannot malloc new mac entry\n");
3161                                 return 0;
3162                         }
3163                         memcpy(entry->mac, mac, ETH_ALEN);
3164                         entry->seq_num = seq;
3165                         entry->frag_num = frag;
3166                         entry->packet_time = jiffies;
3167                         list_add(&entry->list,
3168                                  &priv->ibss_mac_hash[index]);
3169                         return 0;
3170                 }
3171                 last_seq = &entry->seq_num;
3172                 last_frag = &entry->frag_num;
3173                 last_time = &entry->packet_time;
3174                 break;
3175         }
3176         case IEEE80211_IF_TYPE_STA:
3177                 last_seq = &priv->last_seq_num;
3178                 last_frag = &priv->last_frag_num;
3179                 last_time = &priv->last_packet_time;
3180                 break;
3181         default:
3182                 return 0;
3183         }
3184         if ((*last_seq == seq) &&
3185             time_after(*last_time + IWL_PACKET_RETRY_TIME, jiffies)) {
3186                 if (*last_frag == frag)
3187                         goto drop;
3188                 if (*last_frag + 1 != frag)
3189                         /* out-of-order fragment */
3190                         goto drop;
3191         } else
3192                 *last_seq = seq;
3193
3194         *last_frag = frag;
3195         *last_time = jiffies;
3196         return 0;
3197
3198  drop:
3199         return 1;
3200 }
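
/* Illustrative note: IWL_PACKET_RETRY_TIME is HZ, i.e. one second worth of
 * jiffies, so a frame repeating the previous sequence and fragment numbers
 * within that window is reported to the caller as a duplicate. */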
3201
3202 #ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT
3203
3204 #include "iwl-spectrum.h"
3205
3206 #define BEACON_TIME_MASK_LOW    0x00FFFFFF
3207 #define BEACON_TIME_MASK_HIGH   0xFF000000
3208 #define TIME_UNIT               1024
3209
3210 /*
3211  * extended beacon time format
3212  * a time in usec is packed into a 32-bit value in 8:24 format:
3213  * the high byte is the beacon count,
3214  * the lower 3 bytes are the time in usec within one beacon interval
3215  */
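
/*
 * Illustrative example (numbers chosen for clarity, not taken from hardware):
 * with a beacon interval of 100 TU the interval is 100 * 1024 = 102400 usec.
 * Encoding usec = 250000 gives
 *   quot = 250000 / 102400 = 2        (beacon count, high byte)
 *   rem  = 250000 % 102400 = 45200    (= 0xB090, low 24 bits)
 * so iwl_usecs_to_beacons() returns (2 << 24) + 45200 = 0x0200B090.
 */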
3216
3217 static u32 iwl_usecs_to_beacons(u32 usec, u32 beacon_interval)
3218 {
3219         u32 quot;
3220         u32 rem;
3221         u32 interval = beacon_interval * 1024;
3222
3223         if (!interval || !usec)
3224                 return 0;
3225
3226         quot = (usec / interval) & (BEACON_TIME_MASK_HIGH >> 24);
3227         rem = (usec % interval) & BEACON_TIME_MASK_LOW;
3228
3229         return (quot << 24) + rem;
3230 }
3231
3232 /* base is usually the value we get from the uCode with each received frame,
3233  * i.e. the HW timer counter counting down
3234  */
3235
3236 static __le32 iwl_add_beacon_time(u32 base, u32 addon, u32 beacon_interval)
3237 {
3238         u32 base_low = base & BEACON_TIME_MASK_LOW;
3239         u32 addon_low = addon & BEACON_TIME_MASK_LOW;
3240         u32 interval = beacon_interval * TIME_UNIT;
3241         u32 res = (base & BEACON_TIME_MASK_HIGH) +
3242             (addon & BEACON_TIME_MASK_HIGH);
3243
3244         if (base_low > addon_low)
3245                 res += base_low - addon_low;
3246         else if (base_low < addon_low) {
3247                 res += interval + base_low - addon_low;
3248                 res += (1 << 24);
3249         } else
3250                 res += (1 << 24);
3251
3252         return cpu_to_le32(res);
3253 }
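
/*
 * Illustrative example continuing the numbers above (assumed values, not
 * from the original source): with beacon_interval = 100 TU (102400 usec),
 * base = 0x05000400 (5 beacons + 1024 usec) and addon = 0x0200B090
 * (2 beacons + 45200 usec) take the base_low < addon_low branch:
 *   res = 0x05000000 + 0x02000000          (high bytes summed)
 *       + 102400 + 1024 - 45200            (= 58224 usec = 0xE370)
 *       + (1 << 24)                        (carry one beacon)
 * giving cpu_to_le32(0x0800E370), i.e. 8 beacons + 58224 usec.
 */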
3254
3255 static int iwl_get_measurement(struct iwl_priv *priv,
3256                                struct ieee80211_measurement_params *params,
3257                                u8 type)
3258 {
3259         struct iwl_spectrum_cmd spectrum;
3260         struct iwl_rx_packet *res;
3261         struct iwl_host_cmd cmd = {
3262                 .id = REPLY_SPECTRUM_MEASUREMENT_CMD,
3263                 .data = (void *)&spectrum,
3264                 .meta.flags = CMD_WANT_SKB,
3265         };
3266         u32 add_time = le64_to_cpu(params->start_time);
3267         int rc;
3268         int spectrum_resp_status;
3269         int duration = le16_to_cpu(params->duration);
3270
3271         if (iwl_is_associated(priv))
3272                 add_time =
3273                     iwl_usecs_to_beacons(
3274                         le64_to_cpu(params->start_time) - priv->last_tsf,
3275                         le16_to_cpu(priv->rxon_timing.beacon_interval));
3276
3277         memset(&spectrum, 0, sizeof(spectrum));
3278
3279         spectrum.channel_count = cpu_to_le16(1);
3280         spectrum.flags =
3281             RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK;
3282         spectrum.filter_flags = MEASUREMENT_FILTER_FLAG;
3283         cmd.len = sizeof(spectrum);
3284         spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
3285
3286         if (iwl_is_associated(priv))
3287                 spectrum.start_time =
3288                     iwl_add_beacon_time(priv->last_beacon_time,
3289                                 add_time,
3290                                 le16_to_cpu(priv->rxon_timing.beacon_interval));
3291         else
3292                 spectrum.start_time = 0;
3293
3294         spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
3295         spectrum.channels[0].channel = params->channel;
3296         spectrum.channels[0].type = type;
3297         if (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK)
3298                 spectrum.flags |= RXON_FLG_BAND_24G_MSK |
3299                     RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK;
3300
3301         rc = iwl_send_cmd_sync(priv, &cmd);
3302         if (rc)
3303                 return rc;
3304
3305         res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
3306         if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
3307                 IWL_ERROR("Bad return from REPLY_SPECTRUM_MEASUREMENT_CMD\n");
3308                 rc = -EIO;
3309         }
3310
3311         spectrum_resp_status = le16_to_cpu(res->u.spectrum.status);
3312         switch (spectrum_resp_status) {
3313         case 0:         /* Command will be handled */
3314                 if (res->u.spectrum.id != 0xff) {
3315                         IWL_DEBUG_INFO
3316                             ("Replaced existing measurement: %d\n",
3317                              res->u.spectrum.id);
3318                         priv->measurement_status &= ~MEASUREMENT_READY;
3319                 }
3320                 priv->measurement_status |= MEASUREMENT_ACTIVE;
3321                 rc = 0;
3322                 break;
3323
3324         case 1:         /* Command will not be handled */
3325                 rc = -EAGAIN;
3326                 break;
3327         }
3328
3329         dev_kfree_skb_any(cmd.meta.u.skb);
3330
3331         return rc;
3332 }
3333 #endif
3334
3335 static void iwl_txstatus_to_ieee(struct iwl_priv *priv,
3336                                  struct iwl_tx_info *tx_sta)
3337 {
3338
3339         tx_sta->status.ack_signal = 0;
3340         tx_sta->status.excessive_retries = 0;
3341         tx_sta->status.queue_length = 0;
3342         tx_sta->status.queue_number = 0;
3343
3344         if (in_interrupt())
3345                 ieee80211_tx_status_irqsafe(priv->hw,
3346                                             tx_sta->skb[0], &(tx_sta->status));
3347         else
3348                 ieee80211_tx_status(priv->hw,
3349                                     tx_sta->skb[0], &(tx_sta->status));
3350
3351         tx_sta->skb[0] = NULL;
3352 }
3353
3354 /**
3355  * iwl_tx_queue_reclaim - Reclaim Tx queue entries no longer used by the NIC.
3356  *
3357  * When the FW advances the 'R' index, all entries between the old and
3358  * new 'R' index need to be reclaimed. As a result, some free space
3359  * forms. If there is enough free space (> low mark), wake the Tx queue.
3360  */
3361 int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
3362 {
3363         struct iwl_tx_queue *txq = &priv->txq[txq_id];
3364         struct iwl_queue *q = &txq->q;
3365         int nfreed = 0;
3366
3367         if ((index >= q->n_bd) || (x2_queue_used(q, index) == 0)) {
3368                 IWL_ERROR("Read index for DMA queue txq id (%d), index %d, "
3369                           "is out of range [0-%d] %d %d.\n", txq_id,
3370                           index, q->n_bd, q->first_empty, q->last_used);
3371                 return 0;
3372         }
3373
3374         for (index = iwl_queue_inc_wrap(index, q->n_bd);
3375                 q->last_used != index;
3376                 q->last_used = iwl_queue_inc_wrap(q->last_used, q->n_bd)) {
3377                 if (txq_id != IWL_CMD_QUEUE_NUM) {
3378                         iwl_txstatus_to_ieee(priv,
3379                                         &(txq->txb[txq->q.last_used]));
3380                         iwl_hw_txq_free_tfd(priv, txq);
3381                 } else if (nfreed > 1) {
3382                         IWL_ERROR("HCMD skipped: index (%d) %d %d\n", index,
3383                                         q->first_empty, q->last_used);
3384                         queue_work(priv->workqueue, &priv->restart);
3385                 }
3386                 nfreed++;
3387         }
3388
3389         if (iwl_queue_space(q) > q->low_mark && (txq_id >= 0) &&
3390                         (txq_id != IWL_CMD_QUEUE_NUM) &&
3391                         priv->mac80211_registered)
3392                 ieee80211_wake_queue(priv->hw, txq_id);
3393
3394
3395         return nfreed;
3396 }
3397
3398 static int iwl_is_tx_success(u32 status)
3399 {
3400         return (status & 0xFF) == 0x1;
3401 }
3402
3403 /******************************************************************************
3404  *
3405  * Generic RX handler implementations
3406  *
3407  ******************************************************************************/
3408 static void iwl_rx_reply_tx(struct iwl_priv *priv,
3409                             struct iwl_rx_mem_buffer *rxb)
3410 {
3411         struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3412         u16 sequence = le16_to_cpu(pkt->hdr.sequence);
3413         int txq_id = SEQ_TO_QUEUE(sequence);
3414         int index = SEQ_TO_INDEX(sequence);
3415         struct iwl_tx_queue *txq = &priv->txq[txq_id];
3416         struct ieee80211_tx_status *tx_status;
3417         struct iwl_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
3418         u32  status = le32_to_cpu(tx_resp->status);
3419
3420         if ((index >= txq->q.n_bd) || (x2_queue_used(&txq->q, index) == 0)) {
3421                 IWL_ERROR("Read index for DMA queue txq_id (%d) index %d "
3422                           "is out of range [0-%d] %d %d\n", txq_id,
3423                           index, txq->q.n_bd, txq->q.first_empty,
3424                           txq->q.last_used);
3425                 return;
3426         }
3427
3428         tx_status = &(txq->txb[txq->q.last_used].status);
3429
3430         tx_status->retry_count = tx_resp->failure_frame;
3431         tx_status->queue_number = status;
3432         tx_status->queue_length = tx_resp->bt_kill_count;
3433         tx_status->queue_length |= tx_resp->failure_rts;
3434
3435         tx_status->flags =
3436             iwl_is_tx_success(status) ? IEEE80211_TX_STATUS_ACK : 0;
3437
3438         tx_status->control.tx_rate = iwl_rate_index_from_plcp(tx_resp->rate);
3439
3440         IWL_DEBUG_TX("Tx queue %d Status %s (0x%08x) plcp rate %d retries %d\n",
3441                         txq_id, iwl_get_tx_fail_reason(status), status,
3442                         tx_resp->rate, tx_resp->failure_frame);
3443
3444         IWL_DEBUG_TX_REPLY("Tx queue reclaim %d\n", index);
3445         if (index != -1)
3446                 iwl_tx_queue_reclaim(priv, txq_id, index);
3447
3448         if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
3449                 IWL_ERROR("TODO:  Implement Tx ABORT REQUIRED!!!\n");
3450 }
3451
3452
3453 static void iwl_rx_reply_alive(struct iwl_priv *priv,
3454                                struct iwl_rx_mem_buffer *rxb)
3455 {
3456         struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3457         struct iwl_alive_resp *palive;
3458         struct delayed_work *pwork;
3459
3460         palive = &pkt->u.alive_frame;
3461
3462         IWL_DEBUG_INFO("Alive ucode status 0x%08X revision "
3463                        "0x%01X 0x%01X\n",
3464                        palive->is_valid, palive->ver_type,
3465                        palive->ver_subtype);
3466
3467         if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
3468                 IWL_DEBUG_INFO("Initialization Alive received.\n");
3469                 memcpy(&priv->card_alive_init,
3470                        &pkt->u.alive_frame,
3471                        sizeof(struct iwl_init_alive_resp));
3472                 pwork = &priv->init_alive_start;
3473         } else {
3474                 IWL_DEBUG_INFO("Runtime Alive received.\n");
3475                 memcpy(&priv->card_alive, &pkt->u.alive_frame,
3476                        sizeof(struct iwl_alive_resp));
3477                 pwork = &priv->alive_start;
3478                 iwl_disable_events(priv);
3479         }
3480
3481         /* We delay the ALIVE response by 5ms to
3482          * give the HW RF Kill time to activate... */
3483         if (palive->is_valid == UCODE_VALID_OK)
3484                 queue_delayed_work(priv->workqueue, pwork,
3485                                    msecs_to_jiffies(5));
3486         else
3487                 IWL_WARNING("uCode did not respond OK.\n");
3488 }
3489
3490 static void iwl_rx_reply_add_sta(struct iwl_priv *priv,
3491                                  struct iwl_rx_mem_buffer *rxb)
3492 {
3493         struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3494
3495         IWL_DEBUG_RX("Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
3496         return;
3497 }
3498
3499 static void iwl_rx_reply_error(struct iwl_priv *priv,
3500                                struct iwl_rx_mem_buffer *rxb)
3501 {
3502         struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3503
3504         IWL_ERROR("Error Reply type 0x%08X cmd %s (0x%02X) "
3505                 "seq 0x%04X ser 0x%08X\n",
3506                 le32_to_cpu(pkt->u.err_resp.error_type),
3507                 get_cmd_string(pkt->u.err_resp.cmd_id),
3508                 pkt->u.err_resp.cmd_id,
3509                 le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
3510                 le32_to_cpu(pkt->u.err_resp.error_info));
3511 }
3512
3513 #define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
3514
3515 static void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
3516 {
3517         struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3518         struct iwl_rxon_cmd *rxon = (void *)&priv->active_rxon;
3519         struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
3520         IWL_DEBUG_11H("CSA notif: channel %d, status %d\n",
3521                       le16_to_cpu(csa->channel), le32_to_cpu(csa->status));
3522         rxon->channel = csa->channel;
3523         priv->staging_rxon.channel = csa->channel;
3524 }
3525
3526 static void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
3527                                           struct iwl_rx_mem_buffer *rxb)
3528 {
3529 #ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT
3530         struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3531         struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
3532
3533         if (!report->state) {
3534                 IWL_DEBUG(IWL_DL_11H | IWL_DL_INFO,
3535                           "Spectrum Measure Notification: Start\n");
3536                 return;
3537         }
3538
3539         memcpy(&priv->measure_report, report, sizeof(*report));
3540         priv->measurement_status |= MEASUREMENT_READY;
3541 #endif
3542 }
3543
3544 static void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
3545                                   struct iwl_rx_mem_buffer *rxb)
3546 {
3547 #ifdef CONFIG_IWLWIFI_DEBUG
3548         struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3549         struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
3550         IWL_DEBUG_RX("sleep mode: %d, src: %d\n",
3551                      sleep->pm_sleep_mode, sleep->pm_wakeup_src);
3552 #endif
3553 }
3554
3555 static void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
3556                                              struct iwl_rx_mem_buffer *rxb)
3557 {
3558         struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3559         IWL_DEBUG_RADIO("Dumping %d bytes of unhandled "
3560                         "notification for %s:\n",
3561                         le32_to_cpu(pkt->len), get_cmd_string(pkt->hdr.cmd));
3562         iwl_print_hex_dump(IWL_DL_RADIO, pkt->u.raw, le32_to_cpu(pkt->len));
3563 }
3564
3565 static void iwl_bg_beacon_update(struct work_struct *work)
3566 {
3567         struct iwl_priv *priv =
3568                 container_of(work, struct iwl_priv, beacon_update);
3569         struct sk_buff *beacon;
3570
3571         /* Pull updated AP beacon from mac80211; this fails if not in AP mode */
3572         beacon = ieee80211_beacon_get(priv->hw, priv->interface_id, NULL);
3573
3574         if (!beacon) {
3575                 IWL_ERROR("update beacon failed\n");
3576                 return;
3577         }
3578
3579         mutex_lock(&priv->mutex);
3580         /* a new beacon skb is allocated every time; dispose of the previous one */
3581         if (priv->ibss_beacon)
3582                 dev_kfree_skb(priv->ibss_beacon);
3583
3584         priv->ibss_beacon = beacon;
3585         mutex_unlock(&priv->mutex);
3586
3587         iwl_send_beacon_cmd(priv);
3588 }
3589
3590 static void iwl_rx_beacon_notif(struct iwl_priv *priv,
3591                                 struct iwl_rx_mem_buffer *rxb)
3592 {
3593 #ifdef CONFIG_IWLWIFI_DEBUG
3594         struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3595         struct iwl_beacon_notif *beacon = &(pkt->u.beacon_status);
3596         u8 rate = beacon->beacon_notify_hdr.rate;
3597
3598         IWL_DEBUG_RX("beacon status %x retries %d iss %d "
3599                 "tsf %d %d rate %d\n",
3600                 le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK,
3601                 beacon->beacon_notify_hdr.failure_frame,
3602                 le32_to_cpu(beacon->ibss_mgr_status),
3603                 le32_to_cpu(beacon->high_tsf),
3604                 le32_to_cpu(beacon->low_tsf), rate);
3605 #endif
3606
3607         if ((priv->iw_mode == IEEE80211_IF_TYPE_AP) &&
3608             (!test_bit(STATUS_EXIT_PENDING, &priv->status)))
3609                 queue_work(priv->workqueue, &priv->beacon_update);
3610 }
3611
3612 /* Service response to REPLY_SCAN_CMD (0x80) */
3613 static void iwl_rx_reply_scan(struct iwl_priv *priv,
3614                               struct iwl_rx_mem_buffer *rxb)
3615 {
3616 #ifdef CONFIG_IWLWIFI_DEBUG
3617         struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3618         struct iwl_scanreq_notification *notif =
3619             (struct iwl_scanreq_notification *)pkt->u.raw;
3620
3621         IWL_DEBUG_RX("Scan request status = 0x%x\n", notif->status);
3622 #endif
3623 }
3624
3625 /* Service SCAN_START_NOTIFICATION (0x82) */
3626 static void iwl_rx_scan_start_notif(struct iwl_priv *priv,
3627                                     struct iwl_rx_mem_buffer *rxb)
3628 {
3629         struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3630         struct iwl_scanstart_notification *notif =
3631             (struct iwl_scanstart_notification *)pkt->u.raw;
3632         priv->scan_start_tsf = le32_to_cpu(notif->tsf_low);
3633         IWL_DEBUG_SCAN("Scan start: "
3634                        "%d [802.11%s] "
3635                        "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n",
3636                        notif->channel,
3637                        notif->band ? "bg" : "a",
3638                        notif->tsf_high,
3639                        notif->tsf_low, notif->status, notif->beacon_timer);
3640 }
3641
3642 /* Service SCAN_RESULTS_NOTIFICATION (0x83) */
3643 static void iwl_rx_scan_results_notif(struct iwl_priv *priv,
3644                                       struct iwl_rx_mem_buffer *rxb)
3645 {
3646         struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3647         struct iwl_scanresults_notification *notif =
3648             (struct iwl_scanresults_notification *)pkt->u.raw;
3649
3650         IWL_DEBUG_SCAN("Scan ch.res: "
3651                        "%d [802.11%s] "
3652                        "(TSF: 0x%08X:%08X) - %d "
3653                        "elapsed=%lu usec (%dms since last)\n",
3654                        notif->channel,
3655                        notif->band ? "bg" : "a",
3656                        le32_to_cpu(notif->tsf_high),
3657                        le32_to_cpu(notif->tsf_low),
3658                        le32_to_cpu(notif->statistics[0]),
3659                        le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf,
3660                        jiffies_to_msecs(elapsed_jiffies
3661                                         (priv->last_scan_jiffies, jiffies)));
3662
3663         priv->last_scan_jiffies = jiffies;
3664 }
3665
3666 /* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
3667 static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
3668                                        struct iwl_rx_mem_buffer *rxb)
3669 {
3670         struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3671         struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
3672
3673         IWL_DEBUG_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
3674                        scan_notif->scanned_channels,
3675                        scan_notif->tsf_low,
3676                        scan_notif->tsf_high, scan_notif->status);
3677
3678         /* The HW is no longer scanning */
3679         clear_bit(STATUS_SCAN_HW, &priv->status);
3680
3681         /* The scan completion notification came in, so kill that timer... */
3682         cancel_delayed_work(&priv->scan_check);
3683
3684         IWL_DEBUG_INFO("Scan pass on %sGHz took %dms\n",
3685                        (priv->scan_bands == 2) ? "2.4" : "5.2",
3686                        jiffies_to_msecs(elapsed_jiffies
3687                                         (priv->scan_pass_start, jiffies)));
3688
3689         /* Remove this scanned band from the list
3690          * of pending bands to scan */
3691         priv->scan_bands--;
3692
3693         /* If a request to abort was given, or the scan did not succeed
3694          * then we reset the scan state machine and terminate,
3695          * re-queuing another scan if one has been requested */
3696         if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
3697                 IWL_DEBUG_INFO("Aborted scan completed.\n");
3698                 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
3699         } else {
3700                 /* If there are more bands on this scan pass reschedule */
3701                 if (priv->scan_bands > 0)
3702                         goto reschedule;
3703         }
3704
3705         priv->last_scan_jiffies = jiffies;
3706         IWL_DEBUG_INFO("Setting scan to off\n");
3707
3708         clear_bit(STATUS_SCANNING, &priv->status);
3709
3710         IWL_DEBUG_INFO("Scan took %dms\n",
3711                 jiffies_to_msecs(elapsed_jiffies(priv->scan_start, jiffies)));
3712
3713         queue_work(priv->workqueue, &priv->scan_completed);
3714
3715         return;
3716
3717 reschedule:
3718         priv->scan_pass_start = jiffies;
3719         queue_work(priv->workqueue, &priv->request_scan);
3720 }
3721
3722 /* Handle notification from uCode that card's power state is changing
3723  * due to software, hardware, or critical temperature RFKILL */
3724 static void iwl_rx_card_state_notif(struct iwl_priv *priv,
3725                                     struct iwl_rx_mem_buffer *rxb)
3726 {
3727         struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3728         u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
3729         unsigned long status = priv->status;
3730
3731         IWL_DEBUG_RF_KILL("Card state received: HW:%s SW:%s\n",
3732                           (flags & HW_CARD_DISABLED) ? "Kill" : "On",
3733                           (flags & SW_CARD_DISABLED) ? "Kill" : "On");
3734
3735         iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
3736                     CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
3737
3738         if (flags & HW_CARD_DISABLED)
3739                 set_bit(STATUS_RF_KILL_HW, &priv->status);
3740         else
3741                 clear_bit(STATUS_RF_KILL_HW, &priv->status);
3742
3743
3744         if (flags & SW_CARD_DISABLED)
3745                 set_bit(STATUS_RF_KILL_SW, &priv->status);
3746         else
3747                 clear_bit(STATUS_RF_KILL_SW, &priv->status);
3748
3749         iwl_scan_cancel(priv);
3750
3751         if ((test_bit(STATUS_RF_KILL_HW, &status) !=
3752              test_bit(STATUS_RF_KILL_HW, &priv->status)) ||
3753             (test_bit(STATUS_RF_KILL_SW, &status) !=
3754              test_bit(STATUS_RF_KILL_SW, &priv->status)))
3755                 queue_work(priv->workqueue, &priv->rf_kill);
3756         else
3757                 wake_up_interruptible(&priv->wait_command_queue);
3758 }
3759
3760 /**
3761  * iwl_setup_rx_handlers - Initialize Rx handler callbacks
3762  *
3763  * Setup the RX handlers for each of the reply types sent from the uCode
3764  * to the host.
3765  *
3766  * This function chains into the hardware specific files for them to setup
3767  * any hardware specific handlers as well.
3768  */
3769 static void iwl_setup_rx_handlers(struct iwl_priv *priv)
3770 {
3771         priv->rx_handlers[REPLY_ALIVE] = iwl_rx_reply_alive;
3772         priv->rx_handlers[REPLY_ADD_STA] = iwl_rx_reply_add_sta;
3773         priv->rx_handlers[REPLY_ERROR] = iwl_rx_reply_error;
3774         priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa;
3775         priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
3776             iwl_rx_spectrum_measure_notif;
3777         priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif;
3778         priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
3779             iwl_rx_pm_debug_statistics_notif;
3780         priv->rx_handlers[BEACON_NOTIFICATION] = iwl_rx_beacon_notif;
3781
3782         /* NOTE:  iwl_rx_statistics is different based on whether
3783          * the build is for the 3945 or the 4965.  See the
3784          * corresponding implementation in iwl-XXXX.c
3785          *
3786          * The same handler is used for both the REPLY to a
3787          * discrete statistics request from the host as well as
3788          * for the periodic statistics notification from the uCode
3789          */
3790         priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl_hw_rx_statistics;
3791         priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl_hw_rx_statistics;
3792
3793         priv->rx_handlers[REPLY_SCAN_CMD] = iwl_rx_reply_scan;
3794         priv->rx_handlers[SCAN_START_NOTIFICATION] = iwl_rx_scan_start_notif;
3795         priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] =
3796             iwl_rx_scan_results_notif;
3797         priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] =
3798             iwl_rx_scan_complete_notif;
3799         priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl_rx_card_state_notif;
3800         priv->rx_handlers[REPLY_TX] = iwl_rx_reply_tx;
3801
3802         /* Setup hardware specific Rx handlers */
3803         iwl_hw_rx_handler_setup(priv);
3804 }
3805
3806 /**
3807  * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
3808  * @rxb: Rx buffer to reclaim
3809  *
3810  * If an Rx buffer has an async callback associated with it the callback
3811  * will be executed.  The attached skb (if present) will only be freed
3812  * if the callback returns 1
3813  */
3814 static void iwl_tx_cmd_complete(struct iwl_priv *priv,
3815                                 struct iwl_rx_mem_buffer *rxb)
3816 {
3817         struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
3818         u16 sequence = le16_to_cpu(pkt->hdr.sequence);
3819         int txq_id = SEQ_TO_QUEUE(sequence);
3820         int index = SEQ_TO_INDEX(sequence);
3821         int huge = sequence & SEQ_HUGE_FRAME;
3822         int cmd_index;
3823         struct iwl_cmd *cmd;
3824
3825         /* If a Tx command is being handled and it isn't in the actual
3826          * command queue then a command routing bug has been introduced
3827          * in the queue management code. */
3828         if (txq_id != IWL_CMD_QUEUE_NUM)
3829                 IWL_ERROR("Error wrong command queue %d command id 0x%X\n",
3830                           txq_id, pkt->hdr.cmd);
3831         BUG_ON(txq_id != IWL_CMD_QUEUE_NUM);
3832
3833         cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
3834         cmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
3835
3836         /* Input error checking is done when commands are added to queue. */
3837         if (cmd->meta.flags & CMD_WANT_SKB) {
3838                 cmd->meta.source->u.skb = rxb->skb;
3839                 rxb->skb = NULL;
3840         } else if (cmd->meta.u.callback &&
3841                    !cmd->meta.u.callback(priv, cmd, rxb->skb))
3842                 rxb->skb = NULL;
3843
3844         iwl_tx_queue_reclaim(priv, txq_id, index);
3845
3846         if (!(cmd->meta.flags & CMD_ASYNC)) {
3847                 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
3848                 wake_up_interruptible(&priv->wait_command_queue);
3849         }
3850 }
3851
3852 /************************** RX-FUNCTIONS ****************************/
3853 /*
3854  * Rx theory of operation
3855  *
3856  * The host allocates 32 DMA target addresses and passes the host address
3857  * to the firmware at register IWL_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
3858  * 0 to 31
3859  *
3860  * Rx Queue Indexes
3861  * The host/firmware share two index registers for managing the Rx buffers.
3862  *
3863  * The READ index maps to the first position that the firmware may be writing
3864  * to -- the driver can read up to (but not including) this position and get
3865  * good data.
3866  * The READ index is managed by the firmware once the card is enabled.
3867  *
3868  * The WRITE index maps to the last position the driver has read from -- the
3869  * position preceding WRITE is the last slot the firmware can place a packet.
3870  *
3871  * The queue is empty (no good data) if WRITE = READ - 1, and is full if
3872  * WRITE = READ.
3873  *
3874  * During initialization the host sets up the READ queue position to the first
3875  * INDEX position, and WRITE to the last (READ - 1 wrapped)
3876  *
3877  * When the firmware places a packet in a buffer it will advance the READ index
3878  * and fire the RX interrupt.  The driver can then query the READ index and
3879  * process as many packets as possible, moving the WRITE index forward as it
3880  * resets the Rx queue buffers with new memory.
3881  *
3882  * The management in the driver is as follows:
3883  * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free.  When
3884  *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
3885  *   to replenish the iwl->rxq->rx_free.
3886  * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
3887  *   iwl->rxq is replenished and the READ INDEX is updated (updating the
3888  *   'processed' and 'read' driver indexes as well)
3889  * + A received packet is processed and handed to the kernel network stack,
3890  *   detached from the iwl->rxq.  The driver 'processed' index is updated.
3891  * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
3892  *   list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
3893  *   INDEX is not incremented and iwl->status(RX_STALLED) is set.  If there
3894  *   were enough free buffers and RX_STALLED is set it is cleared.
3895  *
3896  *
3897  * Driver sequence:
3898  *
3899  * iwl_rx_queue_alloc()       Allocates rx_free
3900  * iwl_rx_replenish()         Replenishes rx_free list from rx_used, and calls
3901  *                            iwl_rx_queue_restock
3902  * iwl_rx_queue_restock()     Moves available buffers from rx_free into Rx
3903  *                            queue, updates firmware pointers, and updates
3904  *                            the WRITE index.  If insufficient rx_free buffers
3905  *                            are available, schedules iwl_rx_replenish
3906  *
3907  * -- enable interrupts --
3908  * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
3909  *                            READ INDEX, detaching the SKB from the pool.
3910  *                            Moves the packet buffer from queue to rx_used.
3911  *                            Calls iwl_rx_queue_restock to refill any empty
3912  *                            slots.
3913  * ...
3914  *
3915  */
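
/*
 * Illustrative walk-through (assumes RX_QUEUE_SIZE == 256, the value this
 * driver uses): right after iwl_rx_queue_alloc() READ == WRITE == 0 and all
 * buffers sit on rx_used; iwl_rx_replenish() allocates SKBs and moves the
 * buffers to rx_free, and iwl_rx_queue_restock() then hands them to the
 * firmware, advancing WRITE one slot per buffer until only the two slack
 * slots that keep 'full' and 'empty' distinguishable remain.
 */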
3916
3917 /**
3918  * iwl_rx_queue_space - Return number of free slots available in queue.
3919  */
3920 static int iwl_rx_queue_space(const struct iwl_rx_queue *q)
3921 {
3922         int s = q->read - q->write;
3923         if (s <= 0)
3924                 s += RX_QUEUE_SIZE;
3925         /* keep some buffer to not confuse full and empty queue */
3926         s -= 2;
3927         if (s < 0)
3928                 s = 0;
3929         return s;
3930 }
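
/*
 * Worked example (values assumed for illustration; RX_QUEUE_SIZE is 256 in
 * this driver): with q->read = 5 and q->write = 250,
 *   s = 5 - 250 = -245  ->  -245 + 256 = 11  ->  11 - 2 = 9
 * so nine more buffers may be handed to the firmware before the queue is
 * considered full.
 */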
3931
3932 /**
3933  * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
3934  *
3935  * NOTE: This function has 3945 and 4965 specific code sections
3936  * but is declared in base due to the majority of the
3937  * implementation being the same (only a numeric constant is
3938  * different)
3939  *
3940  */
3941 int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q)
3942 {
3943         u32 reg = 0;
3944         int rc = 0;
3945         unsigned long flags;
3946
3947         spin_lock_irqsave(&q->lock, flags);
3948
3949         if (q->need_update == 0)
3950                 goto exit_unlock;
3951
3952         if (test_bit(STATUS_POWER_PMI, &priv->status)) {
3953                 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
3954
3955                 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
3956                         iwl_set_bit(priv, CSR_GP_CNTRL,
3957                                     CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
3958                         goto exit_unlock;
3959                 }
3960
3961                 rc = iwl_grab_restricted_access(priv);
3962                 if (rc)
3963                         goto exit_unlock;
3964
3965                 iwl_write_restricted(priv, FH_RSCSR_CHNL0_WPTR,
3966                                      q->write & ~0x7);
3967                 iwl_release_restricted_access(priv);
3968         } else
3969                 iwl_write32(priv, FH_RSCSR_CHNL0_WPTR, q->write & ~0x7);
3970
3971
3972         q->need_update = 0;
3973
3974  exit_unlock:
3975         spin_unlock_irqrestore(&q->lock, flags);
3976         return rc;
3977 }
3978
3979 /**
3980  * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer pointer.
3981  *
3982  * NOTE: This function has 3945 and 4965 specific code paths in it.
3983  */
3984 static inline __le32 iwl_dma_addr2rbd_ptr(struct iwl_priv *priv,
3985                                           dma_addr_t dma_addr)
3986 {
3987         return cpu_to_le32((u32)dma_addr);
3988 }
3989
3990 /**
3991  * iwl_rx_queue_restock - refill RX queue from pre-allocated pool
3992  *
3993  * If there are slots in the RX queue that need to be restocked,
3994  * and we have free pre-allocated buffers, fill as many of them
3995  * as we can with buffers pulled from rx_free.
3996  *
3997  * This moves the 'write' index forward to catch up with 'processed', and
3998  * also updates the memory address in the firmware to reference the new
3999  * target buffer.
4000  */
4001 int iwl_rx_queue_restock(struct iwl_priv *priv)
4002 {
4003         struct iwl_rx_queue *rxq = &priv->rxq;
4004         struct list_head *element;
4005         struct iwl_rx_mem_buffer *rxb;
4006         unsigned long flags;
4007         int write, rc;
4008
4009         spin_lock_irqsave(&rxq->lock, flags);
4010         write = rxq->write & ~0x7;
4011         while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
4012                 element = rxq->rx_free.next;
4013                 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
4014                 list_del(element);
4015                 rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->dma_addr);
4016                 rxq->queue[rxq->write] = rxb;
4017                 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
4018                 rxq->free_count--;
4019         }
4020         spin_unlock_irqrestore(&rxq->lock, flags);
4021         /* If the pre-allocated buffer pool is dropping low, schedule to
4022          * refill it */
4023         if (rxq->free_count <= RX_LOW_WATERMARK)
4024                 queue_work(priv->workqueue, &priv->rx_replenish);
4025
4026
4027         /* If we've added more space for the firmware to place data, tell it */
4028         if ((write != (rxq->write & ~0x7))
4029             || (abs(rxq->write - rxq->read) > 7)) {
4030                 spin_lock_irqsave(&rxq->lock, flags);
4031                 rxq->need_update = 1;
4032                 spin_unlock_irqrestore(&rxq->lock, flags);
4033                 rc = iwl_rx_queue_update_write_ptr(priv, rxq);
4034                 if (rc)
4035                         return rc;
4036         }
4037
4038         return 0;
4039 }
4040
4041 /**
4042  * iwl_rx_replenish - Move all used packets from rx_used to rx_free
4043  *
4044  * When moving to rx_free an SKB is allocated for the slot.
4045  *
4046  * Also restock the Rx queue via iwl_rx_queue_restock.
4047  * This is called as a scheduled work item (except during initialization)
4048  */
4049 void iwl_rx_replenish(void *data)
4050 {
4051         struct iwl_priv *priv = data;
4052         struct iwl_rx_queue *rxq = &priv->rxq;
4053         struct list_head *element;
4054         struct iwl_rx_mem_buffer *rxb;
4055         unsigned long flags;
4056         spin_lock_irqsave(&rxq->lock, flags);
4057         while (!list_empty(&rxq->rx_used)) {
4058                 element = rxq->rx_used.next;
4059                 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
4060                 rxb->skb =
4061                     alloc_skb(IWL_RX_BUF_SIZE, __GFP_NOWARN | GFP_ATOMIC);
4062                 if (!rxb->skb) {
4063                         if (net_ratelimit())
4064                                 printk(KERN_CRIT DRV_NAME
4065                                        ": Can not allocate SKB buffers\n");
4066                         /* We don't reschedule replenish work here -- we will
4067                          * call the restock method and if it still needs
4068                          * more buffers it will schedule replenish */
4069                         break;
4070                 }
4071                 priv->alloc_rxb_skb++;
4072                 list_del(element);
4073                 rxb->dma_addr =
4074                     pci_map_single(priv->pci_dev, rxb->skb->data,
4075                                    IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
4076                 list_add_tail(&rxb->list, &rxq->rx_free);
4077                 rxq->free_count++;
4078         }
4079         spin_unlock_irqrestore(&rxq->lock, flags);
4080
4081         spin_lock_irqsave(&priv->lock, flags);
4082         iwl_rx_queue_restock(priv);
4083         spin_unlock_irqrestore(&priv->lock, flags);
4084 }
4085
4086 /* Assumes that the skb field of the buffers in 'pool' is kept accurate.
4087  * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
4088  * This free routine walks the list of POOL entries and if the SKB is
4089  * non-NULL it is unmapped and freed.
4090  */
4091 void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
4092 {
4093         int i;
4094         for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
4095                 if (rxq->pool[i].skb != NULL) {
4096                         pci_unmap_single(priv->pci_dev,
4097                                          rxq->pool[i].dma_addr,
4098                                          IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
4099                         dev_kfree_skb(rxq->pool[i].skb);
4100                 }
4101         }
4102
4103         pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
4104                             rxq->dma_addr);
4105         rxq->bd = NULL;
4106 }
4107
4108 int iwl_rx_queue_alloc(struct iwl_priv *priv)
4109 {
4110         struct iwl_rx_queue *rxq = &priv->rxq;
4111         struct pci_dev *dev = priv->pci_dev;
4112         int i;
4113
4114         spin_lock_init(&rxq->lock);
4115         INIT_LIST_HEAD(&rxq->rx_free);
4116         INIT_LIST_HEAD(&rxq->rx_used);
4117         rxq->bd = pci_alloc_consistent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr);
4118         if (!rxq->bd)
4119                 return -ENOMEM;
4120         /* Fill the rx_used queue with _all_ of the Rx buffers */
4121         for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
4122                 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
4123         /* Set us so that we have processed and used all buffers, but have
4124          * not restocked the Rx queue with fresh buffers */
4125         rxq->read = rxq->write = 0;
4126         rxq->free_count = 0;
4127         rxq->need_update = 0;
4128         return 0;
4129 }
4130
4131 void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
4132 {
4133         unsigned long flags;
4134         int i;
4135         spin_lock_irqsave(&rxq->lock, flags);
4136         INIT_LIST_HEAD(&rxq->rx_free);
4137         INIT_LIST_HEAD(&rxq->rx_used);
4138         /* Fill the rx_used queue with _all_ of the Rx buffers */
4139         for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
4140                 /* In the reset function, these buffers may have been allocated
4141                  * to an SKB, so we need to unmap and free potential storage */
4142                 if (rxq->pool[i].skb != NULL) {
4143                         pci_unmap_single(priv->pci_dev,
4144                                          rxq->pool[i].dma_addr,
4145                                          IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
4146                         priv->alloc_rxb_skb--;
4147                         dev_kfree_skb(rxq->pool[i].skb);
4148                         rxq->pool[i].skb = NULL;
4149                 }
4150                 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
4151         }
4152
4153         /* Set us so that we have processed and used all buffers, but have
4154          * not restocked the Rx queue with fresh buffers */
4155         rxq->read = rxq->write = 0;
4156         rxq->free_count = 0;
4157         spin_unlock_irqrestore(&rxq->lock, flags);
4158 }
4159
4160 /* Convert linear signal-to-noise ratio into dB */
4161 static u8 ratio2dB[100] = {
4162 /*       0   1   2   3   4   5   6   7   8   9 */
4163          0,  0,  6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */
4164         20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */
4165         26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */
4166         29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */
4167         32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */
4168         34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */
4169         36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */
4170         37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */
4171         38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */
4172         39, 39, 39, 39, 39, 40, 40, 40, 40, 40  /* 90 - 99 */
4173 };
4174
4175 /* Calculates a relative dB value from a ratio of linear
4176  *   (i.e. not dB) signal levels.
4177  * Conversion assumes that levels are voltages (20*log), not powers (10*log). */
4178 int iwl_calc_db_from_ratio(int sig_ratio)
4179 {
4180         /* Anything above 1000:1 just report as 60 dB */
4181         if (sig_ratio > 1000)
4182                 return 60;
4183
4184         /* Above 100:1, divide by 10 and use table,
4185          *   add 20 dB to make up for divide by 10 */
4186         if (sig_ratio > 100)
4187                 return (20 + (int)ratio2dB[sig_ratio/10]);
4188
4189         /* We shouldn't see this */
4190         if (sig_ratio < 1)
4191                 return 0;
4192
4193         /* Use table for ratios 1:1 - 99:1 */
4194         return (int)ratio2dB[sig_ratio];
4195 }
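
/*
 * Worked examples (illustrative inputs only):
 *   sig_ratio = 9    ->  ratio2dB[9] = 19 dB
 *   sig_ratio = 300  ->  20 + ratio2dB[30] = 20 + 29 = 49 dB
 *   sig_ratio = 2000 ->  capped at 60 dB
 */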
4196
4197 #define PERFECT_RSSI (-20) /* dBm */
4198 #define WORST_RSSI (-95)   /* dBm */
4199 #define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI)
4200
4201 /* Calculate an indication of rx signal quality (a percentage, not dBm!).
4202  * See http://www.ces.clemson.edu/linux/signal_quality.shtml for info
4203  *   about formulas used below. */
4204 int iwl_calc_sig_qual(int rssi_dbm, int noise_dbm)
4205 {
4206         int sig_qual;
4207         int degradation = PERFECT_RSSI - rssi_dbm;
4208
4209         /* If we get a noise measurement, use signal-to-noise ratio (SNR)
4210          * as indicator; formula is (signal dbm - noise dbm).
4211          * SNR at or above 40 is a great signal (100%).
4212          * Below that, scale to fit SNR of 0 - 40 dB within 0 - 100% indicator.
4213          * Weakest usable signal is usually 10 - 15 dB SNR. */
4214         if (noise_dbm) {
4215                 if (rssi_dbm - noise_dbm >= 40)
4216                         return 100;
4217                 else if (rssi_dbm < noise_dbm)
4218                         return 0;
4219                 sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2;
4220
4221         /* Else use just the signal level.
4222          * This formula is a least squares fit of data points collected and
4223          *   compared with a reference system that had a percentage (%) display
4224          *   for signal quality. */
4225         } else
4226                 sig_qual = (100 * (RSSI_RANGE * RSSI_RANGE) - degradation *
4227                             (15 * RSSI_RANGE + 62 * degradation)) /
4228                            (RSSI_RANGE * RSSI_RANGE);
4229
4230         if (sig_qual > 100)
4231                 sig_qual = 100;
4232         else if (sig_qual < 1)
4233                 sig_qual = 0;
4234
4235         return sig_qual;
4236 }
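/* Illustrative example for the SNR branch above: rssi_dbm = -60 and
 * noise_dbm = -85 give an SNR of 25 dB, below the 40 dB "perfect" cutoff,
 * so the indicator is (25 * 5) / 2 = 62 percent. */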
4237
4238 /**
4239  * iwl_rx_handle - Main entry function for receiving responses from the uCode
4240  *
4241  * Uses the priv->rx_handlers callback function array to invoke
4242  * the appropriate handlers, including command responses,
4243  * frame-received notifications, and other notifications.
4244  */
4245 static void iwl_rx_handle(struct iwl_priv *priv)
4246 {
4247         struct iwl_rx_mem_buffer *rxb;
4248         struct iwl_rx_packet *pkt;
4249         struct iwl_rx_queue *rxq = &priv->rxq;
4250         u32 r, i;
4251         int reclaim;
4252         unsigned long flags;
4253
4254         r = iwl_hw_get_rx_read(priv);
4255         i = rxq->read;
4256
4257         /* Rx interrupt, but nothing sent from uCode */
4258         if (i == r)
4259                 IWL_DEBUG(IWL_DL_RX | IWL_DL_ISR, "r = %d, i = %d\n", r, i);
4260
4261         while (i != r) {
4262                 rxb = rxq->queue[i];
4263
4264                 /* If an RXB doesn't have a queue slot associated with it
4265                  * then a bug has been introduced in the queue refilling
4266                  * routines -- catch it here */
4267                 BUG_ON(rxb == NULL);
4268
4269                 rxq->queue[i] = NULL;
4270
4271                 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
4272                                             IWL_RX_BUF_SIZE,
4273                                             PCI_DMA_FROMDEVICE);
4274                 pkt = (struct iwl_rx_packet *)rxb->skb->data;
4275
4276                 /* Reclaim a command buffer only if this packet is a response
4277                  *   to a (driver-originated) command.
4278                  * If the packet (e.g. Rx frame) originated from uCode,
4279                  *   there is no command buffer to reclaim.
4280                  * uCode should set SEQ_RX_FRAME bit if uCode-originated,
4281                  *   but apparently a few don't get set; catch them here. */
4282                 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
4283                         (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
4284                         (pkt->hdr.cmd != REPLY_TX);
4285
4286                 /* Based on type of command response or notification,
4287                  *   handle those that need handling via function in
4288                  *   rx_handlers table.  See iwl_setup_rx_handlers() */
4289                 if (priv->rx_handlers[pkt->hdr.cmd]) {
4290                         IWL_DEBUG(IWL_DL_HOST_COMMAND | IWL_DL_RX | IWL_DL_ISR,
4291                                 "r = %d, i = %d, %s, 0x%02x\n", r, i,
4292                                 get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
4293                         priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
4294                 } else {
4295                         /* No handling needed */
4296                         IWL_DEBUG(IWL_DL_HOST_COMMAND | IWL_DL_RX | IWL_DL_ISR,
4297                                 "r %d i %d No handler needed for %s, 0x%02x\n",
4298                                 r, i, get_cmd_string(pkt->hdr.cmd),
4299                                 pkt->hdr.cmd);
4300                 }
4301
4302                 if (reclaim) {
4303                         /* Invoke any callbacks, transfer the skb to caller,
4304                          * and fire off the (possibly) blocking iwl_send_cmd()
4305                          * as we reclaim the driver command queue */
4306                         if (rxb && rxb->skb)
4307                                 iwl_tx_cmd_complete(priv, rxb);
4308                         else
4309                                 IWL_WARNING("Claim null rxb?\n");
4310                 }
4311
4312                 /* For now we just don't re-use anything.  We can tweak this
4313                  * later to try and re-use notification packets and SKBs that
4314                  * fail to Rx correctly */
4315                 if (rxb->skb != NULL) {
4316                         priv->alloc_rxb_skb--;
4317                         dev_kfree_skb_any(rxb->skb);
4318                         rxb->skb = NULL;
4319                 }
4320
4321                 pci_unmap_single(priv->pci_dev, rxb->dma_addr,
4322                                  IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
4323                 spin_lock_irqsave(&rxq->lock, flags);
4324                 list_add_tail(&rxb->list, &priv->rxq.rx_used);
4325                 spin_unlock_irqrestore(&rxq->lock, flags);
4326                 i = (i + 1) & RX_QUEUE_MASK;
4327         }
4328
4329                 /* Record where we left off; restock below refills the used buffers */
4330         priv->rxq.read = i;
4331         iwl_rx_queue_restock(priv);
4332 }
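/* Note: the (i + 1) & RX_QUEUE_MASK wrap above assumes the Rx queue size is
 * a power of two, with RX_QUEUE_MASK presumably defined as
 * RX_QUEUE_SIZE - 1, so the read index cycles around the ring. */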
4333
4334 int iwl_tx_queue_update_write_ptr(struct iwl_priv *priv,
4335                                   struct iwl_tx_queue *txq)
4336 {
4337         u32 reg = 0;
4338         int rc = 0;
4339         int txq_id = txq->q.id;
4340
4341         if (txq->need_update == 0)
4342                 return rc;
4343
4344         /* if we're trying to save power */
4345         if (test_bit(STATUS_POWER_PMI, &priv->status)) {
4346                 /* wake up nic if it's powered down ...
4347                  * uCode will wake up, and interrupt us again, so next
4348                  * time we'll skip this part. */
4349                 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
4350
4351                 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
4352                         IWL_DEBUG_INFO("Requesting wakeup, GP1 = 0x%x\n", reg);
4353                         iwl_set_bit(priv, CSR_GP_CNTRL,
4354                                     CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
4355                         return rc;
4356                 }
4357
4358                 /* restore this queue's parameters in nic hardware. */
4359                 rc = iwl_grab_restricted_access(priv);
4360                 if (rc)
4361                         return rc;
4362                 iwl_write_restricted(priv, HBUS_TARG_WRPTR,
4363                                      txq->q.first_empty | (txq_id << 8));
4364                 iwl_release_restricted_access(priv);
4365
4366         /* else not in power-save mode, uCode will never sleep when we're
4367          * trying to tx (during RFKILL, we're not trying to tx). */
4368         } else
4369                 iwl_write32(priv, HBUS_TARG_WRPTR,
4370                             txq->q.first_empty | (txq_id << 8));
4371
4372         txq->need_update = 0;
4373
4374         return rc;
4375 }
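/* Note on the write-pointer value used in both branches above: the Tx write
 * index (txq->q.first_empty) sits in the low bits and the queue id is packed
 * into bits 8 and up before being written to HBUS_TARG_WRPTR. */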
4376
4377 #ifdef CONFIG_IWLWIFI_DEBUG
4378 static void iwl_print_rx_config_cmd(struct iwl_rxon_cmd *rxon)
4379 {
4380         DECLARE_MAC_BUF(mac);
4381
4382         IWL_DEBUG_RADIO("RX CONFIG:\n");
4383         iwl_print_hex_dump(IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
4384         IWL_DEBUG_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
4385         IWL_DEBUG_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
4386         IWL_DEBUG_RADIO("u32 filter_flags: 0x%08x\n",
4387                         le32_to_cpu(rxon->filter_flags));
4388         IWL_DEBUG_RADIO("u8 dev_type: 0x%x\n", rxon->dev_type);
4389         IWL_DEBUG_RADIO("u8 ofdm_basic_rates: 0x%02x\n",
4390                         rxon->ofdm_basic_rates);
4391         IWL_DEBUG_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates);
4392         IWL_DEBUG_RADIO("u8[6] node_addr: %s\n",
4393                         print_mac(mac, rxon->node_addr));
4394         IWL_DEBUG_RADIO("u8[6] bssid_addr: %s\n",
4395                         print_mac(mac, rxon->bssid_addr));
4396         IWL_DEBUG_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
4397 }
4398 #endif
4399
4400 static void iwl_enable_interrupts(struct iwl_priv *priv)
4401 {
4402         IWL_DEBUG_ISR("Enabling interrupts\n");
4403         set_bit(STATUS_INT_ENABLED, &priv->status);
4404         iwl_write32(priv, CSR_INT_MASK, CSR_INI_SET_MASK);
4405 }
4406
4407 static inline void iwl_disable_interrupts(struct iwl_priv *priv)
4408 {
4409         clear_bit(STATUS_INT_ENABLED, &priv->status);
4410
4411         /* disable interrupts from uCode/NIC to host */
4412         iwl_write32(priv, CSR_INT_MASK, 0x00000000);
4413
4414         /* acknowledge/clear/reset any interrupts still pending
4415          * from uCode or flow handler (Rx/Tx DMA) */
4416         iwl_write32(priv, CSR_INT, 0xffffffff);
4417         iwl_write32(priv, CSR_FH_INT_STATUS, 0xffffffff);
4418         IWL_DEBUG_ISR("Disabled interrupts\n");
4419 }
4420
4421 static const char *desc_lookup(int i)
4422 {
4423         switch (i) {
4424         case 1:
4425                 return "FAIL";
4426         case 2:
4427                 return "BAD_PARAM";
4428         case 3:
4429                 return "BAD_CHECKSUM";
4430         case 4:
4431                 return "NMI_INTERRUPT";
4432         case 5:
4433                 return "SYSASSERT";
4434         case 6:
4435                 return "FATAL_ERROR";
4436         }
4437
4438         return "UNKNOWN";
4439 }
4440
4441 #define ERROR_START_OFFSET  (1 * sizeof(u32))
4442 #define ERROR_ELEM_SIZE     (7 * sizeof(u32))
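/* Entry layout assumed by the dump below: the u32 at the log base holds the
 * entry count, and each entry is seven u32s read in the order
 * desc, time, blink1, blink2, ilink1, ilink2, data1. */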
4443
4444 static void iwl_dump_nic_error_log(struct iwl_priv *priv)
4445 {
4446         u32 i;
4447         u32 desc, time, count, base, data1;
4448         u32 blink1, blink2, ilink1, ilink2;
4449         int rc;
4450
4451         base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
4452
4453         if (!iwl_hw_valid_rtc_data_addr(base)) {
4454                 IWL_ERROR("Not valid error log pointer 0x%08X\n", base);
4455                 return;
4456         }
4457
4458         rc = iwl_grab_restricted_access(priv);
4459         if (rc) {
4460                 IWL_WARNING("Can not read from adapter at this time.\n");
4461                 return;
4462         }
4463
4464         count = iwl_read_restricted_mem(priv, base);
4465
4466         if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
4467                 IWL_ERROR("Start IWL Error Log Dump:\n");
4468                 IWL_ERROR("Status: 0x%08lX, Config: %08X count: %d\n",
4469                           priv->status, priv->config, count);
4470         }
4471
4472         IWL_ERROR("Desc       Time       asrtPC  blink2 "
4473                   "ilink1  nmiPC   Line\n");
4474         for (i = ERROR_START_OFFSET;
4475              i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET;
4476              i += ERROR_ELEM_SIZE) {
4477                 desc = iwl_read_restricted_mem(priv, base + i);
4478                 time =
4479                     iwl_read_restricted_mem(priv, base + i + 1 * sizeof(u32));
4480                 blink1 =
4481                     iwl_read_restricted_mem(priv, base + i + 2 * sizeof(u32));
4482                 blink2 =
4483                     iwl_read_restricted_mem(priv, base + i + 3 * sizeof(u32));
4484                 ilink1 =
4485                     iwl_read_restricted_mem(priv, base + i + 4 * sizeof(u32));
4486                 ilink2 =
4487                     iwl_read_restricted_mem(priv, base + i + 5 * sizeof(u32));
4488                 data1 =
4489                     iwl_read_restricted_mem(priv, base + i + 6 * sizeof(u32));
4490
4491                 IWL_ERROR
4492                     ("%-13s (#%d) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n",
4493                      desc_lookup(desc), desc, time, blink1, blink2,
4494                      ilink1, ilink2, data1);
4495         }
4496
4497         iwl_release_restricted_access(priv);
4498
4499 }
4500
4501 #define EVENT_START_OFFSET  (4 * sizeof(u32))
4502
4503 /**
4504  * iwl_print_event_log - Dump error event log to syslog
4505  *
4506  * NOTE: Must be called with iwl_grab_restricted_access() already obtained!
4507  */
4508 static void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
4509                                 u32 num_events, u32 mode)
4510 {
4511         u32 i;
4512         u32 base;       /* SRAM byte address of event log header */
4513         u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
4514         u32 ptr;        /* SRAM byte address of log data */
4515         u32 ev, time, data; /* event log data */
4516
4517         if (num_events == 0)
4518                 return;
4519
4520         base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
4521
4522         if (mode == 0)
4523                 event_size = 2 * sizeof(u32);
4524         else
4525                 event_size = 3 * sizeof(u32);
4526
4527         ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
4528
4529         /* "time" is actually "data" for mode 0 (no timestamp).
4530          * place event id # at far right for easier visual parsing. */
4531         for (i = 0; i < num_events; i++) {
4532                 ev = iwl_read_restricted_mem(priv, ptr);
4533                 ptr += sizeof(u32);
4534                 time = iwl_read_restricted_mem(priv, ptr);
4535                 ptr += sizeof(u32);
4536                 if (mode == 0)
4537                         IWL_ERROR("0x%08x\t%04u\n", time, ev); /* data, ev */
4538                 else {
4539                         data = iwl_read_restricted_mem(priv, ptr);
4540                         ptr += sizeof(u32);
4541                         IWL_ERROR("%010u\t0x%08x\t%04u\n", time, data, ev);
4542                 }
4543         }
4544 }
4545
4546 static void iwl_dump_nic_event_log(struct iwl_priv *priv)
4547 {
4548         int rc;
4549         u32 base;       /* SRAM byte address of event log header */
4550         u32 capacity;   /* event log capacity in # entries */
4551         u32 mode;       /* 0 - no timestamp, 1 - timestamp recorded */
4552         u32 num_wraps;  /* # times uCode wrapped to top of log */
4553         u32 next_entry; /* index of next entry to be written by uCode */
4554         u32 size;       /* # entries that we'll print */
4555
4556         base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
4557         if (!iwl_hw_valid_rtc_data_addr(base)) {
4558                 IWL_ERROR("Invalid event log pointer 0x%08X\n", base);
4559                 return;
4560         }
4561
4562         rc = iwl_grab_restricted_access(priv);
4563         if (rc) {
4564                 IWL_WARNING("Can not read from adapter at this time.\n");
4565                 return;
4566         }
4567
4568         /* event log header */
4569         capacity = iwl_read_restricted_mem(priv, base);
4570         mode = iwl_read_restricted_mem(priv, base + (1 * sizeof(u32)));
4571         num_wraps = iwl_read_restricted_mem(priv, base + (2 * sizeof(u32)));
4572         next_entry = iwl_read_restricted_mem(priv, base + (3 * sizeof(u32)));
4573
4574         size = num_wraps ? capacity : next_entry;
4575
4576         /* bail out if nothing in log */
4577         if (size == 0) {
4578                 IWL_ERROR("Start IWL Event Log Dump: nothing in log\n");
4579                 iwl_release_restricted_access(priv);
4580                 return;
4581         }
4582
4583         IWL_ERROR("Start IWL Event Log Dump: display count %d, wraps %d\n",
4584                   size, num_wraps);
4585
4586         /* if uCode has wrapped back to top of log, start at the oldest entry,
4587          * i.e. the next one that uCode would fill. */
4588         if (num_wraps)
4589                 iwl_print_event_log(priv, next_entry,
4590                                     capacity - next_entry, mode);
4591
4592         /* (then/else) start at top of log */
4593         iwl_print_event_log(priv, 0, next_entry, mode);
4594
4595         iwl_release_restricted_access(priv);
4596 }
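/* Illustrative dump order for the wrap handling above (hypothetical values):
 * with capacity = 128, next_entry = 10 and num_wraps = 1, entries 10..127
 * (oldest) are printed first, then 0..9 (newest).  With num_wraps = 0 only
 * entries 0..next_entry-1 have ever been written, so just those print. */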
4597
4598 /**
4599  * iwl_irq_handle_error - called for HW or SW error interrupt from card
4600  */
4601 static void iwl_irq_handle_error(struct iwl_priv *priv)
4602 {
4603         /* Set the FW error flag -- cleared on iwl_down */
4604         set_bit(STATUS_FW_ERROR, &priv->status);
4605
4606         /* Cancel currently queued command. */
4607         clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
4608
4609 #ifdef CONFIG_IWLWIFI_DEBUG
4610         if (iwl_debug_level & IWL_DL_FW_ERRORS) {
4611                 iwl_dump_nic_error_log(priv);
4612                 iwl_dump_nic_event_log(priv);
4613                 iwl_print_rx_config_cmd(&priv->staging_rxon);
4614         }
4615 #endif
4616
4617         wake_up_interruptible(&priv->wait_command_queue);
4618
4619         /* Keep the restart process from trying to send host
4620          * commands by clearing the INIT status bit */
4621         clear_bit(STATUS_READY, &priv->status);
4622
4623         if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
4624                 IWL_DEBUG(IWL_DL_INFO | IWL_DL_FW_ERRORS,
4625                           "Restarting adapter due to uCode error.\n");
4626
4627                 if (iwl_is_associated(priv)) {
4628                         memcpy(&priv->recovery_rxon, &priv->active_rxon,
4629                                sizeof(priv->recovery_rxon));
4630                         priv->error_recovering = 1;
4631                 }
4632                 queue_work(priv->workqueue, &priv->restart);
4633         }
4634 }
4635
4636 static void iwl_error_recovery(struct iwl_priv *priv)
4637 {
4638         unsigned long flags;
4639
4640         memcpy(&priv->staging_rxon, &priv->recovery_rxon,
4641                sizeof(priv->staging_rxon));
4642         priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
4643         iwl_commit_rxon(priv);
4644
4645         iwl_add_station(priv, priv->bssid, 1, 0);
4646
4647         spin_lock_irqsave(&priv->lock, flags);
4648         priv->assoc_id = le16_to_cpu(priv->staging_rxon.assoc_id);
4649         priv->error_recovering = 0;
4650         spin_unlock_irqrestore(&priv->lock, flags);
4651 }
4652
4653 static void iwl_irq_tasklet(struct iwl_priv *priv)
4654 {
4655         u32 inta, handled = 0;
4656         u32 inta_fh;
4657         unsigned long flags;
4658 #ifdef CONFIG_IWLWIFI_DEBUG
4659         u32 inta_mask;
4660 #endif
4661
4662         spin_lock_irqsave(&priv->lock, flags);
4663
4664         /* Ack/clear/reset pending uCode interrupts.
4665          * Note:  Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
4666          *  and will clear only when CSR_FH_INT_STATUS gets cleared. */
4667         inta = iwl_read32(priv, CSR_INT);
4668         iwl_write32(priv, CSR_INT, inta);
4669
4670         /* Ack/clear/reset pending flow-handler (DMA) interrupts.
4671          * Any new interrupts that happen after this, either while we're
4672          * in this tasklet, or later, will show up in next ISR/tasklet. */
4673         inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
4674         iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);
4675
4676 #ifdef CONFIG_IWLWIFI_DEBUG
4677         if (iwl_debug_level & IWL_DL_ISR) {
4678                 inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
4679                 IWL_DEBUG_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
4680                               inta, inta_mask, inta_fh);
4681         }
4682 #endif
4683
4684         /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
4685          * atomic, make sure that inta covers all the interrupts that
4686          * we've discovered, even if FH interrupt came in just after
4687          * reading CSR_INT. */
4688         if (inta_fh & CSR_FH_INT_RX_MASK)
4689                 inta |= CSR_INT_BIT_FH_RX;
4690         if (inta_fh & CSR_FH_INT_TX_MASK)
4691                 inta |= CSR_INT_BIT_FH_TX;
4692
4693         /* Now service all interrupt bits discovered above. */
4694         if (inta & CSR_INT_BIT_HW_ERR) {
4695                 IWL_ERROR("Microcode HW error detected.  Restarting.\n");
4696
4697                 /* Tell the device to stop sending interrupts */
4698                 iwl_disable_interrupts(priv);
4699
4700                 iwl_irq_handle_error(priv);
4701
4702                 handled |= CSR_INT_BIT_HW_ERR;
4703
4704                 spin_unlock_irqrestore(&priv->lock, flags);
4705
4706                 return;
4707         }
4708
4709 #ifdef CONFIG_IWLWIFI_DEBUG
4710         if (iwl_debug_level & (IWL_DL_ISR)) {
4711                 /* NIC fires this, but we don't use it, redundant with WAKEUP */
4712                 if (inta & CSR_INT_BIT_MAC_CLK_ACTV)
4713                         IWL_DEBUG_ISR("Microcode started or stopped.\n");
4714
4715                 /* Alive notification via Rx interrupt will do the real work */
4716                 if (inta & CSR_INT_BIT_ALIVE)
4717                         IWL_DEBUG_ISR("Alive interrupt\n");
4718         }
4719 #endif
4720         /* Safely ignore these bits for debug checks below */
4721         inta &= ~(CSR_INT_BIT_MAC_CLK_ACTV | CSR_INT_BIT_ALIVE);
4722
4723         /* HW RF KILL switch toggled (4965 only) */
4724         if (inta & CSR_INT_BIT_RF_KILL) {
4725                 int hw_rf_kill = 0;
4726                 if (!(iwl_read32(priv, CSR_GP_CNTRL) &
4727                                 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
4728                         hw_rf_kill = 1;
4729
4730                 IWL_DEBUG(IWL_DL_INFO | IWL_DL_RF_KILL | IWL_DL_ISR,
4731                                 "RF_KILL bit toggled to %s.\n",
4732                                 hw_rf_kill ? "disable radio":"enable radio");
4733
4734                 /* Queue restart only if RF_KILL switch was set to "kill"
4735                  *   when we loaded driver, and is now set to "enable".
4736                  * After we're Alive, RF_KILL gets handled by
4737                  *   iwl_rx_card_state_notif() */
4738                 if (!hw_rf_kill && !test_bit(STATUS_ALIVE, &priv->status))
4739                         queue_work(priv->workqueue, &priv->restart);
4740
4741                 handled |= CSR_INT_BIT_RF_KILL;
4742         }
4743
4744         /* Chip got too hot and stopped itself (4965 only) */
4745         if (inta & CSR_INT_BIT_CT_KILL) {
4746                 IWL_ERROR("Microcode CT kill error detected.\n");
4747                 handled |= CSR_INT_BIT_CT_KILL;
4748         }
4749
4750         /* Error detected by uCode */
4751         if (inta & CSR_INT_BIT_SW_ERR) {
4752                 IWL_ERROR("Microcode SW error detected.  Restarting 0x%X.\n",
4753                           inta);
4754                 iwl_irq_handle_error(priv);
4755                 handled |= CSR_INT_BIT_SW_ERR;
4756         }
4757
4758         /* uCode wakes up after power-down sleep */
4759         if (inta & CSR_INT_BIT_WAKEUP) {
4760                 IWL_DEBUG_ISR("Wakeup interrupt\n");
4761                 iwl_rx_queue_update_write_ptr(priv, &priv->rxq);
4762                 iwl_tx_queue_update_write_ptr(priv, &priv->txq[0]);
4763                 iwl_tx_queue_update_write_ptr(priv, &priv->txq[1]);
4764                 iwl_tx_queue_update_write_ptr(priv, &priv->txq[2]);
4765                 iwl_tx_queue_update_write_ptr(priv, &priv->txq[3]);
4766                 iwl_tx_queue_update_write_ptr(priv, &priv->txq[4]);
4767                 iwl_tx_queue_update_write_ptr(priv, &priv->txq[5]);
4768
4769                 handled |= CSR_INT_BIT_WAKEUP;
4770         }
4771
4772         /* All uCode command responses, including Tx command responses,
4773          * Rx "responses" (frame-received notification), and other
4774          * notifications from uCode come through here */
4775         if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
4776                 iwl_rx_handle(priv);
4777                 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
4778         }
4779
4780         if (inta & CSR_INT_BIT_FH_TX) {
4781                 IWL_DEBUG_ISR("Tx interrupt\n");
4782
4783                 iwl_write32(priv, CSR_FH_INT_STATUS, (1 << 6));
4784                 if (!iwl_grab_restricted_access(priv)) {
4785                         iwl_write_restricted(priv,
4786                                              FH_TCSR_CREDIT
4787                                              (ALM_FH_SRVC_CHNL), 0x0);
4788                         iwl_release_restricted_access(priv);
4789                 }
4790                 handled |= CSR_INT_BIT_FH_TX;
4791         }
4792
4793         if (inta & ~handled)
4794                 IWL_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
4795
4796         if (inta & ~CSR_INI_SET_MASK) {
4797                 IWL_WARNING("Disabled INTA bits 0x%08x were pending\n",
4798                          inta & ~CSR_INI_SET_MASK);
4799                 IWL_WARNING("   with FH_INT = 0x%08x\n", inta_fh);
4800         }
4801
4802         /* Re-enable all interrupts */
4803         iwl_enable_interrupts(priv);
4804
4805 #ifdef CONFIG_IWLWIFI_DEBUG
4806         if (iwl_debug_level & (IWL_DL_ISR)) {
4807                 inta = iwl_read32(priv, CSR_INT);
4808                 inta_mask = iwl_read32(priv, CSR_INT_MASK);
4809                 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
4810                 IWL_DEBUG_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
4811                         "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
4812         }
4813 #endif
4814         spin_unlock_irqrestore(&priv->lock, flags);
4815 }
4816
4817 static irqreturn_t iwl_isr(int irq, void *data)
4818 {
4819         struct iwl_priv *priv = data;
4820         u32 inta, inta_mask;
4821         u32 inta_fh;
4822         if (!priv)
4823                 return IRQ_NONE;
4824
4825         spin_lock(&priv->lock);
4826
4827         /* Disable (but don't clear!) interrupts here to avoid
4828          *    back-to-back ISRs and sporadic interrupts from our NIC.
4829          * If we have something to service, the tasklet will re-enable ints.
4830          * If we *don't* have something, we'll re-enable before leaving here. */
4831         inta_mask = iwl_read32(priv, CSR_INT_MASK);  /* just for debug */
4832         iwl_write32(priv, CSR_INT_MASK, 0x00000000);
4833
4834         /* Discover which interrupts are active/pending */
4835         inta = iwl_read32(priv, CSR_INT);
4836         inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
4837
4838         /* Ignore interrupt if there's nothing in NIC to service.
4839          * This may be due to IRQ shared with another device,
4840          * or due to sporadic interrupts thrown from our NIC. */
4841         if (!inta && !inta_fh) {
4842                 IWL_DEBUG_ISR("Ignore interrupt, inta == 0, inta_fh == 0\n");
4843                 goto none;
4844         }
4845
4846         if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
4847                 /* Hardware disappeared */
4848                 IWL_WARNING("HARDWARE GONE?? INTA == 0x%08x\n", inta);
4849                 goto none;
4850         }
4851
4852         IWL_DEBUG_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
4853                       inta, inta_mask, inta_fh);
4854
4855         /* iwl_irq_tasklet() will service interrupts and re-enable them */
4856         tasklet_schedule(&priv->irq_tasklet);
4857         spin_unlock(&priv->lock);
4858
4859         return IRQ_HANDLED;
4860
4861  none:
4862         /* re-enable interrupts here since we don't have anything to service. */
4863         iwl_enable_interrupts(priv);
4864         spin_unlock(&priv->lock);
4865         return IRQ_NONE;
4866 }
4867
4868 /************************** EEPROM BANDS ****************************
4869  *
4870  * The iwl_eeprom_band definitions below provide the mapping from the
4871  * EEPROM contents to the specific channel number supported for each
4872  * band.
4873  *
4874  * For example, iwl_priv->eeprom.band_3_channels[4] from the band_3
4875  * definition below maps to physical channel 42 in the 5.2GHz spectrum.
4876  * The specific geography and calibration information for that channel
4877  * is contained in the eeprom map itself.
4878  *
4879  * During init, we copy the eeprom information and channel map
4880  * information into priv->channel_info_24/52 and priv->channel_map_24/52
4881  *
4882  * channel_map_24/52 provides the index in the channel_info array for a
4883  * given channel.  We have to have two separate maps as there is channel
4884  * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
4885  * band_2
4886  *
4887  * A value of 0xff stored in the channel_map indicates that the channel
4888  * is not supported by the hardware at all.
4889  *
4890  * A value of 0xfe in the channel_map indicates that the channel is not
4891  * valid for Tx with the current hardware.  This means that
4892  * while the system can tune and receive on a given channel, it may not
4893  * be able to associate or transmit any frames on that
4894  * channel.  There is no corresponding channel information for that
4895  * entry.
4896  *
4897  *********************************************************************/
4898
4899 /* 2.4 GHz */
4900 static const u8 iwl_eeprom_band_1[14] = {
4901         1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
4902 };
4903
4904 /* 5.2 GHz bands */
4905 static const u8 iwl_eeprom_band_2[] = {
4906         183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
4907 };
4908
4909 static const u8 iwl_eeprom_band_3[] = { /* 5205-5320MHz */
4910         34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
4911 };
4912
4913 static const u8 iwl_eeprom_band_4[] = { /* 5500-5700MHz */
4914         100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
4915 };
4916
4917 static const u8 iwl_eeprom_band_5[] = { /* 5725-5825MHz */
4918         145, 149, 153, 157, 161, 165
4919 };
4920
4921 static void iwl_init_band_reference(const struct iwl_priv *priv, int band,
4922                                     int *eeprom_ch_count,
4923                                     const struct iwl_eeprom_channel
4924                                     **eeprom_ch_info,
4925                                     const u8 **eeprom_ch_index)
4926 {
4927         switch (band) {
4928         case 1:         /* 2.4GHz band */
4929                 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1);
4930                 *eeprom_ch_info = priv->eeprom.band_1_channels;
4931                 *eeprom_ch_index = iwl_eeprom_band_1;
4932                 break;
4933         case 2:         /* 5.2GHz band */
4934                 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2);
4935                 *eeprom_ch_info = priv->eeprom.band_2_channels;
4936                 *eeprom_ch_index = iwl_eeprom_band_2;
4937                 break;
4938         case 3:         /* 5.2GHz band */
4939                 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3);
4940                 *eeprom_ch_info = priv->eeprom.band_3_channels;
4941                 *eeprom_ch_index = iwl_eeprom_band_3;
4942                 break;
4943         case 4:         /* 5.2GHz band */
4944                 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4);
4945                 *eeprom_ch_info = priv->eeprom.band_4_channels;
4946                 *eeprom_ch_index = iwl_eeprom_band_4;
4947                 break;
4948         case 5:         /* 5.2GHz band */
4949                 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5);
4950                 *eeprom_ch_info = priv->eeprom.band_5_channels;
4951                 *eeprom_ch_index = iwl_eeprom_band_5;
4952                 break;
4953         default:
4954                 BUG();
4955                 return;
4956         }
4957 }
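/* Note: the band argument above uses the EEPROM's own 1-based numbering
 * (band_1 .. band_5), not a mac80211 mode index; any other value is a
 * driver bug and trips the BUG() above. */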
4958
4959 const struct iwl_channel_info *iwl_get_channel_info(const struct iwl_priv *priv,
4960                                                     int phymode, u16 channel)
4961 {
4962         int i;
4963
4964         switch (phymode) {
4965         case MODE_IEEE80211A:
4966                 for (i = 14; i < priv->channel_count; i++) {
4967                         if (priv->channel_info[i].channel == channel)
4968                                 return &priv->channel_info[i];
4969                 }
4970                 break;
4971
4972         case MODE_IEEE80211B:
4973         case MODE_IEEE80211G:
4974                 if (channel >= 1 && channel <= 14)
4975                         return &priv->channel_info[channel - 1];
4976                 break;
4977
4978         }
4979
4980         return NULL;
4981 }
4982
4983 #define CHECK_AND_PRINT(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
4984                             ? # x " " : "")
4985
4986 static int iwl_init_channel_map(struct iwl_priv *priv)
4987 {
4988         int eeprom_ch_count = 0;
4989         const u8 *eeprom_ch_index = NULL;
4990         const struct iwl_eeprom_channel *eeprom_ch_info = NULL;
4991         int band, ch;
4992         struct iwl_channel_info *ch_info;
4993
4994         if (priv->channel_count) {
4995                 IWL_DEBUG_INFO("Channel map already initialized.\n");
4996                 return 0;
4997         }
4998
4999         if (priv->eeprom.version < 0x2f) {
5000                 IWL_WARNING("Unsupported EEPROM version: 0x%04X\n",
5001                             priv->eeprom.version);
5002                 return -EINVAL;
5003         }
5004
5005         IWL_DEBUG_INFO("Initializing regulatory info from EEPROM\n");
5006
5007         priv->channel_count =
5008             ARRAY_SIZE(iwl_eeprom_band_1) +
5009             ARRAY_SIZE(iwl_eeprom_band_2) +
5010             ARRAY_SIZE(iwl_eeprom_band_3) +
5011             ARRAY_SIZE(iwl_eeprom_band_4) +
5012             ARRAY_SIZE(iwl_eeprom_band_5);
5013
5014         IWL_DEBUG_INFO("Parsing data for %d channels.\n", priv->channel_count);
5015
5016         priv->channel_info = kzalloc(sizeof(struct iwl_channel_info) *
5017                                      priv->channel_count, GFP_KERNEL);
5018         if (!priv->channel_info) {
5019                 IWL_ERROR("Could not allocate channel_info\n");
5020                 priv->channel_count = 0;
5021                 return -ENOMEM;
5022         }
5023
5024         ch_info = priv->channel_info;
5025
5026         /* Loop through the 5 EEPROM bands adding them in order to the
5027          * channel map we maintain (which contains more information than
5028          * what is stored in the EEPROM alone) */
5029         for (band = 1; band <= 5; band++) {
5030
5031                 iwl_init_band_reference(priv, band, &eeprom_ch_count,
5032                                         &eeprom_ch_info, &eeprom_ch_index);
5033
5034                 /* Loop through each band adding each of the channels */
5035                 for (ch = 0; ch < eeprom_ch_count; ch++) {
5036                         ch_info->channel = eeprom_ch_index[ch];
5037                         ch_info->phymode = (band == 1) ? MODE_IEEE80211B :
5038                             MODE_IEEE80211A;
5039
5040                         /* permanently store EEPROM's channel regulatory flags
5041                          *   and max power in channel info database. */
5042                         ch_info->eeprom = eeprom_ch_info[ch];
5043
5044                         /* Copy the run-time flags so they are there even on
5045                          * invalid channels */
5046                         ch_info->flags = eeprom_ch_info[ch].flags;
5047
5048                         if (!(is_channel_valid(ch_info))) {
5049                                 IWL_DEBUG_INFO("Ch. %d Flags %x [%sGHz] - "
5050                                                "No traffic\n",
5051                                                ch_info->channel,
5052                                                ch_info->flags,
5053                                                is_channel_a_band(ch_info) ?
5054                                                "5.2" : "2.4");
5055                                 ch_info++;
5056                                 continue;
5057                         }
5058
5059                         /* Initialize regulatory-based run-time data */
5060                         ch_info->max_power_avg = ch_info->curr_txpow =
5061                             eeprom_ch_info[ch].max_power_avg;
5062                         ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
5063                         ch_info->min_power = 0;
5064
5065                         IWL_DEBUG_INFO("Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x"
5066                                        " %ddBm): Ad-Hoc %ssupported\n",
5067                                        ch_info->channel,
5068                                        is_channel_a_band(ch_info) ?
5069                                        "5.2" : "2.4",
5070                                        CHECK_AND_PRINT(IBSS),
5071                                        CHECK_AND_PRINT(ACTIVE),
5072                                        CHECK_AND_PRINT(RADAR),
5073                                        CHECK_AND_PRINT(WIDE),
5074                                        CHECK_AND_PRINT(NARROW),
5075                                        CHECK_AND_PRINT(DFS),
5076                                        eeprom_ch_info[ch].flags,
5077                                        eeprom_ch_info[ch].max_power_avg,
5078                                        ((eeprom_ch_info[ch].
5079                                          flags & EEPROM_CHANNEL_IBSS)
5080                                         && !(eeprom_ch_info[ch].
5081                                              flags & EEPROM_CHANNEL_RADAR))
5082                                        ? "" : "not ");
5083
5084                         /* Set the user_txpower_limit to the highest power
5085                          * supported by any channel */
5086                         if (eeprom_ch_info[ch].max_power_avg >
5087                             priv->user_txpower_limit)
5088                                 priv->user_txpower_limit =
5089                                     eeprom_ch_info[ch].max_power_avg;
5090
5091                         ch_info++;
5092                 }
5093         }
5094
5095         if (iwl3945_txpower_set_from_eeprom(priv))
5096                 return -EIO;
5097
5098         return 0;
5099 }
5100
5101 /* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
5102  * sending probe req.  This should be set long enough to hear probe responses
5103  * from more than one AP.  */
5104 #define IWL_ACTIVE_DWELL_TIME_24    (20)        /* all times in msec */
5105 #define IWL_ACTIVE_DWELL_TIME_52    (10)
5106
5107 /* For faster active scanning, scan will move to the next channel if fewer than
5108  * PLCP_QUIET_THRESH packets are heard on this channel within
5109  * ACTIVE_QUIET_TIME after sending probe request.  This shortens the dwell
5110  * time if it's a quiet channel (nothing responded to our probe, and there's
5111  * no other traffic).
5112  * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
5113 #define IWL_PLCP_QUIET_THRESH       __constant_cpu_to_le16(1)   /* packets */
5114 #define IWL_ACTIVE_QUIET_TIME       __constant_cpu_to_le16(5)   /* msec */
5115
5116 /* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
5117  * Must be set longer than active dwell time.
5118  * For the most reliable scan, set > AP beacon interval (typically 100msec). */
5119 #define IWL_PASSIVE_DWELL_TIME_24   (20)        /* all times in msec */
5120 #define IWL_PASSIVE_DWELL_TIME_52   (10)
5121 #define IWL_PASSIVE_DWELL_BASE      (100)
5122 #define IWL_CHANNEL_TUNE_TIME       5
5123
5124 static inline u16 iwl_get_active_dwell_time(struct iwl_priv *priv, int phymode)
5125 {
5126         if (phymode == MODE_IEEE80211A)
5127                 return IWL_ACTIVE_DWELL_TIME_52;
5128         else
5129                 return IWL_ACTIVE_DWELL_TIME_24;
5130 }
5131
5132 static u16 iwl_get_passive_dwell_time(struct iwl_priv *priv, int phymode)
5133 {
5134         u16 active = iwl_get_active_dwell_time(priv, phymode);
5135         u16 passive = (phymode != MODE_IEEE80211A) ?
5136             IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
5137             IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;
5138
5139         if (iwl_is_associated(priv)) {
5140                 /* If we're associated, limit the passive dwell to the beacon
5141                  * interval (capped at IWL_PASSIVE_DWELL_BASE): use 98% of it
5142                  * minus 2 * channel tune time */
5143                 passive = priv->beacon_int;
5144                 if ((passive > IWL_PASSIVE_DWELL_BASE) || !passive)
5145                         passive = IWL_PASSIVE_DWELL_BASE;
5146                 passive = (passive * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
5147         }
5148
5149         if (passive <= active)
5150                 passive = active + 1;
5151
5152         return passive;
5153 }
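/* Illustrative example of the clamping above: when associated with
 * beacon_int = 100 (and IWL_PASSIVE_DWELL_BASE = 100), the passive dwell
 * becomes (100 * 98) / 100 - 2 * IWL_CHANNEL_TUNE_TIME = 88 msec, and is
 * only bumped to active + 1 if it ever drops to the active dwell or below. */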
5154
5155 static int iwl_get_channels_for_scan(struct iwl_priv *priv, int phymode,
5156                                      u8 is_active, u8 direct_mask,
5157                                      struct iwl_scan_channel *scan_ch)
5158 {
5159         const struct ieee80211_channel *channels = NULL;
5160         const struct ieee80211_hw_mode *hw_mode;
5161         const struct iwl_channel_info *ch_info;
5162         u16 passive_dwell = 0;
5163         u16 active_dwell = 0;
5164         int added, i;
5165
5166         hw_mode = iwl_get_hw_mode(priv, phymode);
5167         if (!hw_mode)
5168                 return 0;
5169
5170         channels = hw_mode->channels;
5171
5172         active_dwell = iwl_get_active_dwell_time(priv, phymode);
5173         passive_dwell = iwl_get_passive_dwell_time(priv, phymode);
5174
5175         for (i = 0, added = 0; i < hw_mode->num_channels; i++) {
5176                 if (channels[i].chan ==
5177                     le16_to_cpu(priv->active_rxon.channel)) {
5178                         if (iwl_is_associated(priv)) {
5179                                 IWL_DEBUG_SCAN
5180                                     ("Skipping current channel %d\n",
5181                                      le16_to_cpu(priv->active_rxon.channel));
5182                                 continue;
5183                         }
5184                 } else if (priv->only_active_channel)
5185                         continue;
5186
5187                 scan_ch->channel = channels[i].chan;
5188
5189                 ch_info = iwl_get_channel_info(priv, phymode, scan_ch->channel);
5190                 if (!is_channel_valid(ch_info)) {
5191                         IWL_DEBUG_SCAN("Channel %d is INVALID for this SKU.\n",
5192                                        scan_ch->channel);
5193                         continue;
5194                 }
5195
5196                 if (!is_active || is_channel_passive(ch_info) ||
5197                     !(channels[i].flag & IEEE80211_CHAN_W_ACTIVE_SCAN))
5198                         scan_ch->type = 0;      /* passive */
5199                 else
5200                         scan_ch->type = 1;      /* active */
5201
5202                 if (scan_ch->type & 1)
5203                         scan_ch->type |= (direct_mask << 1);
5204
5205                 if (is_channel_narrow(ch_info))
5206                         scan_ch->type |= (1 << 7);
5207
5208                 scan_ch->active_dwell = cpu_to_le16(active_dwell);
5209                 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
5210
5211                 /* Set power levels to defaults */
5212                 scan_ch->tpc.dsp_atten = 110;
5213                 /* scan_pwr_info->tpc.dsp_atten; */
5214
5215                 /*scan_pwr_info->tpc.tx_gain; */
5216                 if (phymode == MODE_IEEE80211A)
5217                         scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
5218                 else {
5219                         scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
5220                         /* NOTE: if we were doing 6Mb OFDM for scans we'd use
5221                          * power level
5222                          scan_ch->tpc.tx_gain = ((1<<5) | (2 << 3)) | 3;
5223                          */
5224                 }
5225
5226                 IWL_DEBUG_SCAN("Scanning %d [%s %d]\n",
5227                                scan_ch->channel,
5228                                (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE",
5229                                (scan_ch->type & 1) ?
5230                                active_dwell : passive_dwell);
5231
5232                 scan_ch++;
5233                 added++;
5234         }
5235
5236         IWL_DEBUG_SCAN("total channels to scan %d\n", added);
5237         return added;
5238 }
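/* As used above, scan_ch->type packs: bit 0 = active (1) vs. passive (0)
 * scan, bits 1.. = the direct-probe SSID mask (active scans only), and
 * bit 7 set for narrow channels. */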
5239
5240 static void iwl_reset_channel_flag(struct iwl_priv *priv)
5241 {
5242         int i, j;
5243         for (i = 0; i < 3; i++) {
5244                 struct ieee80211_hw_mode *hw_mode = (void *)&priv->modes[i];
5245                 for (j = 0; j < hw_mode->num_channels; j++)
5246                         hw_mode->channels[j].flag = hw_mode->channels[j].val;
5247         }
5248 }
5249
5250 static void iwl_init_hw_rates(struct iwl_priv *priv,
5251                               struct ieee80211_rate *rates)
5252 {
5253         int i;
5254
5255         for (i = 0; i < IWL_RATE_COUNT; i++) {
5256                 rates[i].rate = iwl_rates[i].ieee * 5;
5257                 rates[i].val = i; /* Rate scaling will work on indexes */
5258                 rates[i].val2 = i;
5259                 rates[i].flags = IEEE80211_RATE_SUPPORTED;
5260                 /* Only the OFDM rates get the OFDM flag set */
5261                 if ((i <= IWL_LAST_OFDM_RATE) && (i >= IWL_FIRST_OFDM_RATE))
5262                         rates[i].flags |= IEEE80211_RATE_OFDM;
5263                 else {
5264                         /*
5265                          * If CCK 1M then set rate flag to CCK else CCK_2
5266                          * which is CCK | PREAMBLE2
5267                          */
5268                         rates[i].flags |= (iwl_rates[i].plcp == 10) ?
5269                                 IEEE80211_RATE_CCK : IEEE80211_RATE_CCK_2;
5270                 }
5271
5272                 /* Set up which ones are basic rates... */
5273                 if (IWL_BASIC_RATES_MASK & (1 << i))
5274                         rates[i].flags |= IEEE80211_RATE_BASIC;
5275         }
5276 }
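/* Note: rates[i].rate above ends up in units of 100 kbps (the mac80211
 * convention of this era), assuming iwl_rates[].ieee holds the 802.11
 * supported-rates value in 500 kbps units; e.g. 54 Mbps -> ieee 108 ->
 * 108 * 5 = 540. */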
5277
5278 /**
5279  * iwl_init_geos - Initialize mac80211's geo/channel info based on EEPROM contents
5280  */
5281 static int iwl_init_geos(struct iwl_priv *priv)
5282 {
5283         struct iwl_channel_info *ch;
5284         struct ieee80211_hw_mode *modes;
5285         struct ieee80211_channel *channels;
5286         struct ieee80211_channel *geo_ch;
5287         struct ieee80211_rate *rates;
5288         int i = 0;
5289         enum {
5290                 A = 0,
5291                 B = 1,
5292                 G = 2,
5293         };
5294         int mode_count = 3;
5295
5296         if (priv->modes) {
5297                 IWL_DEBUG_INFO("Geography modes already initialized.\n");
5298                 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
5299                 return 0;
5300         }
5301
5302         modes = kzalloc(sizeof(struct ieee80211_hw_mode) * mode_count,
5303                         GFP_KERNEL);
5304         if (!modes)
5305                 return -ENOMEM;
5306
5307         channels = kzalloc(sizeof(struct ieee80211_channel) *
5308                            priv->channel_count, GFP_KERNEL);
5309         if (!channels) {
5310                 kfree(modes);
5311                 return -ENOMEM;
5312         }
5313
5314         rates = kzalloc((sizeof(struct ieee80211_rate) * (IWL_MAX_RATES + 1)),
5315                         GFP_KERNEL);
5316         if (!rates) {
5317                 kfree(modes);
5318                 kfree(channels);
5319                 return -ENOMEM;
5320         }
5321
5322         /* 0 = 802.11a
5323          * 1 = 802.11b
5324          * 2 = 802.11g
5325          */
5326
5327         /* 5.2GHz channels start after the 2.4GHz channels */
5328         modes[A].mode = MODE_IEEE80211A;
5329         modes[A].channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)];
5330         modes[A].rates = rates;
5331         modes[A].num_rates = 8; /* just OFDM */
5332         modes[A].num_channels = 0;
5333
5334         modes[B].mode = MODE_IEEE80211B;
5335         modes[B].channels = channels;
5336         modes[B].rates = &rates[8];
5337         modes[B].num_rates = 4; /* just CCK */
5338         modes[B].num_channels = 0;
5339
5340         modes[G].mode = MODE_IEEE80211G;
5341         modes[G].channels = channels;
5342         modes[G].rates = rates;
5343         modes[G].num_rates = 12;        /* OFDM & CCK */
5344         modes[G].num_channels = 0;
5345
5346         priv->ieee_channels = channels;
5347         priv->ieee_rates = rates;
5348
5349         iwl_init_hw_rates(priv, rates);
5350
5351         for (i = 0, geo_ch = channels; i < priv->channel_count; i++) {
5352                 ch = &priv->channel_info[i];
5353
5354                 if (!is_channel_valid(ch)) {
5355                         IWL_DEBUG_INFO("Channel %d [%sGHz] is restricted -- "
5356                                     "skipping.\n",
5357                                     ch->channel, is_channel_a_band(ch) ?
5358                                     "5.2" : "2.4");
5359                         continue;
5360                 }
5361
5362                 if (is_channel_a_band(ch))
5363                         geo_ch = &modes[A].channels[modes[A].num_channels++];
5364                 else {
5365                         geo_ch = &modes[B].channels[modes[B].num_channels++];
5366                         modes[G].num_channels++;
5367                 }
5368
5369                 geo_ch->freq = ieee80211chan2mhz(ch->channel);
5370                 geo_ch->chan = ch->channel;
5371                 geo_ch->power_level = ch->max_power_avg;
5372                 geo_ch->antenna_max = 0xff;
5373
5374                 if (is_channel_valid(ch)) {
5375                         geo_ch->flag = IEEE80211_CHAN_W_SCAN;
5376                         if (ch->flags & EEPROM_CHANNEL_IBSS)
5377                                 geo_ch->flag |= IEEE80211_CHAN_W_IBSS;
5378
5379                         if (ch->flags & EEPROM_CHANNEL_ACTIVE)
5380                                 geo_ch->flag |= IEEE80211_CHAN_W_ACTIVE_SCAN;
5381
5382                         if (ch->flags & EEPROM_CHANNEL_RADAR)
5383                                 geo_ch->flag |= IEEE80211_CHAN_W_RADAR_DETECT;
5384
5385                         if (ch->max_power_avg > priv->max_channel_txpower_limit)
5386                                 priv->max_channel_txpower_limit =
5387                                     ch->max_power_avg;
5388                 }
5389
5390                 geo_ch->val = geo_ch->flag;
5391         }
5392
5393         if ((modes[A].num_channels == 0) && priv->is_abg) {
5394                 printk(KERN_INFO DRV_NAME
5395                        ": Incorrectly detected BG card as ABG.  Please send "
5396                        "your PCI ID 0x%04X:0x%04X to maintainer.\n",
5397                        priv->pci_dev->device, priv->pci_dev->subsystem_device);
5398                 priv->is_abg = 0;
5399         }
5400
5401         printk(KERN_INFO DRV_NAME
5402                ": Tunable channels: %d 802.11bg, %d 802.11a channels\n",
5403                modes[G].num_channels, modes[A].num_channels);
5404
5405         /*
5406          * NOTE:  We register these in order of preference -- the
5407          * stack doesn't currently (as of 7.0.6 / Apr 24 '07) pick
5408          * a phymode based on rates or AP capabilities, but seems to
5409          * configure it purely on whether the channel being configured
5410          * is supported by a mode -- and the first match is taken
5411          */
5412
5413         if (modes[G].num_channels)
5414                 ieee80211_register_hwmode(priv->hw, &modes[G]);
5415         if (modes[B].num_channels)
5416                 ieee80211_register_hwmode(priv->hw, &modes[B]);
5417         if (modes[A].num_channels)
5418                 ieee80211_register_hwmode(priv->hw, &modes[A]);
5419
5420         priv->modes = modes;
5421         set_bit(STATUS_GEO_CONFIGURED, &priv->status);
5422
5423         return 0;
5424 }
5425
5426 /******************************************************************************
5427  *
5428  * uCode download functions
5429  *
5430  ******************************************************************************/
5431
5432 static void iwl_dealloc_ucode_pci(struct iwl_priv *priv)
5433 {
5434         if (priv->ucode_code.v_addr != NULL) {
5435                 pci_free_consistent(priv->pci_dev,
5436                                     priv->ucode_code.len,
5437                                     priv->ucode_code.v_addr,
5438                                     priv->ucode_code.p_addr);
5439                 priv->ucode_code.v_addr = NULL;
5440         }
5441         if (priv->ucode_data.v_addr != NULL) {
5442                 pci_free_consistent(priv->pci_dev,
5443                                     priv->ucode_data.len,
5444                                     priv->ucode_data.v_addr,
5445                                     priv->ucode_data.p_addr);
5446                 priv->ucode_data.v_addr = NULL;
5447         }
5448         if (priv->ucode_data_backup.v_addr != NULL) {
5449                 pci_free_consistent(priv->pci_dev,
5450                                     priv->ucode_data_backup.len,
5451                                     priv->ucode_data_backup.v_addr,
5452                                     priv->ucode_data_backup.p_addr);
5453                 priv->ucode_data_backup.v_addr = NULL;
5454         }
5455         if (priv->ucode_init.v_addr != NULL) {
5456                 pci_free_consistent(priv->pci_dev,
5457                                     priv->ucode_init.len,
5458                                     priv->ucode_init.v_addr,
5459                                     priv->ucode_init.p_addr);
5460                 priv->ucode_init.v_addr = NULL;
5461         }
5462         if (priv->ucode_init_data.v_addr != NULL) {
5463                 pci_free_consistent(priv->pci_dev,
5464                                     priv->ucode_init_data.len,
5465                                     priv->ucode_init_data.v_addr,
5466                                     priv->ucode_init_data.p_addr);
5467                 priv->ucode_init_data.v_addr = NULL;
5468         }
5469         if (priv->ucode_boot.v_addr != NULL) {
5470                 pci_free_consistent(priv->pci_dev,
5471                                     priv->ucode_boot.len,
5472                                     priv->ucode_boot.v_addr,
5473                                     priv->ucode_boot.p_addr);
5474                 priv->ucode_boot.v_addr = NULL;
5475         }
5476 }
5477
5478 /**
5479  * iwl_verify_inst_full - verify runtime uCode image in card vs. host,
5480  *     looking at all data.
5481  */
5482 static int iwl_verify_inst_full(struct iwl_priv *priv, __le32 *image, u32 len)
5483 {
5484         u32 val;
5485         u32 save_len = len;
5486         int rc = 0;
5487         u32 errcnt;
5488
5489         IWL_DEBUG_INFO("ucode inst image size is %u\n", len);
5490
5491         rc = iwl_grab_restricted_access(priv);
5492         if (rc)
5493                 return rc;
5494
5495         iwl_write_restricted(priv, HBUS_TARG_MEM_RADDR, RTC_INST_LOWER_BOUND);
5496
5497         errcnt = 0;
5498         for (; len > 0; len -= sizeof(u32), image++) {
5499                 /* read data comes through single port, auto-incr addr */
5500                 /* NOTE: Use the debugless read so we don't flood kernel log
5501                  * if IWL_DL_IO is set */
5502                 val = _iwl_read_restricted(priv, HBUS_TARG_MEM_RDAT);
5503                 if (val != le32_to_cpu(*image)) {
5504                         IWL_ERROR("uCode INST section is invalid at "
5505                                   "offset 0x%x, is 0x%x, s/b 0x%x\n",
5506                                   save_len - len, val, le32_to_cpu(*image));
5507                         rc = -EIO;
5508                         errcnt++;
5509                         if (errcnt >= 20)
5510                                 break;
5511                 }
5512         }
5513
5514         iwl_release_restricted_access(priv);
5515
5516         if (!errcnt)
5517                 IWL_DEBUG_INFO
5518                     ("ucode image in INSTRUCTION memory is good\n");
5519
5520         return rc;
5521 }
5522
5523
5524 /**
5525  * iwl_verify_inst_sparse - verify runtime uCode image in card vs. host,
5526  *   using sample data 100 bytes apart.  If these sample points are good,
5527  *   it's a pretty good bet that everything between them is good, too.
5528  */
5529 static int iwl_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
5530 {
5531         u32 val;
5532         int rc = 0;
5533         u32 errcnt = 0;
5534         u32 i;
5535
5536         IWL_DEBUG_INFO("ucode inst image size is %u\n", len);
5537
5538         rc = iwl_grab_restricted_access(priv);
5539         if (rc)
5540                 return rc;
5541
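        /* Sample one 32-bit word every 100 bytes; the image pointer advances
         * 100/sizeof(u32) words per step to stay at the same byte offset. */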
5542         for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
5543                 /* read data comes through single port, auto-incr addr */
5544                 /* NOTE: Use the debugless read so we don't flood kernel log
5545                  * if IWL_DL_IO is set */
5546                 iwl_write_restricted(priv, HBUS_TARG_MEM_RADDR,
5547                         i + RTC_INST_LOWER_BOUND);
5548                 val = _iwl_read_restricted(priv, HBUS_TARG_MEM_RDAT);
5549                 if (val != le32_to_cpu(*image)) {
5550 #if 0 /* Enable this if you want to see details */
5551                         IWL_ERROR("uCode INST section is invalid at "
5552                                   "offset 0x%x, is 0x%x, s/b 0x%x\n",
5553                                   i, val, *image);
5554 #endif
5555                         rc = -EIO;
5556                         errcnt++;
5557                         if (errcnt >= 3)
5558                                 break;
5559                 }
5560         }
5561
5562         iwl_release_restricted_access(priv);
5563
5564         return rc;
5565 }
5566
5567
5568 /**
5569  * iwl_verify_ucode - determine which instruction image is in SRAM,
5570  *    and verify its contents
5571  */
5572 static int iwl_verify_ucode(struct iwl_priv *priv)
5573 {
5574         __le32 *image;
5575         u32 len;
5576         int rc = 0;
5577
5578         /* Try bootstrap */
5579         image = (__le32 *)priv->ucode_boot.v_addr;
5580         len = priv->ucode_boot.len;
5581         rc = iwl_verify_inst_sparse(priv, image, len);
5582         if (rc == 0) {
5583                 IWL_DEBUG_INFO("Bootstrap uCode is good in inst SRAM\n");
5584                 return 0;
5585         }
5586
5587         /* Try initialize */
5588         image = (__le32 *)priv->ucode_init.v_addr;
5589         len = priv->ucode_init.len;
5590         rc = iwl_verify_inst_sparse(priv, image, len);
5591         if (rc == 0) {
5592                 IWL_DEBUG_INFO("Initialize uCode is good in inst SRAM\n");
5593                 return 0;
5594         }
5595
5596         /* Try runtime/protocol */
5597         image = (__le32 *)priv->ucode_code.v_addr;
5598         len = priv->ucode_code.len;
5599         rc = iwl_verify_inst_sparse(priv, image, len);
5600         if (rc == 0) {
5601                 IWL_DEBUG_INFO("Runtime uCode is good in inst SRAM\n");
5602                 return 0;
5603         }
5604
5605         IWL_ERROR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
5606
5607         /* Show first several data entries in instruction SRAM.
5608          * Selection of bootstrap image is arbitrary. */
5609         image = (__le32 *)priv->ucode_boot.v_addr;
5610         len = priv->ucode_boot.len;
5611         rc = iwl_verify_inst_full(priv, image, len);
5612
5613         return rc;
5614 }
5615
5616
5617 /* check contents of special bootstrap uCode SRAM */
5618 static int iwl_verify_bsm(struct iwl_priv *priv)
5619 {
5620         __le32 *image = priv->ucode_boot.v_addr;
5621         u32 len = priv->ucode_boot.len;
5622         u32 reg;
5623         u32 val;
5624
5625         IWL_DEBUG_INFO("Begin verify bsm\n");
5626
5627         /* verify BSM SRAM contents */
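        /* The DWCOUNT value read below is not checked here; the loop that
         * follows compares the full bootstrap image word by word instead. */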
5628         val = iwl_read_restricted_reg(priv, BSM_WR_DWCOUNT_REG);
5629         for (reg = BSM_SRAM_LOWER_BOUND;
5630              reg < BSM_SRAM_LOWER_BOUND + len;
5631              reg += sizeof(u32), image++) {
5632                 val = iwl_read_restricted_reg(priv, reg);
5633                 if (val != le32_to_cpu(*image)) {
5634                         IWL_ERROR("BSM uCode verification failed at "
5635                                   "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
5636                                   BSM_SRAM_LOWER_BOUND,
5637                                   reg - BSM_SRAM_LOWER_BOUND, len,
5638                                   val, le32_to_cpu(*image));
5639                         return -EIO;
5640                 }
5641         }
5642
5643         IWL_DEBUG_INFO("BSM bootstrap uCode image OK\n");
5644
5645         return 0;
5646 }
5647
5648 /**
5649  * iwl_load_bsm - Load bootstrap instructions
5650  *
5651  * BSM operation:
5652  *
5653  * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
5654  * in special SRAM that does not power down during RFKILL.  When powering back
5655  * up after power-saving sleeps (or during initial uCode load), the BSM loads
5656  * the bootstrap program into the on-board processor, and starts it.
5657  *
5658  * The bootstrap program loads (via DMA) instructions and data for a new
5659  * program from host DRAM locations indicated by the host driver in the
5660  * BSM_DRAM_* registers.  Once the new program is loaded, it starts
5661  * automatically.
5662  *
5663  * When initializing the NIC, the host driver points the BSM to the
5664  * "initialize" uCode image.  This uCode sets up some internal data, then
5665  * notifies host via "initialize alive" that it is complete.
5666  *
5667  * The host then replaces the BSM_DRAM_* pointer values to point to the
5668  * normal runtime uCode instructions and a backup uCode data cache buffer
5669  * (filled initially with starting data values for the on-board processor),
5670  * then triggers the "initialize" uCode to load and launch the runtime uCode,
5671  * which begins normal operation.
5672  *
5673  * When doing a power-save shutdown, runtime uCode saves data SRAM into
5674  * the backup data cache in DRAM before SRAM is powered down.
5675  *
5676  * When powering back up, the BSM loads the bootstrap program.  This reloads
5677  * the runtime uCode instructions and the backup data cache into SRAM,
5678  * and re-launches the runtime uCode from where it left off.
5679  */
5680 static int iwl_load_bsm(struct iwl_priv *priv)
5681 {
5682         __le32 *image = priv->ucode_boot.v_addr;
5683         u32 len = priv->ucode_boot.len;
5684         dma_addr_t pinst;
5685         dma_addr_t pdata;
5686         u32 inst_len;
5687         u32 data_len;
5688         int rc;
5689         int i;
5690         u32 done;
5691         u32 reg_offset;
5692
5693         IWL_DEBUG_INFO("Begin load bsm\n");
5694
5695         /* make sure bootstrap program is no larger than BSM's SRAM size */
5696         if (len > IWL_MAX_BSM_SIZE)
5697                 return -EINVAL;
5698
5699         /* Tell bootstrap uCode where to find the "Initialize" uCode
5700          *   in host DRAM ... bits 31:0 for 3945, bits 35:4 for 4965.
5701          * NOTE:  iwl_init_alive_start() will replace these values,
5702          *        after the "initialize" uCode has run, to point to
5703          *        runtime/protocol instructions and backup data cache. */
5704         pinst = priv->ucode_init.p_addr;
5705         pdata = priv->ucode_init_data.p_addr;
5706         inst_len = priv->ucode_init.len;
5707         data_len = priv->ucode_init_data.len;
5708
5709         rc = iwl_grab_restricted_access(priv);
5710         if (rc)
5711                 return rc;
5712
5713         iwl_write_restricted_reg(priv, BSM_DRAM_INST_PTR_REG, pinst);
5714         iwl_write_restricted_reg(priv, BSM_DRAM_DATA_PTR_REG, pdata);
5715         iwl_write_restricted_reg(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
5716         iwl_write_restricted_reg(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
5717
5718         /* Fill BSM memory with bootstrap instructions */
5719         for (reg_offset = BSM_SRAM_LOWER_BOUND;
5720              reg_offset < BSM_SRAM_LOWER_BOUND + len;
5721              reg_offset += sizeof(u32), image++)
5722                 _iwl_write_restricted_reg(priv, reg_offset,
5723                                           le32_to_cpu(*image));
5724
5725         rc = iwl_verify_bsm(priv);
5726         if (rc) {
5727                 iwl_release_restricted_access(priv);
5728                 return rc;
5729         }
5730
5731         /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
5732         iwl_write_restricted_reg(priv, BSM_WR_MEM_SRC_REG, 0x0);
5733         iwl_write_restricted_reg(priv, BSM_WR_MEM_DST_REG,
5734                                  RTC_INST_LOWER_BOUND);
5735         iwl_write_restricted_reg(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
5736
5737         /* Load bootstrap code into instruction SRAM now,
5738          *   to prepare to load "initialize" uCode */
5739         iwl_write_restricted_reg(priv, BSM_WR_CTRL_REG,
5740                 BSM_WR_CTRL_REG_BIT_START);
5741
5742         /* Wait for load of bootstrap uCode to finish */
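        /* Poll up to 100 times with a 10 usec delay between reads (~1 ms). */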
5743         for (i = 0; i < 100; i++) {
5744                 done = iwl_read_restricted_reg(priv, BSM_WR_CTRL_REG);
5745                 if (!(done & BSM_WR_CTRL_REG_BIT_START))
5746                         break;
5747                 udelay(10);
5748         }
5749         if (i < 100)
5750                 IWL_DEBUG_INFO("BSM write complete, poll %d iterations\n", i);
5751         else {
5752                 IWL_ERROR("BSM write did not complete!\n");
                iwl_release_restricted_access(priv);
5753                 return -EIO;
5754         }
5755
5756         /* Enable future boot loads whenever power management unit triggers it
5757          *   (e.g. when powering back up after power-save shutdown) */
5758         iwl_write_restricted_reg(priv, BSM_WR_CTRL_REG,
5759                 BSM_WR_CTRL_REG_BIT_START_EN);
5760
5761         iwl_release_restricted_access(priv);
5762
5763         return 0;
5764 }
5765
5766 static void iwl_nic_start(struct iwl_priv *priv)
5767 {
5768         /* Remove all resets to allow NIC to operate */
5769         iwl_write32(priv, CSR_RESET, 0);
5770 }
5771
5772 /**
5773  * iwl_read_ucode - Read uCode images from disk file.
5774  *
5775  * Copy into buffers for card to fetch via bus-mastering
5776  */
5777 static int iwl_read_ucode(struct iwl_priv *priv)
5778 {
5779         struct iwl_ucode *ucode;
5780         int rc = 0;
5781         const struct firmware *ucode_raw;
5782         /* firmware file name contains uCode/driver compatibility version */
5783         const char *name = "iwlwifi-3945" IWL3945_UCODE_API ".ucode";
5784         u8 *src;
5785         size_t len;
5786         u32 ver, inst_size, data_size, init_size, init_data_size, boot_size;
5787
5788         /* Ask kernel firmware_class module to get the boot firmware off disk.
5789          * request_firmware() is synchronous, file is in memory on return. */
5790         rc = request_firmware(&ucode_raw, name, &priv->pci_dev->dev);
5791         if (rc < 0) {
5792                 IWL_ERROR("%s firmware file req failed: Reason %d\n", name, rc);
5793                 goto error;
5794         }
5795
5796         IWL_DEBUG_INFO("Got firmware '%s' file (%zd bytes) from disk\n",
5797                        name, ucode_raw->size);
5798
5799         /* Make sure that we got at least our header! */
5800         if (ucode_raw->size < sizeof(*ucode)) {
5801                 IWL_ERROR("File size way too small!\n");
5802                 rc = -EINVAL;
5803                 goto err_release;
5804         }
5805
5806         /* Data from ucode file:  header followed by uCode images */
5807         ucode = (void *)ucode_raw->data;
5808
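        /*
         * File layout implied by the header fields and the copy offsets
         * used below (all header values are little-endian u32; the image
         * blocks follow the header back to back):
         *
         *   ver, inst_size, data_size, init_size, init_data_size, boot_size
         *   runtime instructions    (inst_size bytes)
         *   runtime data            (data_size bytes)
         *   init instructions       (init_size bytes)
         *   init data               (init_data_size bytes)
         *   bootstrap instructions  (boot_size bytes)
         */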
5809         ver = le32_to_cpu(ucode->ver);
5810         inst_size = le32_to_cpu(ucode->inst_size);
5811         data_size = le32_to_cpu(ucode->data_size);
5812         init_size = le32_to_cpu(ucode->init_size);
5813         init_data_size = le32_to_cpu(ucode->init_data_size);
5814         boot_size = le32_to_cpu(ucode->boot_size);
5815
5816         IWL_DEBUG_INFO("f/w package hdr ucode version = 0x%x\n", ver);
5817         IWL_DEBUG_INFO("f/w package hdr runtime inst size = %u\n",
5818                        inst_size);
5819         IWL_DEBUG_INFO("f/w package hdr runtime data size = %u\n",
5820                        data_size);
5821         IWL_DEBUG_INFO("f/w package hdr init inst size = %u\n",
5822                        init_size);
5823         IWL_DEBUG_INFO("f/w package hdr init data size = %u\n",
5824                        init_data_size);
5825         IWL_DEBUG_INFO("f/w package hdr boot inst size = %u\n",
5826                        boot_size);
5827
5828         /* Verify size of file vs. image size info in file's header */
5829         if (ucode_raw->size < sizeof(*ucode) +
5830                 inst_size + data_size + init_size +
5831                 init_data_size + boot_size) {
5832
5833                 IWL_DEBUG_INFO("uCode file size %d too small\n",
5834                                (int)ucode_raw->size);
5835                 rc = -EINVAL;
5836                 goto err_release;
5837         }
5838
5839         /* Verify that uCode images will fit in card's SRAM */
5840         if (inst_size > IWL_MAX_INST_SIZE) {
5841                 IWL_DEBUG_INFO("uCode instr len %d too large to fit in card\n",
5842                                (int)inst_size);
5843                 rc = -EINVAL;
5844                 goto err_release;
5845         }
5846
5847         if (data_size > IWL_MAX_DATA_SIZE) {
5848                 IWL_DEBUG_INFO("uCode data len %d too large to fit in card\n",
5849                                (int)data_size);
5850                 rc = -EINVAL;
5851                 goto err_release;
5852         }
5853         if (init_size > IWL_MAX_INST_SIZE) {
5854                 IWL_DEBUG_INFO
5855                     ("uCode init instr len %d too large to fit in card\n",
5856                      (int)init_size);
5857                 rc = -EINVAL;
5858                 goto err_release;
5859         }
5860         if (init_data_size > IWL_MAX_DATA_SIZE) {
5861                 IWL_DEBUG_INFO
5862                     ("uCode init data len %d too large to fit in card\n",
5863                      (int)init_data_size);
5864                 rc = -EINVAL;
5865                 goto err_release;
5866         }
5867         if (boot_size > IWL_MAX_BSM_SIZE) {
5868                 IWL_DEBUG_INFO
5869                     ("uCode boot instr len %d too large to fit in bsm\n",
5870                      (int)boot_size);
5871                 rc = -EINVAL;
5872                 goto err_release;
5873         }
5874
5875         /* Allocate ucode buffers for card's bus-master loading ... */
5876
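        /* Any pci_alloc_consistent() failure below is caught collectively
         * after all six allocations; err_pci_alloc frees whatever succeeded. */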
5877         /* Runtime instructions and 2 copies of data:
5878          * 1) unmodified from disk
5879          * 2) backup cache for save/restore during power-downs */
5880         priv->ucode_code.len = inst_size;
5881         priv->ucode_code.v_addr =
5882             pci_alloc_consistent(priv->pci_dev,
5883                                  priv->ucode_code.len,
5884                                  &(priv->ucode_code.p_addr));
5885
5886         priv->ucode_data.len = data_size;
5887         priv->ucode_data.v_addr =
5888             pci_alloc_consistent(priv->pci_dev,
5889                                  priv->ucode_data.len,
5890                                  &(priv->ucode_data.p_addr));
5891
5892         priv->ucode_data_backup.len = data_size;
5893         priv->ucode_data_backup.v_addr =
5894             pci_alloc_consistent(priv->pci_dev,
5895                                  priv->ucode_data_backup.len,
5896                                  &(priv->ucode_data_backup.p_addr));
5897
5898
5899         /* Initialization instructions and data */
5900         priv->ucode_init.len = init_size;
5901         priv->ucode_init.v_addr =
5902             pci_alloc_consistent(priv->pci_dev,
5903                                  priv->ucode_init.len,
5904                                  &(priv->ucode_init.p_addr));
5905
5906         priv->ucode_init_data.len = init_data_size;
5907         priv->ucode_init_data.v_addr =
5908             pci_alloc_consistent(priv->pci_dev,
5909                                  priv->ucode_init_data.len,
5910                                  &(priv->ucode_init_data.p_addr));
5911
5912         /* Bootstrap (instructions only, no data) */
5913         priv->ucode_boot.len = boot_size;
5914         priv->ucode_boot.v_addr =
5915             pci_alloc_consistent(priv->pci_dev,
5916                                  priv->ucode_boot.len,
5917                                  &(priv->ucode_boot.p_addr));
5918
5919         if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
5920             !priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr ||
5921             !priv->ucode_boot.v_addr || !priv->ucode_data_backup.v_addr)
5922                 goto err_pci_alloc;
5923
5924         /* Copy images into buffers for card's bus-master reads ... */
5925
5926         /* Runtime instructions (first block of data in file) */
5927         src = &ucode->data[0];
5928         len = priv->ucode_code.len;
5929         IWL_DEBUG_INFO("Copying (but not loading) uCode instr len %d\n",
5930                        (int)len);
5931         memcpy(priv->ucode_code.v_addr, src, len);
5932         IWL_DEBUG_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
5933                 priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
5934
5935         /* Runtime data (2nd block)
5936          * NOTE:  Copy into backup buffer will be done in iwl_up()  */
5937         src = &ucode->data[inst_size];
5938         len = priv->ucode_data.len;
5939         IWL_DEBUG_INFO("Copying (but not loading) uCode data len %d\n",
5940                        (int)len);
5941         memcpy(priv->ucode_data.v_addr, src, len);
5942         memcpy(priv->ucode_data_backup.v_addr, src, len);
5943
5944         /* Initialization instructions (3rd block) */
5945         if (init_size) {
5946                 src = &ucode->data[inst_size + data_size];
5947                 len = priv->ucode_init.len;
5948                 IWL_DEBUG_INFO("Copying (but not loading) init instr len %d\n",
5949                                (int)len);
5950                 memcpy(priv->ucode_init.v_addr, src, len);
5951         }
5952
5953         /* Initialization data (4th block) */
5954         if (init_data_size) {
5955                 src = &ucode->data[inst_size + data_size + init_size];
5956                 len = priv->ucode_init_data.len;
5957                 IWL_DEBUG_INFO("Copying (but not loading) init data len %d\n",
5958                                (int)len);
5959                 memcpy(priv->ucode_init_data.v_addr, src, len);
5960         }
5961
5962         /* Bootstrap instructions (5th block) */
5963         src = &ucode->data[inst_size + data_size + init_size + init_data_size];
5964         len = priv->ucode_boot.len;
5965         IWL_DEBUG_INFO("Copying (but not loading) boot instr len %d\n",
5966                        (int)len);
5967         memcpy(priv->ucode_boot.v_addr, src, len);
5968
5969         /* We have our copies now, allow the OS to release its copy */
5970         release_firmware(ucode_raw);
5971         return 0;
5972
5973  err_pci_alloc:
5974         IWL_ERROR("failed to allocate pci memory\n");
5975         rc = -ENOMEM;
5976         iwl_dealloc_ucode_pci(priv);
5977
5978  err_release:
5979         release_firmware(ucode_raw);
5980
5981  error:
5982         return rc;
5983 }
5984
5985
5986 /**
5987  * iwl_set_ucode_ptrs - Set uCode address location
5988  *
5989  * Tell initialization uCode where to find runtime uCode.
5990  *
5991  * BSM registers initially contain pointers to initialization uCode.
5992  * We need to replace them to load runtime uCode inst and data,
5993  * and to save runtime data when powering down.
5994  */
5995 static int iwl_set_ucode_ptrs(struct iwl_priv *priv)
5996 {
5997         dma_addr_t pinst;
5998         dma_addr_t pdata;
5999         int rc = 0;
6000         unsigned long flags;
6001
6002         /* bits 31:0 for 3945 */
6003         pinst = priv->ucode_code.p_addr;
6004         pdata = priv->ucode_data_backup.p_addr;
6005
6006         spin_lock_irqsave(&priv->lock, flags);
6007         rc = iwl_grab_restricted_access(priv);
6008         if (rc) {
6009                 spin_unlock_irqrestore(&priv->lock, flags);
6010                 return rc;
6011         }
6012
6013         /* Tell bootstrap uCode where to find image to load */
6014         iwl_write_restricted_reg(priv, BSM_DRAM_INST_PTR_REG, pinst);
6015         iwl_write_restricted_reg(priv, BSM_DRAM_DATA_PTR_REG, pdata);
6016         iwl_write_restricted_reg(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
6017                                  priv->ucode_data.len);
6018
6019         /* Inst bytecount must be set up last; bit 31 signals uCode
6020          *   that all new ptr/size info is in place */
6021         iwl_write_restricted_reg(priv, BSM_DRAM_INST_BYTECOUNT_REG,
6022                                  priv->ucode_code.len | BSM_DRAM_INST_LOAD);
6023
6024         iwl_release_restricted_access(priv);
6025
6026         spin_unlock_irqrestore(&priv->lock, flags);
6027
6028         IWL_DEBUG_INFO("Runtime uCode pointers are set.\n");
6029
6030         return rc;
6031 }
6032
6033 /**
6034  * iwl_init_alive_start - Called after REPLY_ALIVE notification received
6035  *
6036  * Called after REPLY_ALIVE notification received from "initialize" uCode.
6037  *
6038  * The 4965 "initialize" ALIVE reply contains calibration data for:
6039  *   Voltage, temperature, and MIMO tx gain correction, now stored in priv
6040  *   (3945 does not contain this data).
6041  *
6042  * Tell &q