[IWLWIFI]: add iwlwifi wireless drivers
drivers/net/wireless/iwlwifi/iwl4965-base.c
1 /******************************************************************************
2  *
3  * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved.
4  *
5  * Portions of this file are derived from the ipw3945 project, as well
6  * as portions of the ieee80211 subsystem header files.
7  *
8  * This program is free software; you can redistribute it and/or modify it
9  * under the terms of version 2 of the GNU General Public License as
10  * published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but WITHOUT
13  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
15  * more details.
16  *
17  * You should have received a copy of the GNU General Public License along with
18  * this program; if not, write to the Free Software Foundation, Inc.,
19  * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
20  *
21  * The full GNU General Public License is included in this distribution in the
22  * file called LICENSE.
23  *
24  * Contact Information:
25  * James P. Ketrenos <ipw2100-admin@linux.intel.com>
26  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27  *
28  *****************************************************************************/
29
30 /*
31  * NOTE:  This file (iwl-base.c) is used to build for multiple hardware targets
32  * by defining IWL to either 3945 or 4965.  The Makefile used when building
33  * the base targets will create base-3945.o and base-4965.o
34  *
35  * The eventual goal is to move as many of the #if IWL / #endif blocks out of
36  * this file and into the hardware specific implementation files (iwl-XXXX.c)
37  * and leave only the common (non #ifdef sprinkled) code in this file
38  */
39
40 #include <linux/kernel.h>
41 #include <linux/module.h>
42 #include <linux/version.h>
43 #include <linux/init.h>
44 #include <linux/pci.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/delay.h>
47 #include <linux/skbuff.h>
48 #include <linux/netdevice.h>
49 #include <linux/wireless.h>
50 #include <linux/firmware.h>
53 #include <linux/etherdevice.h>
54 #include <linux/if_arp.h>
55
56 #include <net/ieee80211_radiotap.h>
57 #include <net/mac80211.h>
58
59 #include <asm/div64.h>
60
61 #include "iwlwifi.h"
62 #include "iwl-4965.h"
63 #include "iwl-helpers.h"
64
65 #ifdef CONFIG_IWLWIFI_DEBUG
66 u32 iwl_debug_level;
67 #endif
68
69 /******************************************************************************
70  *
71  * module boilerplate
72  *
73  ******************************************************************************/
74
75 /* module parameters */
76 int iwl_param_disable_hw_scan;
77 int iwl_param_debug;
78 int iwl_param_disable;      /* def: enable radio */
79 int iwl_param_antenna;      /* def: 0 = both antennas (use diversity) */
80 int iwl_param_hwcrypto;     /* def: using software encryption */
81 int iwl_param_qos_enable = 1;
82 int iwl_param_queues_num = IWL_MAX_NUM_QUEUES;
83
84 /*
85  * module name, copyright, version, etc.
86  * NOTE: DRV_NAME is defined in iwlwifi.h for use by iwl-debug.h and printk
87  */
88
89 #define DRV_DESCRIPTION "Intel(R) Wireless WiFi Link 4965AGN driver for Linux"
90
91 #ifdef CONFIG_IWLWIFI_DEBUG
92 #define VD "d"
93 #else
94 #define VD
95 #endif
96
97 #ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT
98 #define VS "s"
99 #else
100 #define VS
101 #endif
102
103 #define IWLWIFI_VERSION "0.1.15k" VD VS
104 #define DRV_COPYRIGHT   "Copyright(c) 2003-2007 Intel Corporation"
105 #define DRV_VERSION     IWLWIFI_VERSION
106
107 /* Change the firmware file name, using "-" and an incrementing number,
108  *   *only* when the uCode interface or architecture changes so that it
109  *   is not compatible with earlier drivers.
110  * This number also appears in the << 8 position of the 1st dword of the uCode file */
111 #define IWL4965_UCODE_API "-1"
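/*
 * Illustrative sketch (not part of the driver): per the note above, the API
 * number is carried in the "<< 8" position of the first dword of the uCode
 * file, so a loader could recover it roughly as shown below.  The field
 * width and the "first_dword" name are assumptions for illustration only.
 *
 *	u8 api = (le32_to_cpu(first_dword) >> 8) & 0xff;
 */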
112
113 MODULE_DESCRIPTION(DRV_DESCRIPTION);
114 MODULE_VERSION(DRV_VERSION);
115 MODULE_AUTHOR(DRV_COPYRIGHT);
116 MODULE_LICENSE("GPL");
117
118 __le16 *ieee80211_get_qos_ctrl(struct ieee80211_hdr *hdr)
119 {
120         u16 fc = le16_to_cpu(hdr->frame_control);
121         int hdr_len = ieee80211_get_hdrlen(fc);
122
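        /* 0x00cc covers the frame-type field (0x000c) plus the two high
         * subtype bits (0x00c0): this matches data frames whose subtype has
         * the QoS bit (0x80) set and bit 0x40 clear, i.e. the QoS data
         * subtypes 0x80-0xb0, but not QoS Null (0xc0) and above. */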
123         if ((fc & 0x00cc) == (IEEE80211_STYPE_QOS_DATA | IEEE80211_FTYPE_DATA))
124                 return (__le16 *) ((u8 *) hdr + hdr_len - QOS_CONTROL_LEN);
125         return NULL;
126 }
127
128 static const struct ieee80211_hw_mode *iwl_get_hw_mode(
129                 struct iwl_priv *priv, int mode)
130 {
131         int i;
132
133         for (i = 0; i < 3; i++)
134                 if (priv->modes[i].mode == mode)
135                         return &priv->modes[i];
136
137         return NULL;
138 }
139
140 static int iwl_is_empty_essid(const char *essid, int essid_len)
141 {
142         /* Single white space is for Linksys APs */
143         if (essid_len == 1 && essid[0] == ' ')
144                 return 1;
145
146         /* Otherwise, if the entire essid is 0, we assume it is hidden */
147         while (essid_len) {
148                 essid_len--;
149                 if (essid[essid_len] != '\0')
150                         return 0;
151         }
152
153         return 1;
154 }
155
156 static const char *iwl_escape_essid(const char *essid, u8 essid_len)
157 {
158         static char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
159         const char *s = essid;
160         char *d = escaped;
161
162         if (iwl_is_empty_essid(essid, essid_len)) {
163                 memcpy(escaped, "<hidden>", sizeof("<hidden>"));
164                 return escaped;
165         }
166
167         essid_len = min(essid_len, (u8) IW_ESSID_MAX_SIZE);
168         while (essid_len--) {
169                 if (*s == '\0') {
170                         *d++ = '\\';
171                         *d++ = '0';
172                         s++;
173                 } else
174                         *d++ = *s++;
175         }
176         *d = '\0';
177         return escaped;
178 }
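
/*
 * Example (illustrative only): an ESSID of { 'a', 0x00, 'b' } with length 3
 * is rendered by iwl_escape_essid() as the four characters "a\0b" (a literal
 * backslash and zero), while an all-zero or single-space ESSID is reported
 * as "<hidden>" by way of iwl_is_empty_essid().
 */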
179
180 static void iwl_print_hex_dump(int level, void *p, u32 len)
181 {
182 #ifdef CONFIG_IWLWIFI_DEBUG
183         if (!(iwl_debug_level & level))
184                 return;
185
186         print_hex_dump(KERN_DEBUG, "iwl data: ", DUMP_PREFIX_OFFSET, 16, 1,
187                         p, len, 1);
188 #endif
189 }
190
191 /*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
192  * DMA services
193  *
194  * Theory of operation
195  *
196  * A queue is a circular buffer with 'Read' and 'Write' pointers.
197  * Two empty entries are always kept in the buffer to protect from overflow.
198  *
199  * For a Tx queue, there are low mark and high mark limits.  If, after queuing
200  * a packet for Tx, free space becomes < low mark, the Tx queue is stopped.
201  * When reclaiming packets (on the 'tx done' IRQ), if free space becomes
202  * > high mark, the Tx queue is resumed.
203  *
204  * The device operates with six queues: one receive queue in the device's
205  * sram, one transmit queue for sending commands to the device firmware,
206  * and four transmit queues for data.
207  ***************************************************/
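
/*
 * Worked example (illustrative only): with n_bd = n_window = 64, a write
 * pointer (first_empty) of 10 and a read pointer (last_used) of 4, six
 * entries are in flight; iwl_queue_space() below reports 64 - 6 - 2 = 56
 * free slots, the "- 2" being the reserve that keeps a completely full
 * queue distinguishable from an empty one.
 */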
208
209 static int iwl_queue_space(const struct iwl_queue *q)
210 {
211         int s = q->last_used - q->first_empty;
212
213         if (q->last_used > q->first_empty)
214                 s -= q->n_bd;
215
216         if (s <= 0)
217                 s += q->n_window;
218         /* keep some reserve to not confuse empty and full situations */
219         s -= 2;
220         if (s < 0)
221                 s = 0;
222         return s;
223 }
224
225 /* XXX: n_bd must be power-of-two size */
226 static inline int iwl_queue_inc_wrap(int index, int n_bd)
227 {
228         return ++index & (n_bd - 1);
229 }
230
231 /* XXX: n_bd must be power-of-two size */
232 static inline int iwl_queue_dec_wrap(int index, int n_bd)
233 {
234         return --index & (n_bd - 1);
235 }
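
/*
 * For example (illustrative only), with n_bd = 256:
 * iwl_queue_inc_wrap(255, 256) returns 256 & 255 = 0, and
 * iwl_queue_dec_wrap(0, 256) returns -1 & 255 = 255 (two's complement),
 * which is why n_bd must be a power of two.
 */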
236
237 static inline int x2_queue_used(const struct iwl_queue *q, int i)
238 {
239         return q->first_empty > q->last_used ?
240                 (i >= q->last_used && i < q->first_empty) :
241                 !(i < q->last_used && i >= q->first_empty);
242 }
243
244 static inline u8 get_cmd_index(struct iwl_queue *q, u32 index, int is_huge)
245 {
246         if (is_huge)
247                 return q->n_window;
248
249         return index & (q->n_window - 1);
250 }
251
252 static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
253                           int count, int slots_num, u32 id)
254 {
255         q->n_bd = count;
256         q->n_window = slots_num;
257         q->id = id;
258
259         /* count must be power-of-two size, otherwise iwl_queue_inc_wrap
260          * and iwl_queue_dec_wrap are broken. */
261         BUG_ON(!is_power_of_2(count));
262
263         /* slots_num must be power-of-two size, otherwise
264          * get_cmd_index is broken. */
265         BUG_ON(!is_power_of_2(slots_num));
266
267         q->low_mark = q->n_window / 4;
268         if (q->low_mark < 4)
269                 q->low_mark = 4;
270
271         q->high_mark = q->n_window / 8;
272         if (q->high_mark < 2)
273                 q->high_mark = 2;
274
275         q->first_empty = q->last_used = 0;
276
277         return 0;
278 }
279
280 static int iwl_tx_queue_alloc(struct iwl_priv *priv,
281                               struct iwl_tx_queue *txq, u32 id)
282 {
283         struct pci_dev *dev = priv->pci_dev;
284
285         if (id != IWL_CMD_QUEUE_NUM) {
286                 txq->txb = kmalloc(sizeof(txq->txb[0]) *
287                                    TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
288                 if (!txq->txb) {
289                         IWL_ERROR("kmalloc for auxiliary BD "
290                                   "structures failed\n");
291                         goto error;
292                 }
293         } else
294                 txq->txb = NULL;
295
296         txq->bd = pci_alloc_consistent(dev,
297                         sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX,
298                         &txq->q.dma_addr);
299
300         if (!txq->bd) {
301                 IWL_ERROR("pci_alloc_consistent(%zd) failed\n",
302                           sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX);
303                 goto error;
304         }
305         txq->q.id = id;
306
307         return 0;
308
309  error:
310         if (txq->txb) {
311                 kfree(txq->txb);
312                 txq->txb = NULL;
313         }
314
315         return -ENOMEM;
316 }
317
318 int iwl_tx_queue_init(struct iwl_priv *priv,
319                       struct iwl_tx_queue *txq, int slots_num, u32 txq_id)
320 {
321         struct pci_dev *dev = priv->pci_dev;
322         int len;
323         int rc = 0;
324
325         /* Allocate command space plus one big command for scan, since the
326          * scan command is very large; the system will not have two scans at the
327          * same time */
328         len = sizeof(struct iwl_cmd) * slots_num;
329         if (txq_id == IWL_CMD_QUEUE_NUM)
330                 len +=  IWL_MAX_SCAN_SIZE;
331         txq->cmd = pci_alloc_consistent(dev, len, &txq->dma_addr_cmd);
332         if (!txq->cmd)
333                 return -ENOMEM;
334
335         rc = iwl_tx_queue_alloc(priv, txq, txq_id);
336         if (rc) {
337                 pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);
338
339                 return -ENOMEM;
340         }
341         txq->need_update = 0;
342
343         /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
344          * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
345         BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
346         iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
347
348         iwl_hw_tx_queue_init(priv, txq);
349
350         return 0;
351 }
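
/*
 * Typical usage (sketch): data queues are initialized with the per-queue TFD
 * slot count, while the command queue gets IWL_CMD_QUEUE_NUM and the extra
 * scan-command space allocated above.  The slot-count macros below are
 * assumed from the 4965 headers and shown for illustration only.
 *
 *	iwl_tx_queue_init(priv, &priv->txq[txq_id], TFD_TX_CMD_SLOTS, txq_id);
 *	iwl_tx_queue_init(priv, &priv->txq[IWL_CMD_QUEUE_NUM],
 *			  TFD_CMD_SLOTS, IWL_CMD_QUEUE_NUM);
 */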
352
353 /**
354  * iwl_tx_queue_free - Deallocate DMA queue.
355  * @txq: Transmit queue to deallocate.
356  *
357  * Empty queue by removing and destroying all BD's.
358  * Free all buffers.  txq itself is not freed.
359  *
360  */
361 void iwl_tx_queue_free(struct iwl_priv *priv, struct iwl_tx_queue *txq)
362 {
363         struct iwl_queue *q = &txq->q;
364         struct pci_dev *dev = priv->pci_dev;
365         int len;
366
367         if (q->n_bd == 0)
368                 return;
369
370         /* first, empty all BD's */
371         for (; q->first_empty != q->last_used;
372              q->last_used = iwl_queue_inc_wrap(q->last_used, q->n_bd))
373                 iwl_hw_txq_free_tfd(priv, txq);
374
375         len = sizeof(struct iwl_cmd) * q->n_window;
376         if (q->id == IWL_CMD_QUEUE_NUM)
377                 len += IWL_MAX_SCAN_SIZE;
378
379         pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);
380
381         /* free buffers belonging to queue itself */
382         if (txq->q.n_bd)
383                 pci_free_consistent(dev, sizeof(struct iwl_tfd_frame) *
384                                     txq->q.n_bd, txq->bd, txq->q.dma_addr);
385
386         if (txq->txb) {
387                 kfree(txq->txb);
388                 txq->txb = NULL;
389         }
390
391         /* 0 fill whole structure */
392         memset(txq, 0, sizeof(*txq));
393 }
394
395 const u8 BROADCAST_ADDR[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
396
397 /*************** STATION TABLE MANAGEMENT ****
398  *
399  * NOTE:  This needs to be overhauled to better synchronize between
400  * how iwl-4965.c uses iwl_hw_find_station vs. iwl-3945.c
401  *
402  * mac80211 should also be examined to determine if sta_info is duplicating
403  * the functionality provided here
404  */
405
406 /**************************************************************/
407
408 static u8 iwl_remove_station(struct iwl_priv *priv, const u8 *addr, int is_ap)
409 {
410         int index = IWL_INVALID_STATION;
411         int i;
412         unsigned long flags;
413
414         spin_lock_irqsave(&priv->sta_lock, flags);
415
416         if (is_ap)
417                 index = IWL_AP_ID;
418         else if (is_broadcast_ether_addr(addr))
419                 index = priv->hw_setting.bcast_sta_id;
420         else
421                 for (i = IWL_STA_ID; i < priv->hw_setting.max_stations; i++)
422                         if (priv->stations[i].used &&
423                             !compare_ether_addr(priv->stations[i].sta.sta.addr,
424                                                 addr)) {
425                                 index = i;
426                                 break;
427                         }
428
429         if (unlikely(index == IWL_INVALID_STATION))
430                 goto out;
431
432         if (priv->stations[index].used) {
433                 priv->stations[index].used = 0;
434                 priv->num_stations--;
435         }
436
437         BUG_ON(priv->num_stations < 0);
438
439 out:
440         spin_unlock_irqrestore(&priv->sta_lock, flags);
441         return 0;
442 }
443
444 static void iwl_clear_stations_table(struct iwl_priv *priv)
445 {
446         unsigned long flags;
447
448         spin_lock_irqsave(&priv->sta_lock, flags);
449
450         priv->num_stations = 0;
451         memset(priv->stations, 0, sizeof(priv->stations));
452
453         spin_unlock_irqrestore(&priv->sta_lock, flags);
454 }
455
456 u8 iwl_add_station(struct iwl_priv *priv, const u8 *addr, int is_ap, u8 flags)
457 {
458         int i;
459         int index = IWL_INVALID_STATION;
460         struct iwl_station_entry *station;
461         unsigned long flags_spin;
462
463         spin_lock_irqsave(&priv->sta_lock, flags_spin);
464         if (is_ap)
465                 index = IWL_AP_ID;
466         else if (is_broadcast_ether_addr(addr))
467                 index = priv->hw_setting.bcast_sta_id;
468         else
469                 for (i = IWL_STA_ID; i < priv->hw_setting.max_stations; i++) {
470                         if (!compare_ether_addr(priv->stations[i].sta.sta.addr,
471                                                 addr)) {
472                                 index = i;
473                                 break;
474                         }
475
476                         if (!priv->stations[i].used &&
477                             index == IWL_INVALID_STATION)
478                                 index = i;
479                 }
480
481
482         /* These two conditions have the same outcome, but keep them separate
483            since they have different meanings */
484         if (unlikely(index == IWL_INVALID_STATION)) {
485                 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
486                 return index;
487         }
488
489         if (priv->stations[index].used &&
490             !compare_ether_addr(priv->stations[index].sta.sta.addr, addr)) {
491                 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
492                 return index;
493         }
494
495
496         IWL_DEBUG_ASSOC("Add STA ID %d: " MAC_FMT "\n", index, MAC_ARG(addr));
497         station = &priv->stations[index];
498         station->used = 1;
499         priv->num_stations++;
500
501         memset(&station->sta, 0, sizeof(struct iwl_addsta_cmd));
502         memcpy(station->sta.sta.addr, addr, ETH_ALEN);
503         station->sta.mode = 0;
504         station->sta.sta.sta_id = index;
505         station->sta.station_flags = 0;
506
507 #ifdef CONFIG_IWLWIFI_HT
508         /* BCAST station and IBSS stations do not work in HT mode */
509         if (index != priv->hw_setting.bcast_sta_id &&
510             priv->iw_mode != IEEE80211_IF_TYPE_IBSS)
511                 iwl4965_set_ht_add_station(priv, index);
512 #endif /*CONFIG_IWLWIFI_HT*/
513
514         spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
515         iwl_send_add_station(priv, &station->sta, flags);
516         return index;
517
518 }
519
520 /*************** DRIVER STATUS FUNCTIONS   *****/
521
522 static inline int iwl_is_ready(struct iwl_priv *priv)
523 {
524         /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
525          * set but EXIT_PENDING is not */
526         return test_bit(STATUS_READY, &priv->status) &&
527                test_bit(STATUS_GEO_CONFIGURED, &priv->status) &&
528                !test_bit(STATUS_EXIT_PENDING, &priv->status);
529 }
530
531 static inline int iwl_is_alive(struct iwl_priv *priv)
532 {
533         return test_bit(STATUS_ALIVE, &priv->status);
534 }
535
536 static inline int iwl_is_init(struct iwl_priv *priv)
537 {
538         return test_bit(STATUS_INIT, &priv->status);
539 }
540
541 static inline int iwl_is_rfkill(struct iwl_priv *priv)
542 {
543         return test_bit(STATUS_RF_KILL_HW, &priv->status) ||
544                test_bit(STATUS_RF_KILL_SW, &priv->status);
545 }
546
547 static inline int iwl_is_ready_rf(struct iwl_priv *priv)
548 {
549
550         if (iwl_is_rfkill(priv))
551                 return 0;
552
553         return iwl_is_ready(priv);
554 }
555
556 /*************** HOST COMMAND QUEUE FUNCTIONS   *****/
557
558 #define IWL_CMD(x) case x : return #x
559
560 static const char *get_cmd_string(u8 cmd)
561 {
562         switch (cmd) {
563                 IWL_CMD(REPLY_ALIVE);
564                 IWL_CMD(REPLY_ERROR);
565                 IWL_CMD(REPLY_RXON);
566                 IWL_CMD(REPLY_RXON_ASSOC);
567                 IWL_CMD(REPLY_QOS_PARAM);
568                 IWL_CMD(REPLY_RXON_TIMING);
569                 IWL_CMD(REPLY_ADD_STA);
570                 IWL_CMD(REPLY_REMOVE_STA);
571                 IWL_CMD(REPLY_REMOVE_ALL_STA);
572                 IWL_CMD(REPLY_TX);
573                 IWL_CMD(REPLY_RATE_SCALE);
574                 IWL_CMD(REPLY_LEDS_CMD);
575                 IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
576                 IWL_CMD(RADAR_NOTIFICATION);
577                 IWL_CMD(REPLY_QUIET_CMD);
578                 IWL_CMD(REPLY_CHANNEL_SWITCH);
579                 IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
580                 IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD);
581                 IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION);
582                 IWL_CMD(POWER_TABLE_CMD);
583                 IWL_CMD(PM_SLEEP_NOTIFICATION);
584                 IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC);
585                 IWL_CMD(REPLY_SCAN_CMD);
586                 IWL_CMD(REPLY_SCAN_ABORT_CMD);
587                 IWL_CMD(SCAN_START_NOTIFICATION);
588                 IWL_CMD(SCAN_RESULTS_NOTIFICATION);
589                 IWL_CMD(SCAN_COMPLETE_NOTIFICATION);
590                 IWL_CMD(BEACON_NOTIFICATION);
591                 IWL_CMD(REPLY_TX_BEACON);
592                 IWL_CMD(WHO_IS_AWAKE_NOTIFICATION);
593                 IWL_CMD(QUIET_NOTIFICATION);
594                 IWL_CMD(REPLY_TX_PWR_TABLE_CMD);
595                 IWL_CMD(MEASURE_ABORT_NOTIFICATION);
596                 IWL_CMD(REPLY_BT_CONFIG);
597                 IWL_CMD(REPLY_STATISTICS_CMD);
598                 IWL_CMD(STATISTICS_NOTIFICATION);
599                 IWL_CMD(REPLY_CARD_STATE_CMD);
600                 IWL_CMD(CARD_STATE_NOTIFICATION);
601                 IWL_CMD(MISSED_BEACONS_NOTIFICATION);
602                 IWL_CMD(REPLY_CT_KILL_CONFIG_CMD);
603                 IWL_CMD(SENSITIVITY_CMD);
604                 IWL_CMD(REPLY_PHY_CALIBRATION_CMD);
605                 IWL_CMD(REPLY_RX_PHY_CMD);
606                 IWL_CMD(REPLY_RX_MPDU_CMD);
607                 IWL_CMD(REPLY_4965_RX);
608                 IWL_CMD(REPLY_COMPRESSED_BA);
609         default:
610                 return "UNKNOWN";
611
612         }
613 }
614
615 #define HOST_COMPLETE_TIMEOUT (HZ / 2)
616
617 /**
618  * iwl_enqueue_hcmd - enqueue a uCode command
619  * @priv: device private data pointer
620  * @cmd: a pointer to the uCode command structure
621  *
622  * The function returns < 0 values to indicate the operation
623  * failed.  On success, it returns the index (> 0) of the command in the
624  * command queue.
625  */
626 static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
627 {
628         struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
629         struct iwl_queue *q = &txq->q;
630         struct iwl_tfd_frame *tfd;
631         u32 *control_flags;
632         struct iwl_cmd *out_cmd;
633         u32 idx;
634         u16 fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
635         dma_addr_t phys_addr;
636         int ret;
637         unsigned long flags;
638
639         /* If any of the command structures end up being larger than
640          * the TFD_MAX_PAYLOAD_SIZE, and it is sent as a 'small' command, then
641          * we will need to increase the size of the TFD entries */
642         BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
643                !(cmd->meta.flags & CMD_SIZE_HUGE));
644
645         if (iwl_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) {
646                 IWL_ERROR("No space for Tx\n");
647                 return -ENOSPC;
648         }
649
650         spin_lock_irqsave(&priv->hcmd_lock, flags);
651
652         tfd = &txq->bd[q->first_empty];
653         memset(tfd, 0, sizeof(*tfd));
654
655         control_flags = (u32 *) tfd;
656
657         idx = get_cmd_index(q, q->first_empty, cmd->meta.flags & CMD_SIZE_HUGE);
658         out_cmd = &txq->cmd[idx];
659
660         out_cmd->hdr.cmd = cmd->id;
661         memcpy(&out_cmd->meta, &cmd->meta, sizeof(cmd->meta));
662         memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
663
664         /* At this point, the out_cmd now has all of the incoming cmd
665          * information */
666
667         out_cmd->hdr.flags = 0;
668         out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
669                         INDEX_TO_SEQ(q->first_empty));
670         if (out_cmd->meta.flags & CMD_SIZE_HUGE)
671                 out_cmd->hdr.sequence |= cpu_to_le16(SEQ_HUGE_FRAME);
672
673         phys_addr = txq->dma_addr_cmd + sizeof(txq->cmd[0]) * idx +
674                         offsetof(struct iwl_cmd, hdr);
675         iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size);
676
677         IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, "
678                      "%d bytes at %d[%d]:%d\n",
679                      get_cmd_string(out_cmd->hdr.cmd),
680                      out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
681                      fix_size, q->first_empty, idx, IWL_CMD_QUEUE_NUM);
682
683         txq->need_update = 1;
684         ret = iwl4965_tx_queue_update_wr_ptr(priv, txq, 0);
685         q->first_empty = iwl_queue_inc_wrap(q->first_empty, q->n_bd);
686         iwl_tx_queue_update_write_ptr(priv, txq);
687
688         spin_unlock_irqrestore(&priv->hcmd_lock, flags);
689         return ret ? ret : idx;
690 }
691
692 int iwl_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
693 {
694         int ret;
695
696         BUG_ON(!(cmd->meta.flags & CMD_ASYNC));
697
698         /* An asynchronous command can not expect an SKB to be set. */
699         BUG_ON(cmd->meta.flags & CMD_WANT_SKB);
700
701         /* An asynchronous command MUST have a callback. */
702         BUG_ON(!cmd->meta.u.callback);
703
704         if (test_bit(STATUS_EXIT_PENDING, &priv->status))
705                 return -EBUSY;
706
707         ret = iwl_enqueue_hcmd(priv, cmd);
708         if (ret < 0) {
709                 IWL_ERROR("Error sending %s: iwl_enqueue_hcmd failed: %d\n",
710                           get_cmd_string(cmd->id), ret);
711                 return ret;
712         }
713         return 0;
714 }
715
716 int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
717 {
718         int cmd_idx;
719         int ret;
720         static atomic_t entry = ATOMIC_INIT(0); /* reentrance protection */
721
722         BUG_ON(cmd->meta.flags & CMD_ASYNC);
723
724          /* A synchronous command can not have a callback set. */
725         BUG_ON(cmd->meta.u.callback != NULL);
726
727         if (atomic_xchg(&entry, 1)) {
728                 IWL_ERROR("Error sending %s: Already sending a host command\n",
729                           get_cmd_string(cmd->id));
730                 return -EBUSY;
731         }
732
733         set_bit(STATUS_HCMD_ACTIVE, &priv->status);
734
735         if (cmd->meta.flags & CMD_WANT_SKB)
736                 cmd->meta.source = &cmd->meta;
737
738         cmd_idx = iwl_enqueue_hcmd(priv, cmd);
739         if (cmd_idx < 0) {
740                 ret = cmd_idx;
741                 IWL_ERROR("Error sending %s: iwl_enqueue_hcmd failed: %d\n",
742                           get_cmd_string(cmd->id), ret);
743                 goto out;
744         }
745
746         ret = wait_event_interruptible_timeout(priv->wait_command_queue,
747                         !test_bit(STATUS_HCMD_ACTIVE, &priv->status),
748                         HOST_COMPLETE_TIMEOUT);
749         if (!ret) {
750                 if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) {
751                         IWL_ERROR("Error sending %s: time out after %dms.\n",
752                                   get_cmd_string(cmd->id),
753                                   jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
754
755                         clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
756                         ret = -ETIMEDOUT;
757                         goto cancel;
758                 }
759         }
760
761         if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
762                 IWL_DEBUG_INFO("Command %s aborted: RF KILL Switch\n",
763                                get_cmd_string(cmd->id));
764                 ret = -ECANCELED;
765                 goto fail;
766         }
767         if (test_bit(STATUS_FW_ERROR, &priv->status)) {
768                 IWL_DEBUG_INFO("Command %s failed: FW Error\n",
769                                get_cmd_string(cmd->id));
770                 ret = -EIO;
771                 goto fail;
772         }
773         if ((cmd->meta.flags & CMD_WANT_SKB) && !cmd->meta.u.skb) {
774                 IWL_ERROR("Error: Response NULL in '%s'\n",
775                           get_cmd_string(cmd->id));
776                 ret = -EIO;
777                 goto out;
778         }
779
780         ret = 0;
781         goto out;
782
783 cancel:
784         if (cmd->meta.flags & CMD_WANT_SKB) {
785                 struct iwl_cmd *qcmd;
786
787                 /* Cancel the CMD_WANT_SKB flag for the cmd in the
788                  * TX cmd queue.  Otherwise, in case the response comes
789                  * in later, it will possibly set an invalid
790                  * address (cmd->meta.source). */
791                 qcmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_idx];
792                 qcmd->meta.flags &= ~CMD_WANT_SKB;
793         }
794 fail:
795         if (cmd->meta.u.skb) {
796                 dev_kfree_skb_any(cmd->meta.u.skb);
797                 cmd->meta.u.skb = NULL;
798         }
799 out:
800         atomic_set(&entry, 0);
801         return ret;
802 }
803
804 int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
805 {
806         /* A command can not be asynchronous AND expect an SKB to be set. */
807         BUG_ON((cmd->meta.flags & CMD_ASYNC) &&
808                (cmd->meta.flags & CMD_WANT_SKB));
809
810         if (cmd->meta.flags & CMD_ASYNC)
811                 return iwl_send_cmd_async(priv, cmd);
812
813         return iwl_send_cmd_sync(priv, cmd);
814 }
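
/*
 * Example caller (sketch): an asynchronous command supplies CMD_ASYNC plus a
 * completion callback, matching the checks in iwl_send_cmd_async() above.
 * "my_callback" and the local "bt_cmd" are hypothetical names used only for
 * illustration.
 *
 *	struct iwl_host_cmd cmd = {
 *		.id = REPLY_BT_CONFIG,
 *		.len = sizeof(struct iwl_bt_cmd),
 *		.data = &bt_cmd,
 *		.meta.flags = CMD_ASYNC,
 *	};
 *	cmd.meta.u.callback = my_callback;
 *	iwl_send_cmd(priv, &cmd);
 */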
815
816 int iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data)
817 {
818         struct iwl_host_cmd cmd = {
819                 .id = id,
820                 .len = len,
821                 .data = data,
822         };
823
824         return iwl_send_cmd_sync(priv, &cmd);
825 }
826
827 static int __must_check iwl_send_cmd_u32(struct iwl_priv *priv, u8 id, u32 val)
828 {
829         struct iwl_host_cmd cmd = {
830                 .id = id,
831                 .len = sizeof(val),
832                 .data = &val,
833         };
834
835         return iwl_send_cmd_sync(priv, &cmd);
836 }
837
838 int iwl_send_statistics_request(struct iwl_priv *priv)
839 {
840         return iwl_send_cmd_u32(priv, REPLY_STATISTICS_CMD, 0);
841 }
842
843 /**
844  * iwl_rxon_add_station - add station into station table.
845  *
846  * There is only one AP station, with id = IWL_AP_ID.
847  * NOTE: the mutex must be held before calling this function
848 */
849 static int iwl_rxon_add_station(struct iwl_priv *priv,
850                                 const u8 *addr, int is_ap)
851 {
852         u8 rc;
853
854         /* Remove this station if it happens to already exist */
855         iwl_remove_station(priv, addr, is_ap);
856
857         rc = iwl_add_station(priv, addr, is_ap, 0);
858
859         iwl4965_add_station(priv, addr, is_ap);
860
861         return rc;
862 }
863
864 /**
865  * iwl_set_rxon_channel - Set the phymode and channel values in staging RXON
866  * @phymode: MODE_IEEE80211A selects the 5.2GHz band; all else selects 2.4GHz
867  * @channel: Any channel valid for the requested phymode
868  *
869  * In addition to setting the staging RXON, priv->phymode is also set.
870  *
871  * NOTE:  Does not commit to the hardware; it sets appropriate bit fields
872  * in the staging RXON flag structure based on the phymode
873  */
874 static int iwl_set_rxon_channel(struct iwl_priv *priv, u8 phymode, u16 channel)
875 {
876         if (!iwl_get_channel_info(priv, phymode, channel)) {
877                 IWL_DEBUG_INFO("Could not set channel to %d [%d]\n",
878                                channel, phymode);
879                 return -EINVAL;
880         }
881
882         if ((le16_to_cpu(priv->staging_rxon.channel) == channel) &&
883             (priv->phymode == phymode))
884                 return 0;
885
886         priv->staging_rxon.channel = cpu_to_le16(channel);
887         if (phymode == MODE_IEEE80211A)
888                 priv->staging_rxon.flags &= ~RXON_FLG_BAND_24G_MSK;
889         else
890                 priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
891
892         priv->phymode = phymode;
893
894         IWL_DEBUG_INFO("Staging channel set to %d [%d]\n", channel, phymode);
895
896         return 0;
897 }
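
/*
 * Usage sketch (illustrative only): tune the staging RXON to channel 36 in
 * the 5.2GHz band, then commit it to the hardware with iwl_commit_rxon()
 * (defined later in this file):
 *
 *	iwl_set_rxon_channel(priv, MODE_IEEE80211A, 36);
 *	iwl_commit_rxon(priv);
 */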
898
899 /**
900  * iwl_check_rxon_cmd - verify that the RXON structure is valid
901  *
902  * NOTE:  This is really only useful during development and can eventually
903  * be #ifdef'd out once the driver is stable and folks aren't actively
904  * making changes
905  */
906 static int iwl_check_rxon_cmd(struct iwl_rxon_cmd *rxon)
907 {
908         int error = 0;
909         int counter = 1;
910
911         if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
912                 error |= le32_to_cpu(rxon->flags &
913                                 (RXON_FLG_TGJ_NARROW_BAND_MSK |
914                                  RXON_FLG_RADAR_DETECT_MSK));
915                 if (error)
916                         IWL_WARNING("check 24G fields %d | %d\n",
917                                     counter++, error);
918         } else {
919                 error |= (rxon->flags & RXON_FLG_SHORT_SLOT_MSK) ?
920                                 0 : le32_to_cpu(RXON_FLG_SHORT_SLOT_MSK);
921                 if (error)
922                         IWL_WARNING("check 52 fields %d | %d\n",
923                                     counter++, error);
924                 error |= le32_to_cpu(rxon->flags & RXON_FLG_CCK_MSK);
925                 if (error)
926                         IWL_WARNING("check 52 CCK %d | %d\n",
927                                     counter++, error);
928         }
929         error |= (rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1;
930         if (error)
931                 IWL_WARNING("check mac addr %d | %d\n", counter++, error);
932
933         /* make sure basic rates 6Mbps and 1Mbps are supported */
934         error |= (((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0) &&
935                   ((rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0));
936         if (error)
937                 IWL_WARNING("check basic rate %d | %d\n", counter++, error);
938
939         error |= (le16_to_cpu(rxon->assoc_id) > 2007);
940         if (error)
941                 IWL_WARNING("check assoc id %d | %d\n", counter++, error);
942
943         error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
944                         == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK));
945         if (error)
946                 IWL_WARNING("check CCK and short slot %d | %d\n",
947                             counter++, error);
948
949         error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
950                         == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK));
951         if (error)
952                 IWL_WARNING("check CCK & auto detect %d | %d\n",
953                             counter++, error);
954
955         error |= ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
956                         RXON_FLG_TGG_PROTECT_MSK)) == RXON_FLG_TGG_PROTECT_MSK);
957         if (error)
958                 IWL_WARNING("check TGG and auto detect %d | %d\n",
959                             counter++, error);
960
961         if (error)
962                 IWL_WARNING("Tuning to channel %d\n",
963                             le16_to_cpu(rxon->channel));
964
965         if (error) {
966                 IWL_ERROR("Invalid iwl_rxon_assoc_cmd field values\n");
967                 return -1;
968         }
969         return 0;
970 }
971
972 /**
973  * iwl_full_rxon_required - determine if RXON_ASSOC can be used in RXON commit
974  * @priv: staging_rxon is compared to active_rxon
975  *
976  * If the RXON structure is changing sufficiently to require a new
977  * tune or to clear and reset the RXON_FILTER_ASSOC_MSK, then return 1
978  * to indicate a new tune is required.
979  */
980 static int iwl_full_rxon_required(struct iwl_priv *priv)
981 {
982
983         /* These items are only settable from the full RXON command */
984         if (!(priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) ||
985             compare_ether_addr(priv->staging_rxon.bssid_addr,
986                                priv->active_rxon.bssid_addr) ||
987             compare_ether_addr(priv->staging_rxon.node_addr,
988                                priv->active_rxon.node_addr) ||
989             compare_ether_addr(priv->staging_rxon.wlap_bssid_addr,
990                                priv->active_rxon.wlap_bssid_addr) ||
991             (priv->staging_rxon.dev_type != priv->active_rxon.dev_type) ||
992             (priv->staging_rxon.channel != priv->active_rxon.channel) ||
993             (priv->staging_rxon.air_propagation !=
994              priv->active_rxon.air_propagation) ||
995             (priv->staging_rxon.ofdm_ht_single_stream_basic_rates !=
996              priv->active_rxon.ofdm_ht_single_stream_basic_rates) ||
997             (priv->staging_rxon.ofdm_ht_dual_stream_basic_rates !=
998              priv->active_rxon.ofdm_ht_dual_stream_basic_rates) ||
999             (priv->staging_rxon.rx_chain != priv->active_rxon.rx_chain) ||
1000             (priv->staging_rxon.assoc_id != priv->active_rxon.assoc_id))
1001                 return 1;
1002
1003         /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
1004          * be updated with the RXON_ASSOC command -- however only some
1005          * flag transitions are allowed using RXON_ASSOC */
1006
1007         /* Check if we are not switching bands */
1008         if ((priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) !=
1009             (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK))
1010                 return 1;
1011
1012         /* Check if we are switching association toggle */
1013         if ((priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) !=
1014                 (priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK))
1015                 return 1;
1016
1017         return 0;
1018 }
1019
1020 static int iwl_send_rxon_assoc(struct iwl_priv *priv)
1021 {
1022         int rc = 0;
1023         struct iwl_rx_packet *res = NULL;
1024         struct iwl_rxon_assoc_cmd rxon_assoc;
1025         struct iwl_host_cmd cmd = {
1026                 .id = REPLY_RXON_ASSOC,
1027                 .len = sizeof(rxon_assoc),
1028                 .meta.flags = CMD_WANT_SKB,
1029                 .data = &rxon_assoc,
1030         };
1031         const struct iwl_rxon_cmd *rxon1 = &priv->staging_rxon;
1032         const struct iwl_rxon_cmd *rxon2 = &priv->active_rxon;
1033
1034         if ((rxon1->flags == rxon2->flags) &&
1035             (rxon1->filter_flags == rxon2->filter_flags) &&
1036             (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
1037             (rxon1->ofdm_ht_single_stream_basic_rates ==
1038              rxon2->ofdm_ht_single_stream_basic_rates) &&
1039             (rxon1->ofdm_ht_dual_stream_basic_rates ==
1040              rxon2->ofdm_ht_dual_stream_basic_rates) &&
1041             (rxon1->rx_chain == rxon2->rx_chain) &&
1042             (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
1043                 IWL_DEBUG_INFO("Using current RXON_ASSOC.  Not resending.\n");
1044                 return 0;
1045         }
1046
1047         rxon_assoc.flags = priv->staging_rxon.flags;
1048         rxon_assoc.filter_flags = priv->staging_rxon.filter_flags;
1049         rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates;
1050         rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates;
1051         rxon_assoc.reserved = 0;
1052         rxon_assoc.ofdm_ht_single_stream_basic_rates =
1053             priv->staging_rxon.ofdm_ht_single_stream_basic_rates;
1054         rxon_assoc.ofdm_ht_dual_stream_basic_rates =
1055             priv->staging_rxon.ofdm_ht_dual_stream_basic_rates;
1056         rxon_assoc.rx_chain_select_flags = priv->staging_rxon.rx_chain;
1057
1058         rc = iwl_send_cmd_sync(priv, &cmd);
1059         if (rc)
1060                 return rc;
1061
1062         res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
1063         if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
1064                 IWL_ERROR("Bad return from REPLY_RXON_ASSOC command\n");
1065                 rc = -EIO;
1066         }
1067
1068         priv->alloc_rxb_skb--;
1069         dev_kfree_skb_any(cmd.meta.u.skb);
1070
1071         return rc;
1072 }
1073
1074 /**
1075  * iwl_commit_rxon - commit staging_rxon to hardware
1076  *
1077  * The RXON command in staging_rxon is committed to the hardware and
1078  * the active_rxon structure is updated with the new data.  This
1079  * function correctly transitions out of the RXON_ASSOC_MSK state if
1080  * a HW tune is required based on the RXON structure changes.
1081  */
1082 static int iwl_commit_rxon(struct iwl_priv *priv)
1083 {
1084         /* cast away the const for active_rxon in this function */
1085         struct iwl_rxon_cmd *active_rxon = (void *)&priv->active_rxon;
1086         int rc = 0;
1087
1088         if (!iwl_is_alive(priv))
1089                 return -1;
1090
1091         /* always get timestamp with Rx frame */
1092         priv->staging_rxon.flags |= RXON_FLG_TSF2HOST_MSK;
1093
1094         rc = iwl_check_rxon_cmd(&priv->staging_rxon);
1095         if (rc) {
1096                 IWL_ERROR("Invalid RXON configuration.  Not committing.\n");
1097                 return -EINVAL;
1098         }
1099
1100         /* If we don't need to send a full RXON, we can use
1101          * iwl_rxon_assoc_cmd which is used to reconfigure filter
1102          * and other flags for the current radio configuration. */
1103         if (!iwl_full_rxon_required(priv)) {
1104                 rc = iwl_send_rxon_assoc(priv);
1105                 if (rc) {
1106                         IWL_ERROR("Error setting RXON_ASSOC "
1107                                   "configuration (%d).\n", rc);
1108                         return rc;
1109                 }
1110
1111                 memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));
1112
1113                 return 0;
1114         }
1115
1116         /* station table will be cleared */
1117         priv->assoc_station_added = 0;
1118
1119 #ifdef CONFIG_IWLWIFI_SENSITIVITY
1120         priv->sensitivity_data.state = IWL_SENS_CALIB_NEED_REINIT;
1121         if (!priv->error_recovering)
1122                 priv->start_calib = 0;
1123
1124         iwl4965_init_sensitivity(priv, CMD_ASYNC, 1);
1125 #endif /* CONFIG_IWLWIFI_SENSITIVITY */
1126
1127         /* If we are currently associated and the new config requires
1128          * an RXON_ASSOC and the new config wants the associated mask enabled,
1129          * we must clear the associated bit from the active configuration
1130          * before we apply the new config */
1131         if (iwl_is_associated(priv) &&
1132             (priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK)) {
1133                 IWL_DEBUG_INFO("Toggling associated bit on current RXON\n");
1134                 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
1135
1136                 rc = iwl_send_cmd_pdu(priv, REPLY_RXON,
1137                                       sizeof(struct iwl_rxon_cmd),
1138                                       &priv->active_rxon);
1139
1140                 /* If the mask clearing failed then we set
1141                  * active_rxon back to what it was previously */
1142                 if (rc) {
1143                         active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
1144                         IWL_ERROR("Error clearing ASSOC_MSK on current "
1145                                   "configuration (%d).\n", rc);
1146                         return rc;
1147                 }
1148
1149                 /* The RXON bit toggling will have cleared out the
1150                  * station table in the uCode, so blank it in the driver
1151                  * as well */
1152                 iwl_clear_stations_table(priv);
1153         } else if (priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) {
1154                 /* When switching from non-associated to associated, the
1155                  * uCode clears out the station table; so clear it in the
1156                  * driver as well */
1157                 iwl_clear_stations_table(priv);
1158         }
1159
1160         IWL_DEBUG_INFO("Sending RXON\n"
1161                        "* with%s RXON_FILTER_ASSOC_MSK\n"
1162                        "* channel = %d\n"
1163                        "* bssid = " MAC_FMT "\n",
1164                        ((priv->staging_rxon.filter_flags &
1165                          RXON_FILTER_ASSOC_MSK) ? "" : "out"),
1166                        le16_to_cpu(priv->staging_rxon.channel),
1167                        MAC_ARG(priv->staging_rxon.bssid_addr));
1168
1169         /* Apply the new configuration */
1170         rc = iwl_send_cmd_pdu(priv, REPLY_RXON,
1171                               sizeof(struct iwl_rxon_cmd), &priv->staging_rxon);
1172         if (rc) {
1173                 IWL_ERROR("Error setting new configuration (%d).\n", rc);
1174                 return rc;
1175         }
1176
1177 #ifdef CONFIG_IWLWIFI_SENSITIVITY
1178         if (!priv->error_recovering)
1179                 priv->start_calib = 0;
1180
1181         priv->sensitivity_data.state = IWL_SENS_CALIB_NEED_REINIT;
1182         iwl4965_init_sensitivity(priv, CMD_ASYNC, 1);
1183 #endif /* CONFIG_IWLWIFI_SENSITIVITY */
1184
1185         memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));
1186
1187         /* If we issue a new RXON command which requires a tune then we must
1188          * send a new TXPOWER command or we won't be able to Tx any frames */
1189         rc = iwl_hw_reg_send_txpower(priv);
1190         if (rc) {
1191                 IWL_ERROR("Error setting Tx power (%d).\n", rc);
1192                 return rc;
1193         }
1194
1195         /* Add the broadcast address so we can send broadcast frames */
1196         if (iwl_rxon_add_station(priv, BROADCAST_ADDR, 0) ==
1197             IWL_INVALID_STATION) {
1198                 IWL_ERROR("Error adding BROADCAST address for transmit.\n");
1199                 return -EIO;
1200         }
1201
1202         /* If we have set the ASSOC_MSK and we are in BSS mode then
1203          * add the IWL_AP_ID to the station rate table */
1204         if (iwl_is_associated(priv) &&
1205             (priv->iw_mode == IEEE80211_IF_TYPE_STA)) {
1206                 if (iwl_rxon_add_station(priv, priv->active_rxon.bssid_addr, 1)
1207                     == IWL_INVALID_STATION) {
1208                         IWL_ERROR("Error adding AP address for transmit.\n");
1209                         return -EIO;
1210                 }
1211                 priv->assoc_station_added = 1;
1212         }
1213
1214         return 0;
1215 }
1216
1217 static int iwl_send_bt_config(struct iwl_priv *priv)
1218 {
1219         struct iwl_bt_cmd bt_cmd = {
1220                 .flags = 3,
1221                 .lead_time = 0xAA,
1222                 .max_kill = 1,
1223                 .kill_ack_mask = 0,
1224                 .kill_cts_mask = 0,
1225         };
1226
1227         return iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
1228                                 sizeof(struct iwl_bt_cmd), &bt_cmd);
1229 }
1230
1231 static int iwl_send_scan_abort(struct iwl_priv *priv)
1232 {
1233         int rc = 0;
1234         struct iwl_rx_packet *res;
1235         struct iwl_host_cmd cmd = {
1236                 .id = REPLY_SCAN_ABORT_CMD,
1237                 .meta.flags = CMD_WANT_SKB,
1238         };
1239
1240         /* If there isn't a scan actively going on in the hardware
1241          * then we are in between scan bands and not actually
1242          * actively scanning, so don't send the abort command */
1243         if (!test_bit(STATUS_SCAN_HW, &priv->status)) {
1244                 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
1245                 return 0;
1246         }
1247
1248         rc = iwl_send_cmd_sync(priv, &cmd);
1249         if (rc) {
1250                 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
1251                 return rc;
1252         }
1253
1254         res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
1255         if (res->u.status != CAN_ABORT_STATUS) {
1256                 /* The scan abort will return 1 for success or
1257                  * 2 for "failure".  A failure condition can be
1258                  * due to simply not being in an active scan which
1259                  * can occur if we send the scan abort before the
1260                  * microcode has notified us that a scan is
1261                  * completed. */
1262                 IWL_DEBUG_INFO("SCAN_ABORT returned %d.\n", res->u.status);
1263                 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
1264                 clear_bit(STATUS_SCAN_HW, &priv->status);
1265         }
1266
1267         dev_kfree_skb_any(cmd.meta.u.skb);
1268
1269         return rc;
1270 }
1271
1272 static int iwl_card_state_sync_callback(struct iwl_priv *priv,
1273                                         struct iwl_cmd *cmd,
1274                                         struct sk_buff *skb)
1275 {
1276         return 1;
1277 }
1278
1279 /*
1280  * CARD_STATE_CMD
1281  *
1282  * Use: Sets the internal card state to enable, disable, or halt
1283  *
1284  * When in the 'enable' state the card operates as normal.
1285  * When in the 'disable' state, the card enters into a low power mode.
1286  * When in the 'halt' state, the card is shut down and must be fully
1287  * restarted to come back on.
1288  */
1289 static int iwl_send_card_state(struct iwl_priv *priv, u32 flags, u8 meta_flag)
1290 {
1291         struct iwl_host_cmd cmd = {
1292                 .id = REPLY_CARD_STATE_CMD,
1293                 .len = sizeof(u32),
1294                 .data = &flags,
1295                 .meta.flags = meta_flag,
1296         };
1297
1298         if (meta_flag & CMD_ASYNC)
1299                 cmd.meta.u.callback = iwl_card_state_sync_callback;
1300
1301         return iwl_send_cmd(priv, &cmd);
1302 }
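
/*
 * Usage sketch: the rfkill paths send this command with the card-state
 * constants from the command header; e.g. a synchronous disable might look
 * like the line below (CARD_STATE_CMD_DISABLE is assumed to be the 'disable'
 * value described above).
 *
 *	iwl_send_card_state(priv, CARD_STATE_CMD_DISABLE, 0);
 */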
1303
1304 static int iwl_add_sta_sync_callback(struct iwl_priv *priv,
1305                                      struct iwl_cmd *cmd, struct sk_buff *skb)
1306 {
1307         struct iwl_rx_packet *res = NULL;
1308
1309         if (!skb) {
1310                 IWL_ERROR("Error: Response NULL in REPLY_ADD_STA.\n");
1311                 return 1;
1312         }
1313
1314         res = (struct iwl_rx_packet *)skb->data;
1315         if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
1316                 IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n",
1317                           res->hdr.flags);
1318                 return 1;
1319         }
1320
1321         switch (res->u.add_sta.status) {
1322         case ADD_STA_SUCCESS_MSK:
1323                 break;
1324         default:
1325                 break;
1326         }
1327
1328         /* We didn't cache the SKB; let the caller free it */
1329         return 1;
1330 }
1331
1332 int iwl_send_add_station(struct iwl_priv *priv,
1333                          struct iwl_addsta_cmd *sta, u8 flags)
1334 {
1335         struct iwl_rx_packet *res = NULL;
1336         int rc = 0;
1337         struct iwl_host_cmd cmd = {
1338                 .id = REPLY_ADD_STA,
1339                 .len = sizeof(struct iwl_addsta_cmd),
1340                 .meta.flags = flags,
1341                 .data = sta,
1342         };
1343
1344         if (flags & CMD_ASYNC)
1345                 cmd.meta.u.callback = iwl_add_sta_sync_callback;
1346         else
1347                 cmd.meta.flags |= CMD_WANT_SKB;
1348
1349         rc = iwl_send_cmd(priv, &cmd);
1350
1351         if (rc || (flags & CMD_ASYNC))
1352                 return rc;
1353
1354         res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
1355         if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
1356                 IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n",
1357                           res->hdr.flags);
1358                 rc = -EIO;
1359         }
1360
1361         if (rc == 0) {
1362                 switch (res->u.add_sta.status) {
1363                 case ADD_STA_SUCCESS_MSK:
1364                         IWL_DEBUG_INFO("REPLY_ADD_STA PASSED\n");
1365                         break;
1366                 default:
1367                         rc = -EIO;
1368                         IWL_WARNING("REPLY_ADD_STA failed\n");
1369                         break;
1370                 }
1371         }
1372
1373         priv->alloc_rxb_skb--;
1374         dev_kfree_skb_any(cmd.meta.u.skb);
1375
1376         return rc;
1377 }
1378
1379 static int iwl_update_sta_key_info(struct iwl_priv *priv,
1380                                    struct ieee80211_key_conf *keyconf,
1381                                    u8 sta_id)
1382 {
1383         unsigned long flags;
1384         __le16 key_flags = 0;
1385
1386         switch (keyconf->alg) {
1387         case ALG_CCMP:
1388                 key_flags |= STA_KEY_FLG_CCMP;
1389                 key_flags |= cpu_to_le16(
1390                                 keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
1391                 key_flags &= ~STA_KEY_FLG_INVALID;
1392                 break;
1393         case ALG_TKIP:
1394         case ALG_WEP:
1395                 return -EINVAL;
1396         default:
1397                 return -EINVAL;
1398         }
1399         spin_lock_irqsave(&priv->sta_lock, flags);
1400         priv->stations[sta_id].keyinfo.alg = keyconf->alg;
1401         priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
1402         memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
1403                keyconf->keylen);
1404
1405         memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
1406                keyconf->keylen);
1407         priv->stations[sta_id].sta.key.key_flags = key_flags;
1408         priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
1409         priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
1410
1411         spin_unlock_irqrestore(&priv->sta_lock, flags);
1412
1413         IWL_DEBUG_INFO("hwcrypto: modify ucode station key info\n");
1414         iwl_send_add_station(priv, &priv->stations[sta_id].sta, 0);
1415         return 0;
1416 }
1417
1418 static int iwl_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id)
1419 {
1420         unsigned long flags;
1421
1422         spin_lock_irqsave(&priv->sta_lock, flags);
1423         memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl_hw_key));
1424         memset(&priv->stations[sta_id].sta.key, 0, sizeof(struct iwl_keyinfo));
1425         priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
1426         priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
1427         priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
1428         spin_unlock_irqrestore(&priv->sta_lock, flags);
1429
1430         IWL_DEBUG_INFO("hwcrypto: clear ucode station key info\n");
1431         iwl_send_add_station(priv, &priv->stations[sta_id].sta, 0);
1432         return 0;
1433 }
1434
1435 static void iwl_clear_free_frames(struct iwl_priv *priv)
1436 {
1437         struct list_head *element;
1438
1439         IWL_DEBUG_INFO("%d frames on pre-allocated heap on clear.\n",
1440                        priv->frames_count);
1441
1442         while (!list_empty(&priv->free_frames)) {
1443                 element = priv->free_frames.next;
1444                 list_del(element);
1445                 kfree(list_entry(element, struct iwl_frame, list));
1446                 priv->frames_count--;
1447         }
1448
1449         if (priv->frames_count) {
1450                 IWL_WARNING("%d frames still in use.  Did we lose one?\n",
1451                             priv->frames_count);
1452                 priv->frames_count = 0;
1453         }
1454 }
1455
1456 static struct iwl_frame *iwl_get_free_frame(struct iwl_priv *priv)
1457 {
1458         struct iwl_frame *frame;
1459         struct list_head *element;
1460         if (list_empty(&priv->free_frames)) {
1461                 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
1462                 if (!frame) {
1463                         IWL_ERROR("Could not allocate frame!\n");
1464                         return NULL;
1465                 }
1466
1467                 priv->frames_count++;
1468                 return frame;
1469         }
1470
1471         element = priv->free_frames.next;
1472         list_del(element);
1473         return list_entry(element, struct iwl_frame, list);
1474 }
1475
1476 static void iwl_free_frame(struct iwl_priv *priv, struct iwl_frame *frame)
1477 {
1478         memset(frame, 0, sizeof(*frame));
1479         list_add(&frame->list, &priv->free_frames);
1480 }
1481
1482 unsigned int iwl_fill_beacon_frame(struct iwl_priv *priv,
1483                                 struct ieee80211_hdr *hdr,
1484                                 const u8 *dest, int left)
1485 {
1486
1487         if (!iwl_is_associated(priv) || !priv->ibss_beacon ||
1488             ((priv->iw_mode != IEEE80211_IF_TYPE_IBSS) &&
1489              (priv->iw_mode != IEEE80211_IF_TYPE_AP)))
1490                 return 0;
1491
1492         if (priv->ibss_beacon->len > left)
1493                 return 0;
1494
1495         memcpy(hdr, priv->ibss_beacon->data, priv->ibss_beacon->len);
1496
1497         return priv->ibss_beacon->len;
1498 }
1499
1500 int iwl_rate_index_from_plcp(int plcp)
1501 {
1502         int i = 0;
1503
1504         if (plcp & RATE_MCS_HT_MSK) {
1505                 i = (plcp & 0xff);
1506
1507                 if (i >= IWL_RATE_MIMO_6M_PLCP)
1508                         i = i - IWL_RATE_MIMO_6M_PLCP;
1509
1510                 i += IWL_FIRST_OFDM_RATE;
1511                 /* skip 9M; it is not supported in HT */
1512                 if (i >= IWL_RATE_9M_INDEX)
1513                         i += 1;
1514                 if ((i >= IWL_FIRST_OFDM_RATE) &&
1515                     (i <= IWL_LAST_OFDM_RATE))
1516                         return i;
1517         } else {
1518                 for (i = 0; i < ARRAY_SIZE(iwl_rates); i++)
1519                         if (iwl_rates[i].plcp == (plcp & 0xFF))
1520                                 return i;
1521         }
1522         return -1;
1523 }
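
/*
 * Worked example for the PLCP -> rate-index mapping above (the concrete
 * index values are assumptions for illustration only): a legacy PLCP is
 * matched directly against iwl_rates[].plcp and that table index is
 * returned.  For an HT rate (RATE_MCS_HT_MSK set) the MCS number in the
 * low byte is rebased onto IWL_FIRST_OFDM_RATE, and any index landing on
 * or beyond the 9M slot is bumped by one, since 802.11n defines no
 * 9 Mbps equivalent.
 */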
1524
1525 static u8 iwl_rate_get_lowest_plcp(int rate_mask)
1526 {
1527         u8 i;
1528
1529         for (i = IWL_RATE_1M_INDEX; i != IWL_RATE_INVALID;
1530              i = iwl_rates[i].next_ieee) {
1531                 if (rate_mask & (1 << i))
1532                         return iwl_rates[i].plcp;
1533         }
1534
1535         return IWL_INVALID_RATE;
1536 }
1537
1538 static int iwl_send_beacon_cmd(struct iwl_priv *priv)
1539 {
1540         struct iwl_frame *frame;
1541         unsigned int frame_size;
1542         int rc;
1543         u8 rate;
1544
1545         frame = iwl_get_free_frame(priv);
1546
1547         if (!frame) {
1548                 IWL_ERROR("Could not obtain free frame buffer for beacon "
1549                           "command.\n");
1550                 return -ENOMEM;
1551         }
1552
1553         if (!(priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)) {
1554                 rate = iwl_rate_get_lowest_plcp(priv->active_rate_basic &
1555                                                 0xFF0);
1556                 if (rate == IWL_INVALID_RATE)
1557                         rate = IWL_RATE_6M_PLCP;
1558         } else {
1559                 rate = iwl_rate_get_lowest_plcp(priv->active_rate_basic & 0xF);
1560                 if (rate == IWL_INVALID_RATE)
1561                         rate = IWL_RATE_1M_PLCP;
1562         }
1563
1564         frame_size = iwl_hw_get_beacon_cmd(priv, frame, rate);
1565
1566         rc = iwl_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
1567                               &frame->u.cmd[0]);
1568
1569         iwl_free_frame(priv, frame);
1570
1571         return rc;
1572 }
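
/*
 * Note on the masks above: elsewhere in this file the basic-rate bitmap
 * keeps the four CCK rates in the low nibble and the eight OFDM rates in
 * the next byte (see iwl_connection_init_rx_config()), so on the 5.2 GHz
 * band only the OFDM bits (0xFF0) are considered, with a 6 Mbps PLCP
 * fallback, while on 2.4 GHz the CCK nibble (0xF) is used with a 1 Mbps
 * fallback.
 */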
1573
1574 /******************************************************************************
1575  *
1576  * EEPROM related functions
1577  *
1578  ******************************************************************************/
1579
1580 static void get_eeprom_mac(struct iwl_priv *priv, u8 *mac)
1581 {
1582         memcpy(mac, priv->eeprom.mac_address, 6);
1583 }
1584
1585 /**
1586  * iwl_eeprom_init - read EEPROM contents
1587  *
1588  * Load the EEPROM from adapter into priv->eeprom
1589  *
1590  * NOTE:  This routine uses the non-debug IO access functions.
1591  */
1592 int iwl_eeprom_init(struct iwl_priv *priv)
1593 {
1594         u16 *e = (u16 *)&priv->eeprom;
1595         u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
1596         u32 r;
1597         int sz = sizeof(priv->eeprom);
1598         int rc;
1599         int i;
1600         u16 addr;
1601
1602         /* The EEPROM structure has several padding buffers within it;
1603          * when new EEPROM maps are added they are subject to programmer
1604          * errors that can be very difficult to identify without explicitly
1605          * checking the resulting size of the EEPROM map. */
1606         BUILD_BUG_ON(sizeof(priv->eeprom) != IWL_EEPROM_IMAGE_SIZE);
1607
1608         if ((gp & CSR_EEPROM_GP_VALID_MSK) == CSR_EEPROM_GP_BAD_SIGNATURE) {
1609                 IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x\n", gp);
1610                 return -ENOENT;
1611         }
1612
1613         rc = iwl_eeprom_aqcuire_semaphore(priv);
1614         if (rc < 0) {
1615                 IWL_ERROR("Failed to acquire EEPROM semaphore.\n");
1616                 return -ENOENT;
1617         }
1618
1619         /* eeprom is an array of 16bit values */
1620         for (addr = 0; addr < sz; addr += sizeof(u16)) {
1621                 _iwl_write32(priv, CSR_EEPROM_REG, addr << 1);
1622                 _iwl_clear_bit(priv, CSR_EEPROM_REG, CSR_EEPROM_REG_BIT_CMD);
1623
1624                 for (i = 0; i < IWL_EEPROM_ACCESS_TIMEOUT;
1625                                         i += IWL_EEPROM_ACCESS_DELAY) {
1626                         r = _iwl_read_restricted(priv, CSR_EEPROM_REG);
1627                         if (r & CSR_EEPROM_REG_READ_VALID_MSK)
1628                                 break;
1629                         udelay(IWL_EEPROM_ACCESS_DELAY);
1630                 }
1631
1632                 if (!(r & CSR_EEPROM_REG_READ_VALID_MSK)) {
1633                         IWL_ERROR("Timeout reading EEPROM[%d]\n", addr);
1634                         rc = -ETIMEDOUT;
1635                         goto done;
1636                 }
1637                 e[addr / 2] = le16_to_cpu(r >> 16);
1638         }
1639         rc = 0;
1640
1641 done:
1642         iwl_eeprom_release_semaphore(priv);
1643         return rc;
1644 }
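
#if 0
/* Hedged usage sketch (not built): once iwl_eeprom_init() has copied the
 * image into priv->eeprom, the cached fields can be consumed directly,
 * for example the MAC address via the helper above. */
static void iwl_eeprom_usage_example(struct iwl_priv *priv)
{
        u8 mac[ETH_ALEN];

        if (iwl_eeprom_init(priv) == 0)
                get_eeprom_mac(priv, mac);
}
#endif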
1645
1646 /******************************************************************************
1647  *
1648  * Misc. internal state and helper functions
1649  *
1650  ******************************************************************************/
1651 #ifdef CONFIG_IWLWIFI_DEBUG
1652
1653 /**
1654  * iwl_report_frame - dump frame to syslog during debug sessions
1655  *
1656  * hack this function to show different aspects of received frames,
1657  * including selective frame dumps.
1658  * group100 parameter selects whether to show 1 out of 100 good frames.
1659  *
1660  * TODO:  ieee80211_hdr stuff is common to 3945 and 4965, so frame type
1661  *        info output is okay, but some of this stuff (e.g. iwl_rx_frame_stats)
1662  *        is 3945-specific and gives bad output for 4965.  Need to split the
1663  *        functionality, keep common stuff here.
1664  */
1665 void iwl_report_frame(struct iwl_priv *priv,
1666                       struct iwl_rx_packet *pkt,
1667                       struct ieee80211_hdr *header, int group100)
1668 {
1669         u32 to_us;
1670         u32 print_summary = 0;
1671         u32 print_dump = 0;     /* set to 1 to dump all frames' contents */
1672         u32 hundred = 0;
1673         u32 dataframe = 0;
1674         u16 fc;
1675         u16 seq_ctl;
1676         u16 channel;
1677         u16 phy_flags;
1678         int rate_sym;
1679         u16 length;
1680         u16 status;
1681         u16 bcn_tmr;
1682         u32 tsf_low;
1683         u64 tsf;
1684         u8 rssi;
1685         u8 agc;
1686         u16 sig_avg;
1687         u16 noise_diff;
1688         struct iwl_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
1689         struct iwl_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
1690         struct iwl_rx_frame_end *rx_end = IWL_RX_END(pkt);
1691         u8 *data = IWL_RX_DATA(pkt);
1692
1693         /* MAC header */
1694         fc = le16_to_cpu(header->frame_control);
1695         seq_ctl = le16_to_cpu(header->seq_ctrl);
1696
1697         /* metadata */
1698         channel = le16_to_cpu(rx_hdr->channel);
1699         phy_flags = le16_to_cpu(rx_hdr->phy_flags);
1700         rate_sym = rx_hdr->rate;
1701         length = le16_to_cpu(rx_hdr->len);
1702
1703         /* end-of-frame status and timestamp */
1704         status = le32_to_cpu(rx_end->status);
1705         bcn_tmr = le32_to_cpu(rx_end->beacon_timestamp);
1706         tsf_low = le64_to_cpu(rx_end->timestamp) & 0x0ffffffff;
1707         tsf = le64_to_cpu(rx_end->timestamp);
1708
1709         /* signal statistics */
1710         rssi = rx_stats->rssi;
1711         agc = rx_stats->agc;
1712         sig_avg = le16_to_cpu(rx_stats->sig_avg);
1713         noise_diff = le16_to_cpu(rx_stats->noise_diff);
1714
1715         to_us = !compare_ether_addr(header->addr1, priv->mac_addr);
1716
1717         /* if data frame is to us and all is good,
1718          *   (optionally) print summary for only 1 out of every 100 */
1719         if (to_us && (fc & ~IEEE80211_FCTL_PROTECTED) ==
1720             (IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) {
1721                 dataframe = 1;
1722                 if (!group100)
1723                         print_summary = 1;      /* print each frame */
1724                 else if (priv->framecnt_to_us < 100) {
1725                         priv->framecnt_to_us++;
1726                         print_summary = 0;
1727                 } else {
1728                         priv->framecnt_to_us = 0;
1729                         print_summary = 1;
1730                         hundred = 1;
1731                 }
1732         } else {
1733                 /* print summary for all other frames */
1734                 print_summary = 1;
1735         }
1736
1737         if (print_summary) {
1738                 char *title;
1739                 u32 rate;
1740
1741                 if (hundred)
1742                         title = "100Frames";
1743                 else if (fc & IEEE80211_FCTL_RETRY)
1744                         title = "Retry";
1745                 else if (ieee80211_is_assoc_response(fc))
1746                         title = "AscRsp";
1747                 else if (ieee80211_is_reassoc_response(fc))
1748                         title = "RasRsp";
1749                 else if (ieee80211_is_probe_response(fc)) {
1750                         title = "PrbRsp";
1751                         print_dump = 1; /* dump frame contents */
1752                 } else if (ieee80211_is_beacon(fc)) {
1753                         title = "Beacon";
1754                         print_dump = 1; /* dump frame contents */
1755                 } else if (ieee80211_is_atim(fc))
1756                         title = "ATIM";
1757                 else if (ieee80211_is_auth(fc))
1758                         title = "Auth";
1759                 else if (ieee80211_is_deauth(fc))
1760                         title = "DeAuth";
1761                 else if (ieee80211_is_disassoc(fc))
1762                         title = "DisAssoc";
1763                 else
1764                         title = "Frame";
1765
1766                 rate = iwl_rate_index_from_plcp(rate_sym);
1767                 if (rate == -1)
1768                         rate = 0;
1769                 else
1770                         rate = iwl_rates[rate].ieee / 2;
1771
1772                 /* print frame summary.
1773                  * MAC addresses show just the last byte (for brevity),
1774                  *    but you can hack it to show more, if you'd like to. */
1775                 if (dataframe)
1776                         IWL_DEBUG_RX("%s: mhd=0x%04x, dst=0x%02x, "
1777                                      "len=%u, rssi=%d, chnl=%d, rate=%u, \n",
1778                                      title, fc, header->addr1[5],
1779                                      length, rssi, channel, rate);
1780                 else {
1781                         /* src/dst addresses assume managed mode */
1782                         IWL_DEBUG_RX("%s: 0x%04x, dst=0x%02x, "
1783                                      "src=0x%02x, rssi=%u, tim=%u usec, "
1784                                      "phy=0x%02x, chnl=%d\n",
1785                                      title, fc, header->addr1[5],
1786                                      header->addr3[5], rssi,
1787                                      tsf_low - priv->scan_start_tsf,
1788                                      phy_flags, channel);
1789                 }
1790         }
1791         if (print_dump)
1792                 iwl_print_hex_dump(IWL_DL_RX, data, length);
1793 }
1794 #endif
1795
1796 static void iwl_unset_hw_setting(struct iwl_priv *priv)
1797 {
1798         if (priv->hw_setting.shared_virt)
1799                 pci_free_consistent(priv->pci_dev,
1800                                     sizeof(struct iwl_shared),
1801                                     priv->hw_setting.shared_virt,
1802                                     priv->hw_setting.shared_phys);
1803 }
1804
1805 /**
1806  * iwl_supported_rate_to_ie - fill in the supported-rates IE field
1807  *
1808  * Returns a bitmap with a bit set for each supported rate inserted in ie.
1809  */
1810 static u16 iwl_supported_rate_to_ie(u8 *ie, u16 supported_rate,
1811                                     u16 basic_rate, int max_count)
1812 {
1813         u16 ret_rates = 0, bit;
1814         int i;
1815         u8 *rates;
1816
1817         rates = &(ie[1]);
1818
1819         for (bit = 1, i = 0; i < IWL_RATE_COUNT; i++, bit <<= 1) {
1820                 if (bit & supported_rate) {
1821                         ret_rates |= bit;
1822                         rates[*ie] = iwl_rates[i].ieee |
1823                             ((bit & basic_rate) ? 0x80 : 0x00);
1824                         *ie = *ie + 1;
1825                         if (*ie >= max_count)
1826                                 break;
1827                 }
1828         }
1829
1830         return ret_rates;
1831 }
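
#if 0
/* Hedged usage sketch (not built): ie[0] is used as the running element
 * length and the rate octets start at ie[1]; basic rates get bit 0x80 set.
 * The two rate masks below are placeholders for illustration only. */
static void iwl_rate_ie_example(void)
{
        u8 ie[IWL_RATE_COUNT + 1] = { 0 };
        u16 placed;

        placed = iwl_supported_rate_to_ie(ie, 0x0ff0 /* assumed OFDM set */,
                                          0x0010 /* assumed basic subset */,
                                          IWL_RATE_COUNT);
}
#endif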
1832
1833 #ifdef CONFIG_IWLWIFI_HT
1834 static void iwl_set_ht_capab(struct ieee80211_hw *hw,
1835                              struct ieee80211_ht_capability *ht_cap,
1836                              u8 use_wide_chan);
1837 #endif
1838
1839 /**
1840  * iwl_fill_probe_req - fill in all required fields and IE for probe request
1841  */
1842 static u16 iwl_fill_probe_req(struct iwl_priv *priv,
1843                               struct ieee80211_mgmt *frame,
1844                               int left, int is_direct)
1845 {
1846         int len = 0;
1847         u8 *pos = NULL;
1848         u16 ret_rates;
1849
1850         /* Make sure there is enough space for the probe request,
1851          * two mandatory IEs and the data */
1852         left -= 24;
1853         if (left < 0)
1854                 return 0;
1855         len += 24;
1856
1857         frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
1858         memcpy(frame->da, BROADCAST_ADDR, ETH_ALEN);
1859         memcpy(frame->sa, priv->mac_addr, ETH_ALEN);
1860         memcpy(frame->bssid, BROADCAST_ADDR, ETH_ALEN);
1861         frame->seq_ctrl = 0;
1862
1863         /* fill in our indirect SSID IE */
1864         /* ...next IE... */
1865
1866         left -= 2;
1867         if (left < 0)
1868                 return 0;
1869         len += 2;
1870         pos = &(frame->u.probe_req.variable[0]);
1871         *pos++ = WLAN_EID_SSID;
1872         *pos++ = 0;
1873
1874         /* fill in our direct SSID IE... */
1875         if (is_direct) {
1876                 /* ...next IE... */
1877                 left -= 2 + priv->essid_len;
1878                 if (left < 0)
1879                         return 0;
1880                 /* ... fill it in... */
1881                 *pos++ = WLAN_EID_SSID;
1882                 *pos++ = priv->essid_len;
1883                 memcpy(pos, priv->essid, priv->essid_len);
1884                 pos += priv->essid_len;
1885                 len += 2 + priv->essid_len;
1886         }
1887
1888         /* fill in supported rate */
1889         /* ...next IE... */
1890         left -= 2;
1891         if (left < 0)
1892                 return 0;
1893         /* ... fill it in... */
1894         *pos++ = WLAN_EID_SUPP_RATES;
1895         *pos = 0;
1896         ret_rates = priv->active_rate = priv->rates_mask;
1897         priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK;
1898
1899         iwl_supported_rate_to_ie(pos, priv->active_rate,
1900                                  priv->active_rate_basic, left);
1901         len += 2 + *pos;
1902         pos += (*pos) + 1;
1903         ret_rates = ~ret_rates & priv->active_rate;
1904
1905         if (ret_rates == 0)
1906                 goto fill_end;
1907
1908         /* fill in supported extended rate */
1909         /* ...next IE... */
1910         left -= 2;
1911         if (left < 0)
1912                 return 0;
1913         /* ... fill it in... */
1914         *pos++ = WLAN_EID_EXT_SUPP_RATES;
1915         *pos = 0;
1916         iwl_supported_rate_to_ie(pos, ret_rates, priv->active_rate_basic, left);
1917         if (*pos > 0)
1918                 len += 2 + *pos;
1919
1920 #ifdef CONFIG_IWLWIFI_HT
1921         if (is_direct && priv->is_ht_enabled) {
1922                 u8 use_wide_chan = 1;
1923
1924                 if (priv->channel_width != IWL_CHANNEL_WIDTH_40MHZ)
1925                         use_wide_chan = 0;
1926                 pos += (*pos) + 1;
1927                 *pos++ = WLAN_EID_HT_CAPABILITY;
1928                 *pos++ = sizeof(struct ieee80211_ht_capability);
1929                 iwl_set_ht_capab(NULL, (struct ieee80211_ht_capability *)pos,
1930                                  use_wide_chan);
1931                 len += 2 + sizeof(struct ieee80211_ht_capability);
1932         }
1933 #endif  /*CONFIG_IWLWIFI_HT */
1934
1935  fill_end:
1936         return (u16)len;
1937 }
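
/*
 * The probe request assembled above is the 24-byte management header, a
 * zero-length (wildcard) SSID IE, optionally a directed SSID IE, the
 * Supported Rates IE, an Extended Supported Rates IE when not all active
 * rates fit in the first one, and, for a directed scan with HT enabled,
 * an HT capabilities IE.
 */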
1938
1939 /*
1940  * QoS support
1941  */
1942 #ifdef CONFIG_IWLWIFI_QOS
1943 static int iwl_send_qos_params_command(struct iwl_priv *priv,
1944                                        struct iwl_qosparam_cmd *qos)
1945 {
1946
1947         return iwl_send_cmd_pdu(priv, REPLY_QOS_PARAM,
1948                                 sizeof(struct iwl_qosparam_cmd), qos);
1949 }
1950
1951 static void iwl_reset_qos(struct iwl_priv *priv)
1952 {
1953         u16 cw_min = 15;
1954         u16 cw_max = 1023;
1955         u8 aifs = 2;
1956         u8 is_legacy = 0;
1957         unsigned long flags;
1958         int i;
1959
1960         spin_lock_irqsave(&priv->lock, flags);
1961         priv->qos_data.qos_active = 0;
1962
1963         if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) {
1964                 if (priv->qos_data.qos_enable)
1965                         priv->qos_data.qos_active = 1;
1966                 if (!(priv->active_rate & 0xfff0)) {
1967                         cw_min = 31;
1968                         is_legacy = 1;
1969                 }
1970         } else if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
1971                 if (priv->qos_data.qos_enable)
1972                         priv->qos_data.qos_active = 1;
1973         } else if (!(priv->staging_rxon.flags & RXON_FLG_SHORT_SLOT_MSK)) {
1974                 cw_min = 31;
1975                 is_legacy = 1;
1976         }
1977
1978         if (priv->qos_data.qos_active)
1979                 aifs = 3;
1980
1981         priv->qos_data.def_qos_parm.ac[0].cw_min = cpu_to_le16(cw_min);
1982         priv->qos_data.def_qos_parm.ac[0].cw_max = cpu_to_le16(cw_max);
1983         priv->qos_data.def_qos_parm.ac[0].aifsn = aifs;
1984         priv->qos_data.def_qos_parm.ac[0].edca_txop = 0;
1985         priv->qos_data.def_qos_parm.ac[0].reserved1 = 0;
1986
1987         if (priv->qos_data.qos_active) {
1988                 i = 1;
1989                 priv->qos_data.def_qos_parm.ac[i].cw_min = cpu_to_le16(cw_min);
1990                 priv->qos_data.def_qos_parm.ac[i].cw_max = cpu_to_le16(cw_max);
1991                 priv->qos_data.def_qos_parm.ac[i].aifsn = 7;
1992                 priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
1993                 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
1994
1995                 i = 2;
1996                 priv->qos_data.def_qos_parm.ac[i].cw_min =
1997                         cpu_to_le16((cw_min + 1) / 2 - 1);
1998                 priv->qos_data.def_qos_parm.ac[i].cw_max =
1999                         cpu_to_le16(cw_max);
2000                 priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
2001                 if (is_legacy)
2002                         priv->qos_data.def_qos_parm.ac[i].edca_txop =
2003                                 cpu_to_le16(6016);
2004                 else
2005                         priv->qos_data.def_qos_parm.ac[i].edca_txop =
2006                                 cpu_to_le16(3008);
2007                 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
2008
2009                 i = 3;
2010                 priv->qos_data.def_qos_parm.ac[i].cw_min =
2011                         cpu_to_le16((cw_min + 1) / 4 - 1);
2012                 priv->qos_data.def_qos_parm.ac[i].cw_max =
2013                         cpu_to_le16((cw_max + 1) / 2 - 1);
2014                 priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
2015                 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
2016                 if (is_legacy)
2017                         priv->qos_data.def_qos_parm.ac[i].edca_txop =
2018                                 cpu_to_le16(3264);
2019                 else
2020                         priv->qos_data.def_qos_parm.ac[i].edca_txop =
2021                                 cpu_to_le16(1504);
2022         } else {
2023                 for (i = 1; i < 4; i++) {
2024                         priv->qos_data.def_qos_parm.ac[i].cw_min =
2025                                 cpu_to_le16(cw_min);
2026                         priv->qos_data.def_qos_parm.ac[i].cw_max =
2027                                 cpu_to_le16(cw_max);
2028                         priv->qos_data.def_qos_parm.ac[i].aifsn = aifs;
2029                         priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
2030                         priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
2031                 }
2032         }
2033         IWL_DEBUG_QOS("set QoS to default \n");
2034
2035         spin_unlock_irqrestore(&priv->lock, flags);
2036 }
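
/*
 * With the defaults above (cw_min 15, cw_max 1023) the derived EDCA
 * parameters work out to: ac[2] cw_min = (15 + 1) / 2 - 1 = 7 with a
 * TXOP of 3008 usec (6016 usec when only legacy CCK rates are active),
 * and ac[3] cw_min = (15 + 1) / 4 - 1 = 3, cw_max = (1023 + 1) / 2 - 1 =
 * 511 with a TXOP of 1504 usec (3264 usec legacy); that is, the usual
 * 802.11e-style tightening of the contention window for the
 * higher-priority access categories.
 */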
2037
2038 static void iwl_activate_qos(struct iwl_priv *priv, u8 force)
2039 {
2040         unsigned long flags;
2041
2042         if (priv == NULL)
2043                 return;
2044
2045         if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2046                 return;
2047
2048         if (!priv->qos_data.qos_enable)
2049                 return;
2050
2051         spin_lock_irqsave(&priv->lock, flags);
2052         priv->qos_data.def_qos_parm.qos_flags = 0;
2053
2054         if (priv->qos_data.qos_cap.q_AP.queue_request &&
2055             !priv->qos_data.qos_cap.q_AP.txop_request)
2056                 priv->qos_data.def_qos_parm.qos_flags |=
2057                         QOS_PARAM_FLG_TXOP_TYPE_MSK;
2058
2059         if (priv->qos_data.qos_active)
2060                 priv->qos_data.def_qos_parm.qos_flags |=
2061                         QOS_PARAM_FLG_UPDATE_EDCA_MSK;
2062
2063         spin_unlock_irqrestore(&priv->lock, flags);
2064
2065         if (force || iwl_is_associated(priv)) {
2066                 IWL_DEBUG_QOS("send QoS cmd with QoS active %d\n",
2067                               priv->qos_data.qos_active);
2068
2069                 iwl_send_qos_params_command(priv,
2070                                 &(priv->qos_data.def_qos_parm));
2071         }
2072 }
2073
2074 #endif /* CONFIG_IWLWIFI_QOS */
2075 /*
2076  * Power management (not Tx power!) functions
2077  */
2078 #define MSEC_TO_USEC 1024
2079
2080 #define NOSLP __constant_cpu_to_le16(0), 0, 0
2081 #define SLP IWL_POWER_DRIVER_ALLOW_SLEEP_MSK, 0, 0
2082 #define SLP_TIMEOUT(T) __constant_cpu_to_le32((T) * MSEC_TO_USEC)
2083 #define SLP_VEC(X0, X1, X2, X3, X4) {__constant_cpu_to_le32(X0), \
2084                                      __constant_cpu_to_le32(X1), \
2085                                      __constant_cpu_to_le32(X2), \
2086                                      __constant_cpu_to_le32(X3), \
2087                                      __constant_cpu_to_le32(X4)}
2088
2089
2090 /* default power management (not Tx power) table values */
2091 /* for tim  0-10 */
2092 static struct iwl_power_vec_entry range_0[IWL_POWER_AC] = {
2093         {{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
2094         {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0},
2095         {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(300), SLP_VEC(2, 4, 6, 7, 7)}, 0},
2096         {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(100), SLP_VEC(2, 6, 9, 9, 10)}, 0},
2097         {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(25), SLP_VEC(2, 7, 9, 9, 10)}, 1},
2098         {{SLP, SLP_TIMEOUT(25), SLP_TIMEOUT(25), SLP_VEC(4, 7, 10, 10, 10)}, 1}
2099 };
2100
2101 /* for tim > 10 */
2102 static struct iwl_power_vec_entry range_1[IWL_POWER_AC] = {
2103         {{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
2104         {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500),
2105                  SLP_VEC(1, 2, 3, 4, 0xFF)}, 0},
2106         {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(300),
2107                  SLP_VEC(2, 4, 6, 7, 0xFF)}, 0},
2108         {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(100),
2109                  SLP_VEC(2, 6, 9, 9, 0xFF)}, 0},
2110         {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(25), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0},
2111         {{SLP, SLP_TIMEOUT(25), SLP_TIMEOUT(25),
2112                  SLP_VEC(4, 7, 10, 10, 0xFF)}, 0}
2113 };
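
/*
 * Reading one table row above: SLP_TIMEOUT(200) expands to
 * 200 * MSEC_TO_USEC = 200 * 1024 = 204800 usec (the "msec" here is the
 * 1024-usec 802.11 TU rather than a true millisecond), and the SLP_VEC()
 * entries appear to be per-state sleep lengths counted in beacon
 * intervals.  The 0xFF entries in range_1 effectively mean "sleep as long
 * as allowed"; they are clamped to a DTIM multiple by
 * iwl_update_power_cmd() below.
 */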
2114
2115 int iwl_power_init_handle(struct iwl_priv *priv)
2116 {
2117         int rc = 0, i;
2118         struct iwl_power_mgr *pow_data;
2119         int size = sizeof(struct iwl_power_vec_entry) * IWL_POWER_AC;
2120         u16 pci_pm;
2121
2122         IWL_DEBUG_POWER("Initialize power \n");
2123
2124         pow_data = &(priv->power_data);
2125
2126         memset(pow_data, 0, sizeof(*pow_data));
2127
2128         pow_data->active_index = IWL_POWER_RANGE_0;
2129         pow_data->dtim_val = 0xffff;
2130
2131         memcpy(&pow_data->pwr_range_0[0], &range_0[0], size);
2132         memcpy(&pow_data->pwr_range_1[0], &range_1[0], size);
2133
2134         rc = pci_read_config_word(priv->pci_dev, PCI_LINK_CTRL, &pci_pm);
2135         if (rc != 0)
2136                 return 0;
2137         else {
2138                 struct iwl_powertable_cmd *cmd;
2139
2140                 IWL_DEBUG_POWER("adjust power command flags\n");
2141
2142                 for (i = 0; i < IWL_POWER_AC; i++) {
2143                         cmd = &pow_data->pwr_range_0[i].cmd;
2144
2145                         if (pci_pm & 0x1)
2146                                 cmd->flags &= ~IWL_POWER_PCI_PM_MSK;
2147                         else
2148                                 cmd->flags |= IWL_POWER_PCI_PM_MSK;
2149                 }
2150         }
2151         return rc;
2152 }
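
/*
 * Note: bit 0 of the word read from PCI_LINK_CTRL above is treated as the
 * ASPM L0s enable bit of the PCIe Link Control register (an assumption
 * based on the register name); when that bit is set the driver drops
 * IWL_POWER_PCI_PM_MSK from the power commands, otherwise it sets it.
 */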
2153
2154 static int iwl_update_power_cmd(struct iwl_priv *priv,
2155                                 struct iwl_powertable_cmd *cmd, u32 mode)
2156 {
2157         int rc = 0, i;
2158         u8 skip;
2159         u32 max_sleep = 0;
2160         struct iwl_power_vec_entry *range;
2161         u8 period = 0;
2162         struct iwl_power_mgr *pow_data;
2163
2164         if (mode > IWL_POWER_INDEX_5) {
2165                 IWL_DEBUG_POWER("Error: invalid power mode\n");
2166                 return -1;
2167         }
2168         pow_data = &(priv->power_data);
2169
2170         if (pow_data->active_index == IWL_POWER_RANGE_0)
2171                 range = &pow_data->pwr_range_0[0];
2172         else
2173                 range = &pow_data->pwr_range_1[0];
2174
2175         memcpy(cmd, &range[mode].cmd, sizeof(struct iwl_powertable_cmd));
2176
2177 #ifdef IWL_MAC80211_DISABLE
2178         if (priv->assoc_network != NULL) {
2179                 unsigned long flags;
2180
2181                 period = priv->assoc_network->tim.tim_period;
2182         }
2183 #endif  /*IWL_MAC80211_DISABLE */
2184         skip = range[mode].no_dtim;
2185
2186         if (period == 0) {
2187                 period = 1;
2188                 skip = 0;
2189         }
2190
2191         if (skip == 0) {
2192                 max_sleep = period;
2193                 cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;
2194         } else {
2195                 __le32 slp_itrvl = cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1];
2196                 max_sleep = (le32_to_cpu(slp_itrvl) / period) * period;
2197                 cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK;
2198         }
2199
2200         for (i = 0; i < IWL_POWER_VEC_SIZE; i++) {
2201                 if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep)
2202                         cmd->sleep_interval[i] = cpu_to_le32(max_sleep);
2203         }
2204
2205         IWL_DEBUG_POWER("Flags value = 0x%08X\n", cmd->flags);
2206         IWL_DEBUG_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
2207         IWL_DEBUG_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
2208         IWL_DEBUG_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n",
2209                         le32_to_cpu(cmd->sleep_interval[0]),
2210                         le32_to_cpu(cmd->sleep_interval[1]),
2211                         le32_to_cpu(cmd->sleep_interval[2]),
2212                         le32_to_cpu(cmd->sleep_interval[3]),
2213                         le32_to_cpu(cmd->sleep_interval[4]));
2214
2215         return rc;
2216 }
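
/*
 * Example of the DTIM-skipping branch above: with a DTIM period of 3 and
 * a largest table entry of 10 beacon intervals, max_sleep =
 * (10 / 3) * 3 = 9, so every vector entry larger than 9 is pulled down
 * to 9, keeping each wake-up aligned with a DTIM beacon.
 */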
2217
2218 static int iwl_send_power_mode(struct iwl_priv *priv, u32 mode)
2219 {
2220         u32 final_mode = mode;
2221         int rc;
2222         struct iwl_powertable_cmd cmd;
2223
2224         /* If on battery, set to 3,
2225          * if plugged into AC power, set to CAM ("continuously aware mode"),
2226          * else user level */
2227         switch (mode) {
2228         case IWL_POWER_BATTERY:
2229                 final_mode = IWL_POWER_INDEX_3;
2230                 break;
2231         case IWL_POWER_AC:
2232                 final_mode = IWL_POWER_MODE_CAM;
2233                 break;
2234         default:
2235                 final_mode = mode;
2236                 break;
2237         }
2238
2239         cmd.keep_alive_beacons = 0;
2240
2241         iwl_update_power_cmd(priv, &cmd, final_mode);
2242
2243         rc = iwl_send_cmd_pdu(priv, POWER_TABLE_CMD, sizeof(cmd), &cmd);
2244
2245         if (final_mode == IWL_POWER_MODE_CAM)
2246                 clear_bit(STATUS_POWER_PMI, &priv->status);
2247         else
2248                 set_bit(STATUS_POWER_PMI, &priv->status);
2249
2250         return rc;
2251 }
2252
2253 int iwl_is_network_packet(struct iwl_priv *priv, struct ieee80211_hdr *header)
2254 {
2255         /* Filter incoming packets to determine if they are targeted toward
2256          * this network, discarding packets coming from ourselves */
2257         switch (priv->iw_mode) {
2258         case IEEE80211_IF_TYPE_IBSS: /* Header: Dest. | Source    | BSSID */
2259                 /* packets from our adapter are dropped (echo) */
2260                 if (!compare_ether_addr(header->addr2, priv->mac_addr))
2261                         return 0;
2262                 /* {broad,multi}cast packets to our IBSS go through */
2263                 if (is_multicast_ether_addr(header->addr1))
2264                         return !compare_ether_addr(header->addr3, priv->bssid);
2265                 /* packets to our adapter go through */
2266                 return !compare_ether_addr(header->addr1, priv->mac_addr);
2267         case IEEE80211_IF_TYPE_STA: /* Header: Dest. | AP{BSSID} | Source */
2268                 /* packets from our adapter are dropped (echo) */
2269                 if (!compare_ether_addr(header->addr3, priv->mac_addr))
2270                         return 0;
2271                 /* {broad,multi}cast packets to our BSS go through */
2272                 if (is_multicast_ether_addr(header->addr1))
2273                         return !compare_ether_addr(header->addr2, priv->bssid);
2274                 /* packets to our adapter go through */
2275                 return !compare_ether_addr(header->addr1, priv->mac_addr);
2276         }
2277
2278         return 1;
2279 }
2280
2281 #define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
2282
2283 const char *iwl_get_tx_fail_reason(u32 status)
2284 {
2285         switch (status & TX_STATUS_MSK) {
2286         case TX_STATUS_SUCCESS:
2287                 return "SUCCESS";
2288                 TX_STATUS_ENTRY(SHORT_LIMIT);
2289                 TX_STATUS_ENTRY(LONG_LIMIT);
2290                 TX_STATUS_ENTRY(FIFO_UNDERRUN);
2291                 TX_STATUS_ENTRY(MGMNT_ABORT);
2292                 TX_STATUS_ENTRY(NEXT_FRAG);
2293                 TX_STATUS_ENTRY(LIFE_EXPIRE);
2294                 TX_STATUS_ENTRY(DEST_PS);
2295                 TX_STATUS_ENTRY(ABORTED);
2296                 TX_STATUS_ENTRY(BT_RETRY);
2297                 TX_STATUS_ENTRY(STA_INVALID);
2298                 TX_STATUS_ENTRY(FRAG_DROPPED);
2299                 TX_STATUS_ENTRY(TID_DISABLE);
2300                 TX_STATUS_ENTRY(FRAME_FLUSHED);
2301                 TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
2302                 TX_STATUS_ENTRY(TX_LOCKED);
2303                 TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
2304         }
2305
2306         return "UNKNOWN";
2307 }
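
/*
 * For reference, each TX_STATUS_ENTRY(x) above expands to
 * "case TX_STATUS_FAIL_x: return "x";", e.g. TX_STATUS_ENTRY(ABORTED)
 * becomes "case TX_STATUS_FAIL_ABORTED: return "ABORTED";", which is why
 * only the SUCCESS case is spelled out by hand.
 */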
2308
2309 /**
2310  * iwl_scan_cancel - Cancel any currently executing HW scan
2311  *
2312  * NOTE: priv->mutex is not required before calling this function
2313  */
2314 static int iwl_scan_cancel(struct iwl_priv *priv)
2315 {
2316         if (!test_bit(STATUS_SCAN_HW, &priv->status)) {
2317                 clear_bit(STATUS_SCANNING, &priv->status);
2318                 return 0;
2319         }
2320
2321         if (test_bit(STATUS_SCANNING, &priv->status)) {
2322                 if (!test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
2323                         IWL_DEBUG_SCAN("Queuing scan abort.\n");
2324                         set_bit(STATUS_SCAN_ABORTING, &priv->status);
2325                         queue_work(priv->workqueue, &priv->abort_scan);
2326
2327                 } else
2328                         IWL_DEBUG_SCAN("Scan abort already in progress.\n");
2329
2330                 return test_bit(STATUS_SCANNING, &priv->status);
2331         }
2332
2333         return 0;
2334 }
2335
2336 /**
2337  * iwl_scan_cancel_timeout - Cancel any currently executing HW scan
2338  * @ms: amount of time to wait (in milliseconds) for scan to abort
2339  *
2340  * NOTE: priv->mutex must be held before calling this function
2341  */
2342 static int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
2343 {
2344         unsigned long now = jiffies;
2345         int ret;
2346
2347         ret = iwl_scan_cancel(priv);
2348         if (ret && ms) {
2349                 mutex_unlock(&priv->mutex);
2350                 while (!time_after(jiffies, now + msecs_to_jiffies(ms)) &&
2351                                 test_bit(STATUS_SCANNING, &priv->status))
2352                         msleep(1);
2353                 mutex_lock(&priv->mutex);
2354
2355                 return test_bit(STATUS_SCANNING, &priv->status);
2356         }
2357
2358         return ret;
2359 }
2360
2361 static void iwl_sequence_reset(struct iwl_priv *priv)
2362 {
2363         /* Reset ieee stats */
2364
2365         /* We don't reset the net_device_stats (ieee->stats) on
2366          * re-association */
2367
2368         priv->last_seq_num = -1;
2369         priv->last_frag_num = -1;
2370         priv->last_packet_time = 0;
2371
2372         iwl_scan_cancel(priv);
2373 }
2374
2375 #define MAX_UCODE_BEACON_INTERVAL       4096
2376 #define INTEL_CONN_LISTEN_INTERVAL      __constant_cpu_to_le16(0xA)
2377
2378 static __le16 iwl_adjust_beacon_interval(u16 beacon_val)
2379 {
2380         u16 new_val = 0;
2381         u16 beacon_factor = 0;
2382
2383         beacon_factor =
2384             (beacon_val + MAX_UCODE_BEACON_INTERVAL)
2385                 / MAX_UCODE_BEACON_INTERVAL;
2386         new_val = beacon_val / beacon_factor;
2387
2388         return cpu_to_le16(new_val);
2389 }
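
/*
 * Worked example: a typical 100 TU beacon interval gives beacon_factor =
 * (100 + 4096) / 4096 = 1 and passes through unchanged, while an 8192 TU
 * interval gives beacon_factor = (8192 + 4096) / 4096 = 3 and is scaled
 * down to 8192 / 3 = 2730 TU, which fits under
 * MAX_UCODE_BEACON_INTERVAL.
 */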
2390
2391 static void iwl_setup_rxon_timing(struct iwl_priv *priv)
2392 {
2393         u64 interval_tm_unit;
2394         u64 tsf, result;
2395         unsigned long flags;
2396         struct ieee80211_conf *conf = NULL;
2397         u16 beacon_int = 0;
2398
2399         conf = ieee80211_get_hw_conf(priv->hw);
2400
2401         spin_lock_irqsave(&priv->lock, flags);
2402         priv->rxon_timing.timestamp.dw[1] = cpu_to_le32(priv->timestamp1);
2403         priv->rxon_timing.timestamp.dw[0] = cpu_to_le32(priv->timestamp0);
2404
2405         priv->rxon_timing.listen_interval = INTEL_CONN_LISTEN_INTERVAL;
2406
2407         tsf = priv->timestamp1;
2408         tsf = ((tsf << 32) | priv->timestamp0);
2409
2410         beacon_int = priv->beacon_int;
2411         spin_unlock_irqrestore(&priv->lock, flags);
2412
2413         if (priv->iw_mode == IEEE80211_IF_TYPE_STA) {
2414                 if (beacon_int == 0) {
2415                         priv->rxon_timing.beacon_interval = cpu_to_le16(100);
2416                         priv->rxon_timing.beacon_init_val = cpu_to_le32(102400);
2417                 } else {
2418                         priv->rxon_timing.beacon_interval =
2419                                 cpu_to_le16(beacon_int);
2420                         priv->rxon_timing.beacon_interval =
2421                             iwl_adjust_beacon_interval(
2422                                 le16_to_cpu(priv->rxon_timing.beacon_interval));
2423                 }
2424
2425                 priv->rxon_timing.atim_window = 0;
2426         } else {
2427                 priv->rxon_timing.beacon_interval =
2428                         iwl_adjust_beacon_interval(conf->beacon_int);
2429                 /* TODO: we need to get atim_window from upper stack
2430                  * for now we set it to 0 */
2431                 priv->rxon_timing.atim_window = 0;
2432         }
2433
2434         interval_tm_unit =
2435                 (le16_to_cpu(priv->rxon_timing.beacon_interval) * 1024);
2436         result = do_div(tsf, interval_tm_unit);
2437         priv->rxon_timing.beacon_init_val =
2438             cpu_to_le32((u32) ((u64) interval_tm_unit - result));
2439
2440         IWL_DEBUG_ASSOC
2441             ("beacon interval %d beacon timer %d atim window %d\n",
2442                 le16_to_cpu(priv->rxon_timing.beacon_interval),
2443                 le32_to_cpu(priv->rxon_timing.beacon_init_val),
2444                 le16_to_cpu(priv->rxon_timing.atim_window));
2445 }
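
/*
 * Concretely: with a 100 TU beacon interval, interval_tm_unit is
 * 100 * 1024 = 102400 usec; do_div() leaves the remainder of
 * tsf % interval_tm_unit in 'result', so beacon_init_val ends up as the
 * number of microseconds from the current TSF until the next expected
 * beacon (TBTT).
 */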
2446
2447 static int iwl_scan_initiate(struct iwl_priv *priv)
2448 {
2449         if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
2450                 IWL_ERROR("APs don't scan.\n");
2451                 return 0;
2452         }
2453
2454         if (!iwl_is_ready_rf(priv)) {
2455                 IWL_DEBUG_SCAN("Aborting scan due to not ready.\n");
2456                 return -EIO;
2457         }
2458
2459         if (test_bit(STATUS_SCANNING, &priv->status)) {
2460                 IWL_DEBUG_SCAN("Scan already in progress.\n");
2461                 return -EAGAIN;
2462         }
2463
2464         if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
2465                 IWL_DEBUG_SCAN("Scan request while abort pending.  "
2466                                "Queuing.\n");
2467                 return -EAGAIN;
2468         }
2469
2470         IWL_DEBUG_INFO("Starting scan...\n");
2471         priv->scan_bands = 2;
2472         set_bit(STATUS_SCANNING, &priv->status);
2473         priv->scan_start = jiffies;
2474         priv->scan_pass_start = priv->scan_start;
2475
2476         queue_work(priv->workqueue, &priv->request_scan);
2477
2478         return 0;
2479 }
2480
2481 static int iwl_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt)
2482 {
2483         struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
2484
2485         if (hw_decrypt)
2486                 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
2487         else
2488                 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
2489
2490         return 0;
2491 }
2492
2493 static void iwl_set_flags_for_phymode(struct iwl_priv *priv, u8 phymode)
2494 {
2495         if (phymode == MODE_IEEE80211A) {
2496                 priv->staging_rxon.flags &=
2497                     ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
2498                       | RXON_FLG_CCK_MSK);
2499                 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
2500         } else {
2501                 /* Copied from iwl_bg_post_associate() */
2502                 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
2503                         priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
2504                 else
2505                         priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
2506
2507                 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
2508                         priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
2509
2510                 priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
2511                 priv->staging_rxon.flags |= RXON_FLG_AUTO_DETECT_MSK;
2512                 priv->staging_rxon.flags &= ~RXON_FLG_CCK_MSK;
2513         }
2514 }
2515
2516 /*
2517  * initialize rxon structure with default values from eeprom
2518  */
2519 static void iwl_connection_init_rx_config(struct iwl_priv *priv)
2520 {
2521         const struct iwl_channel_info *ch_info;
2522
2523         memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon));
2524
2525         switch (priv->iw_mode) {
2526         case IEEE80211_IF_TYPE_AP:
2527                 priv->staging_rxon.dev_type = RXON_DEV_TYPE_AP;
2528                 break;
2529
2530         case IEEE80211_IF_TYPE_STA:
2531                 priv->staging_rxon.dev_type = RXON_DEV_TYPE_ESS;
2532                 priv->staging_rxon.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
2533                 break;
2534
2535         case IEEE80211_IF_TYPE_IBSS:
2536                 priv->staging_rxon.dev_type = RXON_DEV_TYPE_IBSS;
2537                 priv->staging_rxon.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
2538                 priv->staging_rxon.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
2539                                                   RXON_FILTER_ACCEPT_GRP_MSK;
2540                 break;
2541
2542         case IEEE80211_IF_TYPE_MNTR:
2543                 priv->staging_rxon.dev_type = RXON_DEV_TYPE_SNIFFER;
2544                 priv->staging_rxon.filter_flags = RXON_FILTER_PROMISC_MSK |
2545                     RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_ACCEPT_GRP_MSK;
2546                 break;
2547         }
2548
2549 #if 0
2550         /* TODO:  Figure out when short_preamble would be set and cache from
2551          * that */
2552         if (!hw_to_local(priv->hw)->short_preamble)
2553                 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
2554         else
2555                 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
2556 #endif
2557
2558         ch_info = iwl_get_channel_info(priv, priv->phymode,
2559                                        le16_to_cpu(priv->staging_rxon.channel));
2560
2561         if (!ch_info)
2562                 ch_info = &priv->channel_info[0];
2563
2564         /*
2565          * in some cases the A-band channels are all non-IBSS;
2566          * in that case force a B/G channel
2567          */
2568         if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) &&
2569             !(is_channel_ibss(ch_info)))
2570                 ch_info = &priv->channel_info[0];
2571
2572         priv->staging_rxon.channel = cpu_to_le16(ch_info->channel);
2573         if (is_channel_a_band(ch_info))
2574                 priv->phymode = MODE_IEEE80211A;
2575         else
2576                 priv->phymode = MODE_IEEE80211G;
2577
2578         iwl_set_flags_for_phymode(priv, priv->phymode);
2579
2580         priv->staging_rxon.ofdm_basic_rates =
2581             (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
2582         priv->staging_rxon.cck_basic_rates =
2583             (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
2584
2585         priv->staging_rxon.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED_MSK |
2586                                         RXON_FLG_CHANNEL_MODE_PURE_40_MSK);
2587         memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
2588         memcpy(priv->staging_rxon.wlap_bssid_addr, priv->mac_addr, ETH_ALEN);
2589         priv->staging_rxon.ofdm_ht_single_stream_basic_rates = 0xff;
2590         priv->staging_rxon.ofdm_ht_dual_stream_basic_rates = 0xff;
2591         iwl4965_set_rxon_chain(priv);
2592 }
2593
2594 static int iwl_set_mode(struct iwl_priv *priv, int mode)
2595 {
2596         if (!iwl_is_ready_rf(priv))
2597                 return -EAGAIN;
2598
2599         if (mode == IEEE80211_IF_TYPE_IBSS) {
2600                 const struct iwl_channel_info *ch_info;
2601
2602                 ch_info = iwl_get_channel_info(priv,
2603                         priv->phymode,
2604                         le16_to_cpu(priv->staging_rxon.channel));
2605
2606                 if (!ch_info || !is_channel_ibss(ch_info)) {
2607                         IWL_ERROR("channel %d not IBSS channel\n",
2608                                   le16_to_cpu(priv->staging_rxon.channel));
2609                         return -EINVAL;
2610                 }
2611         }
2612
2613         cancel_delayed_work(&priv->scan_check);
2614         if (iwl_scan_cancel_timeout(priv, 100)) {
2615                 IWL_WARNING("Aborted scan still in progress after 100ms\n");
2616                 IWL_DEBUG_MAC80211("leaving - scan abort failed.\n");
2617                 return -EAGAIN;
2618         }
2619
2620         priv->iw_mode = mode;
2621
2622         iwl_connection_init_rx_config(priv);
2623         memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
2624
2625         iwl_clear_stations_table(priv);
2626
2627         iwl_commit_rxon(priv);
2628
2629         return 0;
2630 }
2631
2632 static void iwl_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
2633                                       struct ieee80211_tx_control *ctl,
2634                                       struct iwl_cmd *cmd,
2635                                       struct sk_buff *skb_frag,
2636                                       int last_frag)
2637 {
2638         struct iwl_hw_key *keyinfo = &priv->stations[ctl->key_idx].keyinfo;
2639
2640         switch (keyinfo->alg) {
2641         case ALG_CCMP:
2642                 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_CCM;
2643                 memcpy(cmd->cmd.tx.key, keyinfo->key, keyinfo->keylen);
2644                 IWL_DEBUG_TX("tx_cmd with aes hwcrypto\n");
2645                 break;
2646
2647         case ALG_TKIP:
2648 #if 0
2649                 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_TKIP;
2650
2651                 if (last_frag)
2652                         memcpy(cmd->cmd.tx.tkip_mic.byte, skb_frag->tail - 8,
2653                                8);
2654                 else
2655                         memset(cmd->cmd.tx.tkip_mic.byte, 0, 8);
2656 #endif
2657                 break;
2658
2659         case ALG_WEP:
2660                 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_WEP |
2661                         (ctl->key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT;
2662
2663                 if (keyinfo->keylen == 13)
2664                         cmd->cmd.tx.sec_ctl |= TX_CMD_SEC_KEY128;
2665
2666                 memcpy(&cmd->cmd.tx.key[3], keyinfo->key, keyinfo->keylen);
2667
2668                 IWL_DEBUG_TX("Configuring packet for WEP encryption "
2669                              "with key %d\n", ctl->key_idx);
2670                 break;
2671
2672         case ALG_NONE:
2673                 IWL_DEBUG_TX("Tx packet in the clear (encrypt requested).\n");
2674                 break;
2675
2676         default:
2677                 printk(KERN_ERR "Unknown encryption algorithm %d\n", keyinfo->alg);
2678                 break;
2679         }
2680 }
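
/*
 * For the WEP case above: the key index is folded into sec_ctl via
 * TX_CMD_SEC_SHIFT, a 13-byte (WEP-104) key additionally sets
 * TX_CMD_SEC_KEY128, and the key material is copied starting at offset 3
 * of the command's key field, presumably leaving the leading bytes for
 * per-packet IV state expected by the ucode.
 */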
2681
2682 /*
2683  * handle building the REPLY_TX command.
2684  */
2685 static void iwl_build_tx_cmd_basic(struct iwl_priv *priv,
2686                                   struct iwl_cmd *cmd,
2687                                   struct ieee80211_tx_control *ctrl,
2688                                   struct ieee80211_hdr *hdr,
2689                                   int is_unicast, u8 std_id)
2690 {
2691         __le16 *qc;
2692         u16 fc = le16_to_cpu(hdr->frame_control);
2693         __le32 tx_flags = cmd->cmd.tx.tx_flags;
2694
2695         cmd->cmd.tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
2696         if (!(ctrl->flags & IEEE80211_TXCTL_NO_ACK)) {
2697                 tx_flags |= TX_CMD_FLG_ACK_MSK;
2698                 if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
2699                         tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2700                 if (ieee80211_is_probe_response(fc) &&
2701                     !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
2702                         tx_flags |= TX_CMD_FLG_TSF_MSK;
2703         } else {
2704                 tx_flags &= (~TX_CMD_FLG_ACK_MSK);
2705                 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2706         }
2707
2708         cmd->cmd.tx.sta_id = std_id;
2709         if (ieee80211_get_morefrag(hdr))
2710                 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
2711
2712         qc = ieee80211_get_qos_ctrl(hdr);
2713         if (qc) {
2714                 cmd->cmd.tx.tid_tspec = (u8) (le16_to_cpu(*qc) & 0xf);
2715                 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
2716         } else
2717                 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2718
2719         if (ctrl->flags & IEEE80211_TXCTL_USE_RTS_CTS) {
2720                 tx_flags |= TX_CMD_FLG_RTS_MSK;
2721                 tx_flags &= ~TX_CMD_FLG_CTS_MSK;
2722         } else if (ctrl->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) {
2723                 tx_flags &= ~TX_CMD_FLG_RTS_MSK;
2724                 tx_flags |= TX_CMD_FLG_CTS_MSK;
2725         }
2726
2727         if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
2728                 tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
2729
2730         tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
2731         if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) {
2732                 if ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ ||
2733                     (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
2734                         cmd->cmd.tx.timeout.pm_frame_timeout =
2735                                 cpu_to_le16(3);
2736                 else
2737                         cmd->cmd.tx.timeout.pm_frame_timeout =
2738                                 cpu_to_le16(2);
2739         } else
2740                 cmd->cmd.tx.timeout.pm_frame_timeout = 0;
2741
2742         cmd->cmd.tx.driver_txop = 0;
2743         cmd->cmd.tx.tx_flags = tx_flags;
2744         cmd->cmd.tx.next_frame_len = 0;
2745 }
2746
2747 static int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
2748 {
2749         int sta_id;
2750         u16 fc = le16_to_cpu(hdr->frame_control);
2751
2752         /* If this frame is broadcast or not data then use the broadcast
2753          * station id */
2754         if (((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) ||
2755             is_multicast_ether_addr(hdr->addr1))
2756                 return priv->hw_setting.bcast_sta_id;
2757
2758         switch (priv->iw_mode) {
2759
2760         /* If this frame is part of a BSS network (we're a station), then
2761          * we use the AP's station id */
2762         case IEEE80211_IF_TYPE_STA:
2763                 return IWL_AP_ID;
2764
2765         /* If we are an AP, then find the station, or use BCAST */
2766         case IEEE80211_IF_TYPE_AP:
2767                 sta_id = iwl_hw_find_station(priv, hdr->addr1);
2768                 if (sta_id != IWL_INVALID_STATION)
2769                         return sta_id;
2770                 return priv->hw_setting.bcast_sta_id;
2771
2772         /* If this frame is part of an IBSS network, then we use the
2773          * target specific station id */
2774         case IEEE80211_IF_TYPE_IBSS:
2775                 sta_id = iwl_hw_find_station(priv, hdr->addr1);
2776                 if (sta_id != IWL_INVALID_STATION)
2777                         return sta_id;
2778
2779                 sta_id = iwl_add_station(priv, hdr->addr1, 0, CMD_ASYNC);
2780
2781                 if (sta_id != IWL_INVALID_STATION)
2782                         return sta_id;
2783
2784                 IWL_DEBUG_DROP("Station " MAC_FMT " not in station map. "
2785                                "Defaulting to broadcast...\n",
2786                                MAC_ARG(hdr->addr1));
2787                 iwl_print_hex_dump(IWL_DL_DROP, (u8 *) hdr, sizeof(*hdr));
2788                 return priv->hw_setting.bcast_sta_id;
2789
2790         default:
2791                 IWL_WARNING("Unknown mode of operation: %d\n", priv->iw_mode);
2792                 return priv->hw_setting.bcast_sta_id;
2793         }
2794 }
2795
2796 /*
2797  * start REPLY_TX command process
2798  */
2799 static int iwl_tx_skb(struct iwl_priv *priv,
2800                       struct sk_buff *skb, struct ieee80211_tx_control *ctl)
2801 {
2802         struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2803         struct iwl_tfd_frame *tfd;
2804         u32 *control_flags;
2805         int txq_id = ctl->queue;
2806         struct iwl_tx_queue *txq = NULL;
2807         struct iwl_queue *q = NULL;
2808         dma_addr_t phys_addr;
2809         dma_addr_t txcmd_phys;
2810         struct iwl_cmd *out_cmd = NULL;
2811         u16 len, idx, len_org;
2812         u8 id, hdr_len, unicast;
2813         u8 sta_id;
2814         u16 seq_number = 0;
2815         u16 fc;
2816         __le16 *qc;
2817         u8 wait_write_ptr = 0;
2818         unsigned long flags;
2819         int rc;
2820
2821         spin_lock_irqsave(&priv->lock, flags);
2822         if (iwl_is_rfkill(priv)) {
2823                 IWL_DEBUG_DROP("Dropping - RF KILL\n");
2824                 goto drop_unlock;
2825         }
2826
2827         if (!priv->interface_id) {
2828                 IWL_DEBUG_DROP("Dropping - !priv->interface_id\n");
2829                 goto drop_unlock;
2830         }
2831
2832         if ((ctl->tx_rate & 0xFF) == IWL_INVALID_RATE) {
2833                 IWL_ERROR("ERROR: No TX rate available.\n");
2834                 goto drop_unlock;
2835         }
2836
2837         unicast = !is_multicast_ether_addr(hdr->addr1);
2838         id = 0;
2839
2840         fc = le16_to_cpu(hdr->frame_control);
2841
2842 #ifdef CONFIG_IWLWIFI_DEBUG
2843         if (ieee80211_is_auth(fc))
2844                 IWL_DEBUG_TX("Sending AUTH frame\n");
2845         else if (ieee80211_is_assoc_request(fc))
2846                 IWL_DEBUG_TX("Sending ASSOC frame\n");
2847         else if (ieee80211_is_reassoc_request(fc))
2848                 IWL_DEBUG_TX("Sending REASSOC frame\n");
2849 #endif
2850
2851         if (!iwl_is_associated(priv) &&
2852             ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)) {
2853                 IWL_DEBUG_DROP("Dropping - !iwl_is_associated\n");
2854                 goto drop_unlock;
2855         }
2856
2857         spin_unlock_irqrestore(&priv->lock, flags);
2858
2859         hdr_len = ieee80211_get_hdrlen(fc);
2860         sta_id = iwl_get_sta_id(priv, hdr);
2861         if (sta_id == IWL_INVALID_STATION) {
2862                 IWL_DEBUG_DROP("Dropping - INVALID STATION: " MAC_FMT "\n",
2863                                MAC_ARG(hdr->addr1));
2864                 goto drop;
2865         }
2866
2867         IWL_DEBUG_RATE("station Id %d\n", sta_id);
2868
2869         qc = ieee80211_get_qos_ctrl(hdr);
2870         if (qc) {
2871                 u8 tid = (u8)(le16_to_cpu(*qc) & 0xf);
2872                 seq_number = priv->stations[sta_id].tid[tid].seq_number &
2873                                 IEEE80211_SCTL_SEQ;
2874                 hdr->seq_ctrl = cpu_to_le16(seq_number) |
2875                         (hdr->seq_ctrl &
2876                                 __constant_cpu_to_le16(IEEE80211_SCTL_FRAG));
2877                 seq_number += 0x10;
2878 #ifdef CONFIG_IWLWIFI_HT
2879 #ifdef CONFIG_IWLWIFI_HT_AGG
2880                 /* aggregation is on for this <sta,tid> */
2881                 if (ctl->flags & IEEE80211_TXCTL_HT_MPDU_AGG)
2882                         txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
2883 #endif /* CONFIG_IWLWIFI_HT_AGG */
2884 #endif /* CONFIG_IWLWIFI_HT */
2885         }
2886         txq = &priv->txq[txq_id];
2887         q = &txq->q;
2888
2889         spin_lock_irqsave(&priv->lock, flags);
2890
2891         tfd = &txq->bd[q->first_empty];
2892         memset(tfd, 0, sizeof(*tfd));
2893         control_flags = (u32 *) tfd;
2894         idx = get_cmd_index(q, q->first_empty, 0);
2895
2896         memset(&(txq->txb[q->first_empty]), 0, sizeof(struct iwl_tx_info));
2897         txq->txb[q->first_empty].skb[0] = skb;
2898         memcpy(&(txq->txb[q->first_empty].status.control),
2899                ctl, sizeof(struct ieee80211_tx_control));
2900         out_cmd = &txq->cmd[idx];
2901         memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
2902         memset(&out_cmd->cmd.tx, 0, sizeof(out_cmd->cmd.tx));
2903         out_cmd->hdr.cmd = REPLY_TX;
2904         out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
2905                                 INDEX_TO_SEQ(q->first_empty)));
2906         /* copy frags header */
2907         /* Copy the 802.11 MAC header into the Tx command buffer */
2908
2909         /* hdr = (struct ieee80211_hdr *)out_cmd->cmd.tx.hdr; */
2910         len = priv->hw_setting.tx_cmd_len +
2911                 sizeof(struct iwl_cmd_header) + hdr_len;
2912
2913         len_org = len;
2914         len = (len + 3) & ~3;
2915
2916         if (len_org != len)
2917                 len_org = 1;
2918         else
2919                 len_org = 0;
2920
2921         txcmd_phys = txq->dma_addr_cmd + sizeof(struct iwl_cmd) * idx +
2922                      offsetof(struct iwl_cmd, hdr);
2923
2924         iwl_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len);
2925
2926         if (!(ctl->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT))
2927                 iwl_build_tx_cmd_hwcrypto(priv, ctl, out_cmd, skb, 0);
2928
2929         /* 802.11 null functions have no payload... */
2930         len = skb->len - hdr_len;
2931         if (len) {
2932                 phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
2933                                            len, PCI_DMA_TODEVICE);
2934                 iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, len);
2935         }
2936
2937         if (len_org)
2938                 out_cmd->cmd.tx.tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
2939
2940         len = (u16)skb->len;
2941         out_cmd->cmd.tx.len = cpu_to_le16(len);
2942
2943         /* TODO need this for burst mode later on */
2944         iwl_build_tx_cmd_basic(priv, out_cmd, ctl, hdr, unicast, sta_id);
2945
2946         /* set is_hcca to 0; it probably will never be implemented */
2947         iwl_hw_build_tx_cmd_rate(priv, out_cmd, ctl, hdr, sta_id, 0);
2948
2949         iwl4965_tx_cmd(priv, out_cmd, sta_id, txcmd_phys,
2950                        hdr, hdr_len, ctl, NULL);
2951
2952         if (!ieee80211_get_morefrag(hdr)) {
2953                 txq->need_update = 1;
2954                 if (qc) {
2955                         u8 tid = (u8)(le16_to_cpu(*qc) & 0xf);
2956                         priv->stations[sta_id].tid[tid].seq_number = seq_number;
2957                 }
2958         } else {
2959                 wait_write_ptr = 1;
2960                 txq->need_update = 0;
2961         }
2962
2963         iwl_print_hex_dump(IWL_DL_TX, out_cmd->cmd.payload,
2964                            sizeof(out_cmd->cmd.tx));
2965
2966         iwl_print_hex_dump(IWL_DL_TX, (u8 *)out_cmd->cmd.tx.hdr,
2967                            ieee80211_get_hdrlen(fc));
2968
2969         iwl4965_tx_queue_update_wr_ptr(priv, txq, len);
2970
2971         q->first_empty = iwl_queue_inc_wrap(q->first_empty, q->n_bd);
2972         rc = iwl_tx_queue_update_write_ptr(priv, txq);
2973         spin_unlock_irqrestore(&priv->lock, flags);
2974
2975         if (rc)
2976                 return rc;
2977
2978         if ((iwl_queue_space(q) < q->high_mark)
2979             && priv->mac80211_registered) {
2980                 if (wait_write_ptr) {
2981                         spin_lock_irqsave(&priv->lock, flags);
2982                         txq->need_update = 1;
2983                         iwl_tx_queue_update_write_ptr(priv, txq);
2984                         spin_unlock_irqrestore(&priv->lock, flags);
2985                 }
2986
2987                 ieee80211_stop_queue(priv->hw, ctl->queue);
2988         }
2989
2990         return 0;
2991
2992 drop_unlock:
2993         spin_unlock_irqrestore(&priv->lock, flags);
2994 drop:
2995         return -1;
2996 }
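
/*
 * Illustrative sketch of the buffer layout built above (numbers are
 * examples only, not taken from the hardware definitions): the TFD gets a
 * first buffer covering the Tx command plus the copied MAC header,
 *
 *      len = priv->hw_setting.tx_cmd_len +
 *              sizeof(struct iwl_cmd_header) + hdr_len;
 *      len = (len + 3) & ~3;           e.g. 46 -> 48, so 2 pad bytes
 *
 * and, when padding was inserted, TX_CMD_FLG_MH_PAD_MSK tells the uCode to
 * skip the pad bytes between the MAC header and the payload.  If the frame
 * has a payload, a second buffer maps skb->len - hdr_len bytes of it
 * directly for DMA.
 */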
2997
2998 static void iwl_set_rate(struct iwl_priv *priv)
2999 {
3000         const struct ieee80211_hw_mode *hw = NULL;
3001         struct ieee80211_rate *rate;
3002         int i;
3003
3004         hw = iwl_get_hw_mode(priv, priv->phymode);
3005
3006         priv->active_rate = 0;
3007         priv->active_rate_basic = 0;
3008
3009         IWL_DEBUG_RATE("Setting rates for 802.11%c\n",
3010                        hw->mode == MODE_IEEE80211A ?
3011                        'a' : ((hw->mode == MODE_IEEE80211B) ? 'b' : 'g'));
3012
3013         for (i = 0; i < hw->num_rates; i++) {
3014                 rate = &(hw->rates[i]);
3015                 if ((rate->val < IWL_RATE_COUNT) &&
3016                     (rate->flags & IEEE80211_RATE_SUPPORTED)) {
3017                         IWL_DEBUG_RATE("Adding rate index %d (plcp %d)%s\n",
3018                                        rate->val, iwl_rates[rate->val].plcp,
3019                                        (rate->flags & IEEE80211_RATE_BASIC) ?
3020                                        "*" : "");
3021                         priv->active_rate |= (1 << rate->val);
3022                         if (rate->flags & IEEE80211_RATE_BASIC)
3023                                 priv->active_rate_basic |= (1 << rate->val);
3024                 } else
3025                         IWL_DEBUG_RATE("Not adding rate %d (plcp %d)\n",
3026                                        rate->val, iwl_rates[rate->val].plcp);
3027         }
3028
3029         IWL_DEBUG_RATE("Set active_rate = %0x, active_rate_basic = %0x\n",
3030                        priv->active_rate, priv->active_rate_basic);
3031
3032         /*
3033          * If a basic rate is configured, then use it (adding IWL_RATE_1M_MASK);
3034          * otherwise set it to the default of all CCK rates and 6, 12, 24 Mbps
3035          * for OFDM.
3036          */
3037         if (priv->active_rate_basic & IWL_CCK_BASIC_RATES_MASK)
3038                 priv->staging_rxon.cck_basic_rates =
3039                     ((priv->active_rate_basic &
3040                       IWL_CCK_RATES_MASK) >> IWL_FIRST_CCK_RATE) & 0xF;
3041         else
3042                 priv->staging_rxon.cck_basic_rates =
3043                     (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
3044
3045         if (priv->active_rate_basic & IWL_OFDM_BASIC_RATES_MASK)
3046                 priv->staging_rxon.ofdm_basic_rates =
3047                     ((priv->active_rate_basic &
3048                       (IWL_OFDM_BASIC_RATES_MASK | IWL_RATE_6M_MASK)) >>
3049                       IWL_FIRST_OFDM_RATE) & 0xFF;
3050         else
3051                 priv->staging_rxon.ofdm_basic_rates =
3052                    (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
3053 }
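
/*
 * Worked example for the mask arithmetic above.  Assuming the usual
 * iwl_rates layout (the four CCK rates 1, 2, 5.5 and 11 Mbps at the lowest
 * indices, the OFDM rates starting at 6 Mbps right after them): if only
 * 1 and 2 Mbps are advertised as basic rates, active_rate_basic has the
 * two lowest CCK bits set, so cck_basic_rates becomes 0x3, while
 * ofdm_basic_rates falls back to the default basic set of 6, 12 and
 * 24 Mbps.
 */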
3054
3055 static void iwl_radio_kill_sw(struct iwl_priv *priv, int disable_radio)
3056 {
3057         unsigned long flags;
3058
3059         if (!!disable_radio == test_bit(STATUS_RF_KILL_SW, &priv->status))
3060                 return;
3061
3062         IWL_DEBUG_RF_KILL("Manual SW RF KILL set to: RADIO %s\n",
3063                           disable_radio ? "OFF" : "ON");
3064
3065         if (disable_radio) {
3066                 iwl_scan_cancel(priv);
3067                 /* FIXME: This is a workaround for AP */
3068                 if (priv->iw_mode != IEEE80211_IF_TYPE_AP) {
3069                         spin_lock_irqsave(&priv->lock, flags);
3070                         iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
3071                                     CSR_UCODE_SW_BIT_RFKILL);
3072                         spin_unlock_irqrestore(&priv->lock, flags);
3073                         iwl_send_card_state(priv, CARD_STATE_CMD_DISABLE, 0);
3074                         set_bit(STATUS_RF_KILL_SW, &priv->status);
3075                 }
3076                 return;
3077         }
3078
3079         spin_lock_irqsave(&priv->lock, flags);
3080         iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
3081
3082         clear_bit(STATUS_RF_KILL_SW, &priv->status);
3083         spin_unlock_irqrestore(&priv->lock, flags);
3084
3085         /* wake up ucode */
3086         msleep(10);
3087
3088         spin_lock_irqsave(&priv->lock, flags);
3089         iwl_read32(priv, CSR_UCODE_DRV_GP1);
3090         if (!iwl_grab_restricted_access(priv))
3091                 iwl_release_restricted_access(priv);
3092         spin_unlock_irqrestore(&priv->lock, flags);
3093
3094         if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
3095                 IWL_DEBUG_RF_KILL("Cannot turn radio back on - "
3096                                   "disabled by HW switch\n");
3097                 return;
3098         }
3099
3100         queue_work(priv->workqueue, &priv->restart);
3101         return;
3102 }
3103
3104 void iwl_set_decrypted_flag(struct iwl_priv *priv, struct sk_buff *skb,
3105                             u32 decrypt_res, struct ieee80211_rx_status *stats)
3106 {
3107         u16 fc =
3108             le16_to_cpu(((struct ieee80211_hdr *)skb->data)->frame_control);
3109
3110         if (priv->active_rxon.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
3111                 return;
3112
3113         if (!(fc & IEEE80211_FCTL_PROTECTED))
3114                 return;
3115
3116         IWL_DEBUG_RX("decrypt_res:0x%x\n", decrypt_res);
3117         switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
3118         case RX_RES_STATUS_SEC_TYPE_TKIP:
3119                 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
3120                     RX_RES_STATUS_BAD_ICV_MIC)
3121                         stats->flag |= RX_FLAG_MMIC_ERROR; /* fall through */
3122         case RX_RES_STATUS_SEC_TYPE_WEP:
3123         case RX_RES_STATUS_SEC_TYPE_CCMP:
3124                 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
3125                     RX_RES_STATUS_DECRYPT_OK) {
3126                         IWL_DEBUG_RX("hw decrypt succeeded\n");
3127                         stats->flag |= RX_FLAG_DECRYPTED;
3128                 }
3129                 break;
3130
3131         default:
3132                 break;
3133         }
3134 }
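
/*
 * For example, a WEP or CCMP frame whose status reports DECRYPT_OK gets
 * RX_FLAG_DECRYPTED set so mac80211 will not try to decrypt it again in
 * software; a TKIP frame with a bad Michael MIC additionally gets
 * RX_FLAG_MMIC_ERROR (the TKIP case deliberately falls through to the
 * common DECRYPT_OK check above).
 */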
3135
3136 void iwl_handle_data_packet_monitor(struct iwl_priv *priv,
3137                                     struct iwl_rx_mem_buffer *rxb,
3138                                     void *data, short len,
3139                                     struct ieee80211_rx_status *stats,
3140                                     u16 phy_flags)
3141 {
3142         struct iwl_rt_rx_hdr *iwl_rt;
3143
3144         /* First cache any information we need before we overwrite
3145          * the information provided in the skb from the hardware */
3146         s8 signal = stats->ssi;
3147         s8 noise = 0;
3148         int rate = stats->rate;
3149         u64 tsf = stats->mactime;
3150         __le16 phy_flags_hw = cpu_to_le16(phy_flags);
3151
3152         /* Drop the frame if it cannot fit behind the radiotap header */
3153         if (len > IWL_RX_BUF_SIZE - sizeof(*iwl_rt)) {
3154                 IWL_DEBUG_DROP("Dropping too large packet in monitor\n");
3155                 return;
3156         }
3157
3158         /* Move the frame data so it starts right after the radiotap header */
3159         iwl_rt = (void *)rxb->skb->data;
3160         memmove(iwl_rt->payload, data, len);
3161
3162         iwl_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
3163         iwl_rt->rt_hdr.it_pad = 0; /* always good to zero */
3164
3165         /* total header + data */
3166         iwl_rt->rt_hdr.it_len = cpu_to_le16(sizeof(*iwl_rt));
3167
3168         /* Set the size of the skb to the size of the frame */
3169         skb_put(rxb->skb, sizeof(*iwl_rt) + len);
3170
3171         /* Big bitfield of all the fields we provide in radiotap */
3172         iwl_rt->rt_hdr.it_present =
3173             cpu_to_le32((1 << IEEE80211_RADIOTAP_TSFT) |
3174                         (1 << IEEE80211_RADIOTAP_FLAGS) |
3175                         (1 << IEEE80211_RADIOTAP_RATE) |
3176                         (1 << IEEE80211_RADIOTAP_CHANNEL) |
3177                         (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
3178                         (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
3179                         (1 << IEEE80211_RADIOTAP_ANTENNA));
3180
3181         /* Zero the flags, we'll add to them as we go */
3182         iwl_rt->rt_flags = 0;
3183
3184         iwl_rt->rt_tsf = cpu_to_le64(tsf);
3185
3186         /* Convert to dBm */
3187         iwl_rt->rt_dbmsignal = signal;
3188         iwl_rt->rt_dbmnoise = noise;
3189
3190         /* Convert the channel frequency and set the flags */
3191         iwl_rt->rt_channelMHz = cpu_to_le16(stats->freq);
3192         if (!(phy_flags_hw & RX_RES_PHY_FLAGS_BAND_24_MSK))
3193                 iwl_rt->rt_chbitmask =
3194                     cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
3195         else if (phy_flags_hw & RX_RES_PHY_FLAGS_MOD_CCK_MSK)
3196                 iwl_rt->rt_chbitmask =
3197                     cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
3198         else    /* 802.11g */
3199                 iwl_rt->rt_chbitmask =
3200                     cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ));
3201
3202         rate = iwl_rate_index_from_plcp(rate);
3203         if (rate == -1)
3204                 iwl_rt->rt_rate = 0;
3205         else
3206                 iwl_rt->rt_rate = iwl_rates[rate].ieee;
3207
3208         /* antenna number */
3209         iwl_rt->rt_antenna =
3210                 le16_to_cpu(phy_flags_hw & RX_RES_PHY_FLAGS_ANTENNA_MSK) >> 4;
3211
3212         /* set the preamble flag if we have it */
3213         if (phy_flags_hw & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
3214                 iwl_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3215
3216         IWL_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
3217
3218         stats->flag |= RX_FLAG_RADIOTAP;
3219         ieee80211_rx_irqsafe(priv->hw, rxb->skb, stats);
3220         rxb->skb = NULL;
3221 }
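
/*
 * Sketch of the skb handed to mac80211 in monitor mode:
 *
 *      | struct iwl_rt_rx_hdr (radiotap header + fields) | frame data |
 *      |<------------- sizeof(*iwl_rt) ----------------->|<-- len --->|
 *
 * which is why skb_put() above grows the skb to sizeof(*iwl_rt) + len and
 * stats->flag is marked RX_FLAG_RADIOTAP.
 */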
3222
3223
3224 #define IWL_PACKET_RETRY_TIME HZ
3225
3226 int is_duplicate_packet(struct iwl_priv *priv, struct ieee80211_hdr *header)
3227 {
3228         u16 sc = le16_to_cpu(header->seq_ctrl);
3229         u16 seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
3230         u16 frag = sc & IEEE80211_SCTL_FRAG;
3231         u16 *last_seq, *last_frag;
3232         unsigned long *last_time;
3233
3234         switch (priv->iw_mode) {
3235         case IEEE80211_IF_TYPE_IBSS:{
3236                 struct list_head *p;
3237                 struct iwl_ibss_seq *entry = NULL;
3238                 u8 *mac = header->addr2;
3239                 int index = mac[5] & (IWL_IBSS_MAC_HASH_SIZE - 1);
3240
3241                 __list_for_each(p, &priv->ibss_mac_hash[index]) {
3242                         entry =
3243                                 list_entry(p, struct iwl_ibss_seq, list);
3244                         if (!compare_ether_addr(entry->mac, mac))
3245                                 break;
3246                 }
3247                 if (p == &priv->ibss_mac_hash[index]) {
3248                         entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
3249                         if (!entry) {
3250                                 IWL_ERROR("Cannot allocate new mac entry\n");
3252                                 return 0;
3253                         }
3254                         memcpy(entry->mac, mac, ETH_ALEN);
3255                         entry->seq_num = seq;
3256                         entry->frag_num = frag;
3257                         entry->packet_time = jiffies;
3258                         list_add(&entry->list,
3259                                  &priv->ibss_mac_hash[index]);
3260                         return 0;
3261                 }
3262                 last_seq = &entry->seq_num;
3263                 last_frag = &entry->frag_num;
3264                 last_time = &entry->packet_time;
3265                 break;
3266         }
3267         case IEEE80211_IF_TYPE_STA:
3268                 last_seq = &priv->last_seq_num;
3269                 last_frag = &priv->last_frag_num;
3270                 last_time = &priv->last_packet_time;
3271                 break;
3272         default:
3273                 return 0;
3274         }
3275         if ((*last_seq == seq) &&
3276             time_after(*last_time + IWL_PACKET_RETRY_TIME, jiffies)) {
3277                 if (*last_frag == frag)
3278                         goto drop;
3279                 if (*last_frag + 1 != frag)
3280                         /* out-of-order fragment */
3281                         goto drop;
3282         } else
3283                 *last_seq = seq;
3284
3285         *last_frag = frag;
3286         *last_time = jiffies;
3287         return 0;
3288
3289  drop:
3290         return 1;
3291 }
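
/*
 * Example of the rule above: if the last accepted frame from a station had
 * seq 100 / frag 0 and another seq 100 / frag 0 frame arrives within
 * IWL_PACKET_RETRY_TIME (one second worth of jiffies), it is treated as a
 * retransmission and dropped; within that same window a seq 100 frame
 * whose fragment number is neither last_frag nor last_frag + 1 is dropped
 * as an out-of-order fragment.
 */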
3292
3293 #ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT
3294
3295 #include "iwl-spectrum.h"
3296
3297 #define BEACON_TIME_MASK_LOW    0x00FFFFFF
3298 #define BEACON_TIME_MASK_HIGH   0xFF000000
3299 #define TIME_UNIT               1024
3300
3301 /*
3302  * extended beacon time format
3303  * time in usec will be changed into a 32-bit value in 8:24 format
3304  * the high 1 byte is the beacon counts
3305  * the lower 3 bytes is the time in usec within one beacon interval
3306  */
3307
3308 static u32 iwl_usecs_to_beacons(u32 usec, u32 beacon_interval)
3309 {
3310         u32 quot;
3311         u32 rem;
3312         u32 interval = beacon_interval * 1024;
3313
3314         if (!interval || !usec)
3315                 return 0;
3316
3317         quot = (usec / interval) & (BEACON_TIME_MASK_HIGH >> 24);
3318         rem = (usec % interval) & BEACON_TIME_MASK_LOW;
3319
3320         return (quot << 24) + rem;
3321 }
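
/*
 * Worked example with illustrative numbers: for a 100 TU beacon interval,
 * interval = 100 * 1024 = 102400 usec.  With usec = 250000:
 *      quot = 250000 / 102400 = 2 beacons
 *      rem  = 250000 % 102400 = 45200 usec (0xB090)
 * so iwl_usecs_to_beacons() returns (2 << 24) + 45200 = 0x0200B090.
 */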
3322
3323 /* base is usually what we get from ucode with each received frame,
3324  * the same as HW timer counter counting down
3325  */
3326
3327 static __le32 iwl_add_beacon_time(u32 base, u32 addon, u32 beacon_interval)
3328 {
3329         u32 base_low = base & BEACON_TIME_MASK_LOW;
3330         u32 addon_low = addon & BEACON_TIME_MASK_LOW;
3331         u32 interval = beacon_interval * TIME_UNIT;
3332         u32 res = (base & BEACON_TIME_MASK_HIGH) +
3333             (addon & BEACON_TIME_MASK_HIGH);
3334
3335         if (base_low > addon_low)
3336                 res += base_low - addon_low;
3337         else if (base_low < addon_low) {
3338                 res += interval + base_low - addon_low;
3339                 res += (1 << 24);
3340         } else
3341                 res += (1 << 24);
3342
3343         return cpu_to_le32(res);
3344 }
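
/*
 * Worked example of the carry handling above (illustrative numbers, again
 * with a 100 TU = 102400 usec beacon interval): base = 0x05000000 | 60000,
 * addon = 80000.  Since base_low (60000) < addon_low (80000), the low part
 * wraps within the interval, 102400 + 60000 - 80000 = 82400 usec, and one
 * beacon is carried into the high byte, giving 0x06000000 | 82400.
 */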
3345
3346 static int iwl_get_measurement(struct iwl_priv *priv,
3347                                struct ieee80211_measurement_params *params,
3348                                u8 type)
3349 {
3350         struct iwl_spectrum_cmd spectrum;
3351         struct iwl_rx_packet *res;
3352         struct iwl_host_cmd cmd = {
3353                 .id = REPLY_SPECTRUM_MEASUREMENT_CMD,
3354                 .data = (void *)&spectrum,
3355                 .meta.flags = CMD_WANT_SKB,
3356         };
3357         u32 add_time = le64_to_cpu(params->start_time);
3358         int rc;
3359         int spectrum_resp_status;
3360         int duration = le16_to_cpu(params->duration);
3361
3362         if (iwl_is_associated(priv))
3363                 add_time =
3364                     iwl_usecs_to_beacons(
3365                         le64_to_cpu(params->start_time) - priv->last_tsf,
3366                         le16_to_cpu(priv->rxon_timing.beacon_interval));
3367
3368         memset(&spectrum, 0, sizeof(spectrum));
3369
3370         spectrum.channel_count = cpu_to_le16(1);
3371         spectrum.flags =
3372             RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK;
3373         spectrum.filter_flags = MEASUREMENT_FILTER_FLAG;
3374         cmd.len = sizeof(spectrum);
3375         spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
3376
3377         if (iwl_is_associated(priv))
3378                 spectrum.start_time =
3379                     iwl_add_beacon_time(priv->last_beacon_time,
3380                                 add_time,
3381                                 le16_to_cpu(priv->rxon_timing.beacon_interval));
3382         else
3383                 spectrum.start_time = 0;
3384
3385         spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
3386         spectrum.channels[0].channel = params->channel;
3387         spectrum.channels[0].type = type;
3388         if (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK)
3389                 spectrum.flags |= RXON_FLG_BAND_24G_MSK |
3390                     RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK;
3391
3392         rc = iwl_send_cmd_sync(priv, &cmd);
3393         if (rc)
3394                 return rc;
3395
3396         res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
3397         if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
3398                 IWL_ERROR("Bad return from REPLY_SPECTRUM_MEASUREMENT_CMD\n");
3399                 rc = -EIO;
3400         }
3401
3402         spectrum_resp_status = le16_to_cpu(res->u.spectrum.status);
3403         switch (spectrum_resp_status) {
3404         case 0:         /* Command will be handled */
3405                 if (res->u.spectrum.id != 0xff) {
3406                         IWL_DEBUG_INFO
3407                             ("Replaced existing measurement: %d\n",
3408                              res->u.spectrum.id);
3409                         priv->measurement_status &= ~MEASUREMENT_READY;
3410                 }
3411                 priv->measurement_status |= MEASUREMENT_ACTIVE;
3412                 rc = 0;
3413                 break;
3414
3415         case 1:         /* Command will not be handled */
3416                 rc = -EAGAIN;
3417                 break;
3418         }
3419
3420         dev_kfree_skb_any(cmd.meta.u.skb);
3421
3422         return rc;
3423 }
3424 #endif
3425
3426 static void iwl_txstatus_to_ieee(struct iwl_priv *priv,
3427                                  struct iwl_tx_info *tx_sta)
3428 {
3430         tx_sta->status.ack_signal = 0;
3431         tx_sta->status.excessive_retries = 0;
3432         tx_sta->status.queue_length = 0;
3433         tx_sta->status.queue_number = 0;
3434
3435         if (in_interrupt())
3436                 ieee80211_tx_status_irqsafe(priv->hw,
3437                                             tx_sta->skb[0], &(tx_sta->status));
3438         else
3439                 ieee80211_tx_status(priv->hw,
3440                                     tx_sta->skb[0], &(tx_sta->status));
3441
3442         tx_sta->skb[0] = NULL;
3443 }
3444
3445 /**
3446  * iwl_tx_queue_reclaim - Reclaim Tx queue entries no longer used by the NIC.
3447  *
3448  * When the firmware advances its 'R' index, all entries between the old and
3449  * new 'R' index need to be reclaimed.  As a result, some free space forms.
3450  * If there is enough free space (> low mark), wake the Tx queue.
3451  */
3452 int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
3453 {
3454         struct iwl_tx_queue *txq = &priv->txq[txq_id];
3455         struct iwl_queue *q = &txq->q;
3456         int nfreed = 0;
3457
3458         if ((index >= q->n_bd) || (x2_queue_used(q, index) == 0)) {
3459                 IWL_ERROR("Read index for DMA queue txq id (%d), index %d, "
3460                           "is out of range [0-%d] %d %d.\n", txq_id,
3461                           index, q->n_bd, q->first_empty, q->last_used);
3462                 return 0;
3463         }
3464
3465         for (index = iwl_queue_inc_wrap(index, q->n_bd);
3466                 q->last_used != index;
3467                 q->last_used = iwl_queue_inc_wrap(q->last_used, q->n_bd)) {
3468                 if (txq_id != IWL_CMD_QUEUE_NUM) {
3469                         iwl_txstatus_to_ieee(priv,
3470                                         &(txq->txb[txq->q.last_used]));
3471                         iwl_hw_txq_free_tfd(priv, txq);
3472                 } else if (nfreed > 1) {
3473                         IWL_ERROR("HCMD skipped: index (%d) %d %d\n", index,
3474                                         q->first_empty, q->last_used);
3475                         queue_work(priv->workqueue, &priv->restart);
3476                 }
3477                 nfreed++;
3478         }
3479
3480         if (iwl_queue_space(q) > q->low_mark && (txq_id >= 0) &&
3481                         (txq_id != IWL_CMD_QUEUE_NUM) &&
3482                         priv->mac80211_registered)
3483                 ieee80211_wake_queue(priv->hw, txq_id);
3484
3486         return nfreed;
3487 }
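
/*
 * Example of the reclaim walk above (illustrative indexes, data queue): if
 * q->last_used is 5 and the uCode reports completion up to index 8, the
 * loop frees entries 5, 6, 7 and 8 (nfreed = 4) and leaves last_used at 9,
 * waking the mac80211 queue if that opened up enough space.
 */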
3488
3489 static int iwl_is_tx_success(u32 status)
3490 {
3491         status &= TX_STATUS_MSK;
3492         return (status == TX_STATUS_SUCCESS)
3493             || (status == TX_STATUS_DIRECT_DONE);
3494 }
3495
3496 /******************************************************************************
3497  *
3498  * Generic RX handler implementations
3499  *
3500  ******************************************************************************/
3501 #ifdef CONFIG_IWLWIFI_HT
3502 #ifdef CONFIG_IWLWIFI_HT_AGG
3503
3504 static inline int iwl_get_ra_sta_id(struct iwl_priv *priv,
3505                                     struct ieee80211_hdr *hdr)
3506 {
3507         if (priv->iw_mode == IEEE80211_IF_TYPE_STA)
3508                 return IWL_AP_ID;
3509         else {
3510                 u8 *da = ieee80211_get_DA(hdr);
3511                 return iwl_hw_find_station(priv, da);
3512         }
3513 }
3514
3515 static struct ieee80211_hdr *iwl_tx_queue_get_hdr(
3516         struct iwl_priv *priv, int txq_id, int idx)
3517 {
3518         if (priv->txq[txq_id].txb[idx].skb[0])
3519                 return (struct ieee80211_hdr *)priv->txq[txq_id].
3520                                 txb[idx].skb[0]->data;
3521         return NULL;
3522 }
3523
3524 static inline u32 iwl_get_scd_ssn(struct iwl_tx_resp *tx_resp)
3525 {
3526         __le32 *scd_ssn = (__le32 *)((u32 *)&tx_resp->status +
3527                                 tx_resp->frame_count);
3528         return le32_to_cpu(*scd_ssn) & MAX_SN;
3529 }
3530
3531 static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
3532                                       struct iwl_ht_agg *agg,
3533                                       struct iwl_tx_resp *tx_resp,
3534                                       u16 start_idx)
3535 {
3536         u32 status;
3537         __le32 *frame_status = &tx_resp->status;
3538         struct ieee80211_tx_status *tx_status = NULL;
3539         struct ieee80211_hdr *hdr = NULL;
3540         int i, sh;
3541         int txq_id, idx;
3542         u16 seq;
3543
3544         if (agg->wait_for_ba)
3545                 IWL_DEBUG_TX_REPLY("got tx response without block-ack\n");
3546
3547         agg->frame_count = tx_resp->frame_count;
3548         agg->start_idx = start_idx;
3549         agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
3550         agg->bitmap0 = agg->bitmap1 = 0;
3551
3552         if (agg->frame_count == 1) {
3553                 struct iwl_tx_queue *txq;
3554                 status = le32_to_cpu(frame_status[0]);
3555
3556                 txq_id = agg->txq_id;
3557                 txq = &priv->txq[txq_id];
3558                 /* FIXME: code repetition */
3559                 IWL_DEBUG_TX_REPLY("FrameCnt = %d, StartIdx=%d \n",
3560                                    agg->frame_count, agg->start_idx);
3561
3562                 tx_status = &(priv->txq[txq_id].txb[txq->q.last_used].status);
3563                 tx_status->retry_count = tx_resp->failure_frame;
3564                 tx_status->queue_number = status & 0xff;
3565                 tx_status->queue_length = tx_resp->bt_kill_count;
3566                 tx_status->queue_length |= tx_resp->failure_rts;
3567
3568                 tx_status->flags = iwl_is_tx_success(status) ?
3569                         IEEE80211_TX_STATUS_ACK : 0;
3570                 tx_status->control.tx_rate =
3571                                 iwl_hw_get_rate_n_flags(tx_resp->rate_n_flags);
3572                 /* FIXME: code repetition end */
3573
3574                 IWL_DEBUG_TX_REPLY("1 Frame 0x%x failure :%d\n",
3575                                     status & 0xff, tx_resp->failure_frame);
3576                 IWL_DEBUG_TX_REPLY("Rate Info rate_n_flags=%x\n",
3577                                 iwl_hw_get_rate_n_flags(tx_resp->rate_n_flags));
3578
3579                 agg->wait_for_ba = 0;
3580         } else {
3581                 u64 bitmap = 0;
3582                 int start = agg->start_idx;
3583
3584                 for (i = 0; i < agg->frame_count; i++) {
3585                         u16 sc;
3586                         status = le32_to_cpu(frame_status[i]);
3587                         seq  = status >> 16;
3588                         idx = SEQ_TO_INDEX(seq);
3589                         txq_id = SEQ_TO_QUEUE(seq);
3590
3591                         if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
3592                                       AGG_TX_STATE_ABORT_MSK))
3593                                 continue;
3594
3595                         IWL_DEBUG_TX_REPLY("FrameCnt = %d, txq_id=%d idx=%d\n",
3596                                            agg->frame_count, txq_id, idx);
3597
3598                         hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);
3599
3600                         sc = le16_to_cpu(hdr->seq_ctrl);
3601                         if (idx != (SEQ_TO_SN(sc) & 0xff)) {
3602                                 IWL_ERROR("BUG_ON idx doesn't match seq control"
3603                                           " idx=%d, seq_idx=%d, seq=%d\n",
3604                                           idx, SEQ_TO_SN(sc),
3605                                           sc);
3606                                 return -1;
3607                         }
3608
3609                         IWL_DEBUG_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n",
3610                                            i, idx, SEQ_TO_SN(sc));
3611
3612                         sh = idx - start;
3613                         if (sh > 64) {
3614                                 sh = (start - idx) + 0xff;
3615                                 bitmap = bitmap << sh;
3616                                 sh = 0;
3617                                 start = idx;
3618                         } else if (sh < -64)
3619                                 sh  = 0xff - (start - idx);
3620                         else if (sh < 0) {
3621                                 sh = start - idx;
3622                                 start = idx;
3623                                 bitmap = bitmap << sh;
3624                                 sh = 0;
3625                         }
3626                         bitmap |= (1 << sh);
3627                         IWL_DEBUG_TX_REPLY("start=%d bitmap=0x%x\n",
3628                                            start, (u32)(bitmap & 0xFFFFFFFF));
3629                 }
3630
3631                 agg->bitmap0 = bitmap & 0xFFFFFFFF;
3632                 agg->bitmap1 = bitmap >> 32;
3633                 agg->start_idx = start;
3634                 agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
3635                 IWL_DEBUG_TX_REPLY("Frames %d start_idx=%d bitmap=0x%x\n",
3636                                    agg->frame_count, agg->start_idx,
3637                                    agg->bitmap0);
3638
3639                 if (bitmap)
3640                         agg->wait_for_ba = 1;
3641         }
3642         return 0;
3643 }
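
/*
 * Sketch of the aggregation bitmap built above (illustrative indexes):
 * with agg->start_idx = 10 and per-frame statuses for queue indexes 10, 11
 * and 13, each frame sets bit (idx - start), so the bitmap ends up as 0xB
 * (bits 0, 1 and 3).  A non-zero bitmap means the driver is still waiting
 * for the block-ack that will confirm these frames.
 */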
3644 #endif
3645 #endif
3646
3647 static void iwl_rx_reply_tx(struct iwl_priv *priv,
3648                             struct iwl_rx_mem_buffer *rxb)
3649 {
3650         struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3651         u16 sequence = le16_to_cpu(pkt->hdr.sequence);
3652         int txq_id = SEQ_TO_QUEUE(sequence);
3653         int index = SEQ_TO_INDEX(sequence);
3654         struct iwl_tx_queue *txq = &priv->txq[txq_id];
3655         struct ieee80211_tx_status *tx_status;
3656         struct iwl_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
3657         u32  status = le32_to_cpu(tx_resp->status);
3658 #ifdef CONFIG_IWLWIFI_HT
3659 #ifdef CONFIG_IWLWIFI_HT_AGG
3660         int tid, sta_id;
3661 #endif
3662 #endif
3663
3664         if ((index >= txq->q.n_bd) || (x2_queue_used(&txq->q, index) == 0)) {
3665                 IWL_ERROR("Read index for DMA queue txq_id (%d) index %d "
3666                           "is out of range [0-%d] %d %d\n", txq_id,
3667                           index, txq->q.n_bd, txq->q.first_empty,
3668                           txq->q.last_used);
3669                 return;
3670         }
3671
3672 #ifdef CONFIG_IWLWIFI_HT
3673 #ifdef CONFIG_IWLWIFI_HT_AGG
3674         if (txq->sched_retry) {
3675                 const u32 scd_ssn = iwl_get_scd_ssn(tx_resp);
3676                 struct ieee80211_hdr *hdr =
3677                         iwl_tx_queue_get_hdr(priv, txq_id, index);
3678                 struct iwl_ht_agg *agg = NULL;
3679                 __le16 *qc = ieee80211_get_qos_ctrl(hdr);
3680
3681                 if (qc == NULL) {
3682                         IWL_ERROR("BUG_ON qc is null!!!!\n");
3683                         return;
3684                 }
3685
3686                 tid = le16_to_cpu(*qc) & 0xf;
3687
3688                 sta_id = iwl_get_ra_sta_id(priv, hdr);
3689                 if (unlikely(sta_id == IWL_INVALID_STATION)) {
3690                         IWL_ERROR("Station not known\n");
3691                         return;
3692                 }
3693
3694                 agg = &priv->stations[sta_id].tid[tid].agg;
3695
3696                 iwl4965_tx_status_reply_tx(priv, agg, tx_resp, index);
3697
3698                 if ((tx_resp->frame_count == 1) &&
3699                     !iwl_is_tx_success(status)) {
3700                         /* TODO: send BAR */
3701                 }
3702
3703                 if ((txq->q.last_used != (scd_ssn & 0xff))) {
3704                         index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
3705                         IWL_DEBUG_TX_REPLY("Retry scheduler reclaim scd_ssn "
3706                                            "%d index %d\n", scd_ssn, index);
3707                         iwl_tx_queue_reclaim(priv, txq_id, index);
3708                 }
3709         } else {
3710 #endif /* CONFIG_IWLWIFI_HT_AGG */
3711 #endif /* CONFIG_IWLWIFI_HT */
3712         tx_status = &(txq->txb[txq->q.last_used].status);
3713
3714         tx_status->retry_count = tx_resp->failure_frame;
3715         tx_status->queue_number = status;
3716         tx_status->queue_length = tx_resp->bt_kill_count;
3717         tx_status->queue_length |= tx_resp->failure_rts;
3718
3719         tx_status->flags =
3720             iwl_is_tx_success(status) ? IEEE80211_TX_STATUS_ACK : 0;
3721
3722         tx_status->control.tx_rate =
3723                 iwl_hw_get_rate_n_flags(tx_resp->rate_n_flags);
3724
3725         IWL_DEBUG_TX("Tx queue %d Status %s (0x%08x) rate_n_flags 0x%x "
3726                      "retries %d\n", txq_id, iwl_get_tx_fail_reason(status),
3727                      status, le32_to_cpu(tx_resp->rate_n_flags),
3728                      tx_resp->failure_frame);
3729
3730         IWL_DEBUG_TX_REPLY("Tx queue reclaim %d\n", index);
3731         if (index != -1)
3732                 iwl_tx_queue_reclaim(priv, txq_id, index);
3733 #ifdef CONFIG_IWLWIFI_HT
3734 #ifdef CONFIG_IWLWIFI_HT_AGG
3735         }
3736 #endif /* CONFIG_IWLWIFI_HT_AGG */
3737 #endif /* CONFIG_IWLWIFI_HT */
3738
3739         if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
3740                 IWL_ERROR("TODO:  Implement Tx ABORT REQUIRED!!!\n");
3741 }
3742
3743
3744 static void iwl_rx_reply_alive(struct iwl_priv *priv,
3745                                struct iwl_rx_mem_buffer *rxb)
3746 {
3747         struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3748         struct iwl_alive_resp *palive;
3749         struct delayed_work *pwork;
3750
3751         palive = &pkt->u.alive_frame;
3752
3753         IWL_DEBUG_INFO("Alive ucode status 0x%08X revision "
3754                        "0x%01X 0x%01X\n",
3755                        palive->is_valid, palive->ver_type,
3756                        palive->ver_subtype);
3757
3758         if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
3759                 IWL_DEBUG_INFO("Initialization Alive received.\n");
3760                 memcpy(&priv->card_alive_init,
3761                        &pkt->u.alive_frame,
3762                        sizeof(struct iwl_init_alive_resp));
3763                 pwork = &priv->init_alive_start;
3764         } else {
3765                 IWL_DEBUG_INFO("Runtime Alive received.\n");
3766                 memcpy(&priv->card_alive, &pkt->u.alive_frame,
3767                        sizeof(struct iwl_alive_resp));
3768                 pwork = &priv->alive_start;
3769         }
3770
3771         /* We delay the ALIVE response by 5ms to
3772          * give the HW RF Kill time to activate... */
3773         if (palive->is_valid == UCODE_VALID_OK)
3774                 queue_delayed_work(priv->workqueue, pwork,
3775                                    msecs_to_jiffies(5));
3776         else
3777                 IWL_WARNING("uCode did not respond OK.\n");
3778 }
3779
3780 static void iwl_rx_reply_add_sta(struct iwl_priv *priv,
3781                                  struct iwl_rx_mem_buffer *rxb)
3782 {
3783         struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3784
3785         IWL_DEBUG_RX("Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
3786         return;
3787 }
3788
3789 static void iwl_rx_reply_error(struct iwl_priv *priv,
3790                                struct iwl_rx_mem_buffer *rxb)
3791 {
3792         struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3793
3794         IWL_ERROR("Error Reply type 0x%08X cmd %s (0x%02X) "
3795                 "seq 0x%04X ser 0x%08X\n",
3796                 le32_to_cpu(pkt->u.err_resp.error_type),
3797                 get_cmd_string(pkt->u.err_resp.cmd_id),
3798                 pkt->u.err_resp.cmd_id,
3799                 le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
3800                 le32_to_cpu(pkt->u.err_resp.error_info));
3801 }
3802
3803 #define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
3804
3805 static void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
3806 {
3807         struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3808         struct iwl_rxon_cmd *rxon = (void *)&priv->active_rxon;
3809         struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
3810         IWL_DEBUG_11H("CSA notif: channel %d, status %d\n",
3811                       le16_to_cpu(csa->channel), le32_to_cpu(csa->status));
3812         rxon->channel = csa->channel;
3813         priv->staging_rxon.channel = csa->channel;
3814 }
3815
3816 static void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
3817                                           struct iwl_rx_mem_buffer *rxb)
3818 {
3819 #ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT
3820         struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3821         struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
3822
3823         if (!report->state) {
3824                 IWL_DEBUG(IWL_DL_11H | IWL_DL_INFO,
3825                           "Spectrum Measure Notification: Start\n");
3826                 return;
3827         }
3828
3829         memcpy(&priv->measure_report, report, sizeof(*report));
3830         priv->measurement_status |= MEASUREMENT_READY;
3831 #endif
3832 }
3833
3834 static void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
3835                                   struct iwl_rx_mem_buffer *rxb)
3836 {
3837 #ifdef CONFIG_IWLWIFI_DEBUG
3838         struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3839         struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
3840         IWL_DEBUG_RX("sleep mode: %d, src: %d\n",
3841                      sleep->pm_sleep_mode, sleep->pm_wakeup_src);
3842 #endif
3843 }
3844
3845 static void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
3846                                              struct iwl_rx_mem_buffer *rxb)
3847 {
3848         struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3849         IWL_DEBUG_RADIO("Dumping %d bytes of unhandled "
3850                         "notification for %s:\n",
3851                         le32_to_cpu(pkt->len), get_cmd_string(pkt->hdr.cmd));
3852         iwl_print_hex_dump(IWL_DL_RADIO, pkt->u.raw, le32_to_cpu(pkt->len));
3853 }
3854
3855 static void iwl_bg_beacon_update(struct work_struct *work)
3856 {
3857         struct iwl_priv *priv =
3858                 container_of(work, struct iwl_priv, beacon_update);
3859         struct sk_buff *beacon;
3860
3861         /* Pull updated AP beacon from mac80211; this will fail if not in AP mode */
3862         beacon = ieee80211_beacon_get(priv->hw, priv->interface_id, NULL);
3863
3864         if (!beacon) {
3865                 IWL_ERROR("update beacon failed\n");
3866                 return;
3867         }
3868
3869         mutex_lock(&priv->mutex);
3870         /* new beacon skb is allocated every time; dispose previous.*/
3871         if (priv->ibss_beacon)
3872                 dev_kfree_skb(priv->ibss_beacon);
3873
3874         priv->ibss_beacon = beacon;
3875         mutex_unlock(&priv->mutex);
3876
3877         iwl_send_beacon_cmd(priv);
3878 }
3879
3880 static void iwl_rx_beacon_notif(struct iwl_priv *priv,
3881                                 struct iwl_rx_mem_buffer *rxb)
3882 {
3883 #ifdef CONFIG_IWLWIFI_DEBUG
3884         struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3885         struct iwl_beacon_notif *beacon = &(pkt->u.beacon_status);
3886         u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
3887
3888         IWL_DEBUG_RX("beacon status %x retries %d iss %d "
3889                 "tsf %d %d rate %d\n",
3890                 le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK,
3891                 beacon->beacon_notify_hdr.failure_frame,
3892                 le32_to_cpu(beacon->ibss_mgr_status),
3893                 le32_to_cpu(beacon->high_tsf),
3894                 le32_to_cpu(beacon->low_tsf), rate);
3895 #endif
3896
3897         if ((priv->iw_mode == IEEE80211_IF_TYPE_AP) &&
3898             (!test_bit(STATUS_EXIT_PENDING, &priv->status)))
3899                 queue_work(priv->workqueue, &priv->beacon_update);
3900 }
3901
3902 /* Service response to REPLY_SCAN_CMD (0x80) */
3903 static void iwl_rx_reply_scan(struct iwl_priv *priv,
3904                               struct iwl_rx_mem_buffer *rxb)
3905 {
3906 #ifdef CONFIG_IWLWIFI_DEBUG
3907         struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3908         struct iwl_scanreq_notification *notif =
3909             (struct iwl_scanreq_notification *)pkt->u.raw;
3910
3911         IWL_DEBUG_RX("Scan request status = 0x%x\n", notif->status);
3912 #endif
3913 }
3914
3915 /* Service SCAN_START_NOTIFICATION (0x82) */
3916 static void iwl_rx_scan_start_notif(struct iwl_priv *priv,
3917                                     struct iwl_rx_mem_buffer *rxb)
3918 {
3919         struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3920         struct iwl_scanstart_notification *notif =
3921             (struct iwl_scanstart_notification *)pkt->u.raw;
3922         priv->scan_start_tsf = le32_to_cpu(notif->tsf_low);
3923         IWL_DEBUG_SCAN("Scan start: "
3924                        "%d [802.11%s] "
3925                        "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n",
3926                        notif->channel,
3927                        notif->band ? "bg" : "a",
3928                        notif->tsf_high,
3929                        notif->tsf_low, notif->status, notif->beacon_timer);
3930 }
3931
3932 /* Service SCAN_RESULTS_NOTIFICATION (0x83) */
3933 static void iwl_rx_scan_results_notif(struct iwl_priv *priv,
3934                                       struct iwl_rx_mem_buffer *rxb)
3935 {
3936         struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3937         struct iwl_scanresults_notification *notif =
3938             (struct iwl_scanresults_notification *)pkt->u.raw;
3939
3940         IWL_DEBUG_SCAN("Scan ch.res: "
3941                        "%d [802.11%s] "
3942                        "(TSF: 0x%08X:%08X) - %d "
3943                        "elapsed=%lu usec (%dms since last)\n",
3944                        notif->channel,
3945                        notif->band ? "bg" : "a",
3946                        le32_to_cpu(notif->tsf_high),
3947                        le32_to_cpu(notif->tsf_low),
3948                        le32_to_cpu(notif->statistics[0]),
3949                        le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf,
3950                        jiffies_to_msecs(elapsed_jiffies
3951                                         (priv->last_scan_jiffies, jiffies)));
3952
3953         priv->last_scan_jiffies = jiffies;
3954 }
3955
3956 /* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
3957 static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
3958                                        struct iwl_rx_mem_buffer *rxb)
3959 {
3960         struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
3961         struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
3962
3963         IWL_DEBUG_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
3964                        scan_notif->scanned_channels,
3965                        scan_notif->tsf_low,
3966                        scan_notif->tsf_high, scan_notif->status);
3967
3968         /* The HW is no longer scanning */
3969         clear_bit(STATUS_SCAN_HW, &priv->status);
3970
3971         /* The scan completion notification came in, so kill that timer... */
3972         cancel_delayed_work(&priv->scan_check);
3973
3974         IWL_DEBUG_INFO("Scan pass on %sGHz took %dms\n",
3975                        (priv->scan_bands == 2) ? "2.4" : "5.2",
3976                        jiffies_to_msecs(elapsed_jiffies
3977                                         (priv->scan_pass_start, jiffies)));
3978
3979         /* Remove this scanned band from the list
3980          * of pending bands to scan */
3981         priv->scan_bands--;
3982
3983         /* If a request to abort was given, or the scan did not succeed
3984          * then we reset the scan state machine and terminate,
3985          * re-queuing another scan if one has been requested */
3986         if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
3987                 IWL_DEBUG_INFO("Aborted scan completed.\n");
3988                 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
3989         } else {
3990                 /* If there are more bands on this scan pass reschedule */
3991                 if (priv->scan_bands > 0)
3992                         goto reschedule;
3993         }
3994
3995         priv->last_scan_jiffies = jiffies;
3996         IWL_DEBUG_INFO("Setting scan to off\n");
3997
3998         clear_bit(STATUS_SCANNING, &priv->status);
3999
4000         IWL_DEBUG_INFO("Scan took %dms\n",
4001                 jiffies_to_msecs(elapsed_jiffies(priv->scan_start, jiffies)));
4002
4003         queue_work(priv->workqueue, &priv->scan_completed);
4004
4005         return;
4006
4007 reschedule:
4008         priv->scan_pass_start = jiffies;
4009         queue_work(priv->workqueue, &priv->request_scan);
4010 }
4011
4012 /* Handle notification from uCode that card's power state is changing
4013  * due to software, hardware, or critical temperature RFKILL */
4014 static void iwl_rx_card_state_notif(struct iwl_priv *priv,
4015                                     struct iwl_rx_mem_buffer *rxb)
4016 {
4017         struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
4018         u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
4019         unsigned long status = priv->status;
4020
4021         IWL_DEBUG_RF_KILL("Card state received: HW:%s SW:%s\n",
4022                           (flags & HW_CARD_DISABLED) ? "Kill" : "On",
4023                           (flags & SW_CARD_DISABLED) ? "Kill" : "On");
4024
4025         if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
4026                      RF_CARD_DISABLED)) {
4027
4028                 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
4029                             CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
4030
4031                 if (!iwl_grab_restricted_access(priv)) {
4032                         iwl_write_restricted(
4033                                 priv, HBUS_TARG_MBX_C,
4034                                 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
4035
4036                         iwl_release_restricted_access(priv);
4037                 }
4038
4039                 if (!(flags & RXON_CARD_DISABLED)) {
4040                         iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
4041                                     CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
4042                         if (!iwl_grab_restricted_access(priv)) {
4043                                 iwl_write_restricted(
4044                                         priv, HBUS_TARG_MBX_C,
4045                                         HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
4046
4047                                 iwl_release_restricted_access(priv);
4048                         }
4049                 }
4050
4051                 if (flags & RF_CARD_DISABLED) {
4052                         iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
4053                                     CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
4054                         iwl_read32(priv, CSR_UCODE_DRV_GP1);
4055                         if (!iwl_grab_restricted_access(priv))
4056                                 iwl_release_restricted_access(priv);
4057                 }
4058         }
4059
4060         if (flags & HW_CARD_DISABLED)
4061                 set_bit(STATUS_RF_KILL_HW, &priv->status);
4062         else
4063                 clear_bit(STATUS_RF_KILL_HW, &priv->status);
4064
4066         if (flags & SW_CARD_DISABLED)
4067                 set_bit(STATUS_RF_KILL_SW, &priv->status);
4068         else
4069                 clear_bit(STATUS_RF_KILL_SW, &priv->status);
4070
4071         if (!(flags & RXON_CARD_DISABLED))
4072                 iwl_scan_cancel(priv);
4073
4074         if ((test_bit(STATUS_RF_KILL_HW, &status) !=
4075              test_bit(STATUS_RF_KILL_HW, &priv->status)) ||
4076             (test_bit(STATUS_RF_KILL_SW, &status) !=
4077              test_bit(STATUS_RF_KILL_SW, &priv->status)))
4078                 queue_work(priv->workqueue, &priv->rf_kill);
4079         else
4080                 wake_up_interruptible(&priv->wait_command_queue);
4081 }
4082
4083 /**
4084  * iwl_setup_rx_handlers - Initialize Rx handler callbacks
4085  *
4086  * Setup the RX handlers for each of the reply types sent from the uCode
4087  * to the host.
4088  *
4089  * This function chains into the hardware specific files for them to setup
4090  * any hardware specific handlers as well.
4091  */
4092 static void iwl_setup_rx_handlers(struct iwl_priv *priv)
4093 {
4094         priv->rx_handlers[REPLY_ALIVE] = iwl_rx_reply_alive;
4095         priv->rx_handlers[REPLY_ADD_STA] = iwl_rx_reply_add_sta;
4096         priv->rx_handlers[REPLY_ERROR] = iwl_rx_reply_error;
4097         priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa;
4098         priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
4099             iwl_rx_spectrum_measure_notif;
4100         priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif;
4101         priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
4102             iwl_rx_pm_debug_statistics_notif;
4103         priv->rx_handlers[BEACON_NOTIFICATION] = iwl_rx_beacon_notif;
4104
4105         /* NOTE:  iwl_rx_statistics is different based on whether
4106          * the build is for the 3945 or the 4965.  See the
4107          * corresponding implementation in iwl-XXXX.c
4108          *
4109          * The same handler is used for both the REPLY to a
4110          * discrete statistics request from the host as well as
4111          * for the periodic statistics notification from the uCode
4112          */
4113         priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl_hw_rx_statistics;
4114         priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl_hw_rx_statistics;
4115
4116         priv->rx_handlers[REPLY_SCAN_CMD] = iwl_rx_reply_scan;
4117         priv->rx_handlers[SCAN_START_NOTIFICATION] = iwl_rx_scan_start_notif;
4118         priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] =
4119             iwl_rx_scan_results_notif;
4120         priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] =
4121             iwl_rx_scan_complete_notif;
4122         priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl_rx_card_state_notif;
4123         priv->rx_handlers[REPLY_TX] = iwl_rx_reply_tx;
4124
4125         /* Setup hardware specific Rx handlers */
4126         iwl_hw_rx_handler_setup(priv);
4127 }
4128
4129 /**
4130  * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
4131  * @rxb: Rx buffer to reclaim
4132  *
4133  * If an Rx buffer has an async callback associated with it, the callback
4134  * will be executed.  The attached skb (if present) will only be freed
4135  * if the callback returns 1.
4136  */
4137 static void iwl_tx_cmd_complete(struct iwl_priv *priv,
4138                                 struct iwl_rx_mem_buffer *rxb)
4139 {
4140         struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
4141         u16 sequence = le16_to_cpu(pkt->hdr.sequence);
4142         int txq_id = SEQ_TO_QUEUE(sequence);
4143         int index = SEQ_TO_INDEX(sequence);
4144         int huge = sequence & SEQ_HUGE_FRAME;
4145         int cmd_index;
4146         struct iwl_cmd *cmd;
4147
4148         /* If a Tx command is being handled and it isn't in the actual
4149          * command queue then a command routing bug has been introduced
4150          * in the queue management code. */
4151         if (txq_id != IWL_CMD_QUEUE_NUM)
4152                 IWL_ERROR("Error wrong command queue %d command id 0x%X\n",
4153                           txq_id, pkt->hdr.cmd);
4154         BUG_ON(txq_id != IWL_CMD_QUEUE_NUM);
4155
4156         cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
4157         cmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
4158
4159         /* Input error checking is done when commands are added to queue. */
4160         if (cmd->meta.flags & CMD_WANT_SKB) {
4161                 cmd->meta.source->u.skb = rxb->skb;
4162                 rxb->skb = NULL;
4163         } else if (cmd->meta.u.callback &&
4164                    !cmd->meta.u.callback(priv, cmd, rxb->skb))
4165                 rxb->skb = NULL;
4166
4167         iwl_tx_queue_reclaim(priv, txq_id, index);
4168
4169         if (!(cmd->meta.flags & CMD_ASYNC)) {
4170                 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
4171                 wake_up_interruptible(&priv->wait_command_queue);
4172         }
4173 }
4174
4175 /************************** RX-FUNCTIONS ****************************/
4176 /*
4177  * Rx theory of operation
4178  *
4179  * The host allocates 32 DMA target addresses and passes the host address
4180  * to the firmware at register IWL_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
4181  * 0 to 31
4182  *
4183  * Rx Queue Indexes
4184  * The host/firmware share two index registers for managing the Rx buffers.
4185  *
4186  * The READ index maps to the first position that the firmware may be writing
4187  * to -- the driver can read up to (but not including) this position and get
4188  * good data.
4189  * The READ index is managed by the firmware once the card is enabled.
4190  *
4191  * The WRITE index maps to the last position the driver has read from -- the
4192  * position preceding WRITE is the last slot the firmware can place a packet.
4193  *
4194  * The queue is empty (no good data) if WRITE = READ - 1, and is full if
4195  * WRITE = READ.
4196  *
4197  * During initialization the host sets up the READ queue position to the first
4198  * INDEX position, and WRITE to the last (READ - 1 wrapped)
4199  *
4200  * When the firmware places a packet in a buffer it will advance the READ index
4201  * and fire the RX interrupt.  The driver can then query the READ index and
4202  * process as many packets as possible, moving the WRITE index forward as it
4203  * resets the Rx queue buffers with new memory.
4204  *
4205  * The management in the driver is as follows:
4206  * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free.  When
4207  *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
4208  *   to replenish the iwl->rxq->rx_free.
4209  * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
4210  *   iwl->rxq is replenished and the READ INDEX is updated (updating the
4211  *   'processed' and 'read' driver indexes as well)
4212  * + A received packet is processed and handed to the kernel network stack,
4213  *   detached from the iwl->rxq.  The driver 'processed' index is updated.
4214  * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
4215  *   list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
4216  *   INDEX is not incremented and iwl->status(RX_STALLED) is set.  If there
4217  *   were enough free buffers and RX_STALLED is set it is cleared.
4218  *
4219  *
4220  * Driver sequence:
4221  *
4222  * iwl_rx_queue_alloc()       Allocates rx_free
4223  * iwl_rx_replenish()         Replenishes rx_free list from rx_used, and calls
4224  *                            iwl_rx_queue_restock
4225  * iwl_rx_queue_restock()     Moves available buffers from rx_free into Rx
4226  *                            queue, updates firmware pointers, and updates
4227  *                            the WRITE index.  If insufficient rx_free buffers
4228  *                            are available, schedules iwl_rx_replenish
4229  *
4230  * -- enable interrupts --
4231  * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
4232  *                            READ INDEX, detaching the SKB from the pool.
4233  *                            Moves the packet buffer from queue to rx_used.
4234  *                            Calls iwl_rx_queue_restock to refill any empty
4235  *                            slots.
4236  * ...
4237  *
4238  */
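/*
 * Illustrative example of the index rules above (not from the hardware
 * documentation, just a restatement of the conditions listed): right after
 * initialization READ = 0 and WRITE = READ - 1 (wrapped), so the queue
 * reports empty -- no good data for the driver yet.  If restocking ever
 * advanced WRITE all the way up to READ the queue would be full, which is
 * why iwl_rx_queue_space() below keeps a two-slot guard band so the two
 * states cannot be confused.
 */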
4239
4240 /**
4241  * iwl_rx_queue_space - Return number of free slots available in queue.
4242  */
4243 static int iwl_rx_queue_space(const struct iwl_rx_queue *q)
4244 {
4245         int s = q->read - q->write;
4246         if (s <= 0)
4247                 s += RX_QUEUE_SIZE;
4248         /* keep some buffer to not confuse full and empty queue */
4249         s -= 2;
4250         if (s < 0)
4251                 s = 0;
4252         return s;
4253 }
4254
4255 /**
4256  * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
4257  *
4258  * NOTE: This function has 3945 and 4965 specific code sections
4259  * but is declared in base due to the majority of the
4260  * implementation being the same (only a numeric constant is
4261  * different)
4262  *
4263  */
4264 int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q)
4265 {
4266         u32 reg = 0;
4267         int rc = 0;
4268         unsigned long flags;
4269
4270         spin_lock_irqsave(&q->lock, flags);
4271
4272         if (q->need_update == 0)
4273                 goto exit_unlock;
4274
4275         if (test_bit(STATUS_POWER_PMI, &priv->status)) {
4276                 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
4277
4278                 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
4279                         iwl_set_bit(priv, CSR_GP_CNTRL,
4280                                     CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
4281                         goto exit_unlock;
4282                 }
4283
4284                 rc = iwl_grab_restricted_access(priv);
4285                 if (rc)
4286                         goto exit_unlock;
4287
4288                 iwl_write_restricted(priv, FH_RSCSR_CHNL0_WPTR,
4289                                      q->write & ~0x7);
4290                 iwl_release_restricted_access(priv);
4291         } else
4292                 iwl_write32(priv, FH_RSCSR_CHNL0_WPTR, q->write & ~0x7);
4293
4294
4295         q->need_update = 0;
4296
4297  exit_unlock:
4298         spin_unlock_irqrestore(&q->lock, flags);
4299         return rc;
4300 }
4301
4302 /**
4303  * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer pointer.
4304  *
4305  * NOTE: This function has 3945 and 4965 specific code paths in it.
4306  */
4307 static inline __le32 iwl_dma_addr2rbd_ptr(struct iwl_priv *priv,
4308                                           dma_addr_t dma_addr)
4309 {
4310         return cpu_to_le32((u32)(dma_addr >> 8));
4311 }
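/* For illustration: a DMA address of 0x12345600 becomes the read buffer
 * descriptor value cpu_to_le32(0x00123456); the low 8 bits of the address
 * are simply dropped by the >> 8 above. */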
4312
4313
4314 /**
4315  * iwl_rx_queue_restock - refill RX queue from pre-allocated pool
4316  *
4317  * If there are slots in the RX queue that need to be restocked,
4318  * and we have free pre-allocated buffers, fill the ranks as much
4319  * as we can pulling from rx_free.
4320  *
4321  * This moves the 'write' index forward to catch up with 'processed', and
4322  * also updates the memory address in the firmware to reference the new
4323  * target buffer.
4324  */
4325 int iwl_rx_queue_restock(struct iwl_priv *priv)
4326 {
4327         struct iwl_rx_queue *rxq = &priv->rxq;
4328         struct list_head *element;
4329         struct iwl_rx_mem_buffer *rxb;
4330         unsigned long flags;
4331         int write, rc;
4332
4333         spin_lock_irqsave(&rxq->lock, flags);
4334         write = rxq->write & ~0x7;
4335         while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
4336                 element = rxq->rx_free.next;
4337                 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
4338                 list_del(element);
4339                 rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->dma_addr);
4340                 rxq->queue[rxq->write] = rxb;
4341                 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
4342                 rxq->free_count--;
4343         }
4344         spin_unlock_irqrestore(&rxq->lock, flags);
4345         /* If the pre-allocated buffer pool is dropping low, schedule to
4346          * refill it */
4347         if (rxq->free_count <= RX_LOW_WATERMARK)
4348                 queue_work(priv->workqueue, &priv->rx_replenish);
4349
4350
4351         /* If we've added more space for the firmware to place data, tell it */
4352         if ((write != (rxq->write & ~0x7))
4353             || (abs(rxq->write - rxq->read) > 7)) {
4354                 spin_lock_irqsave(&rxq->lock, flags);
4355                 rxq->need_update = 1;
4356                 spin_unlock_irqrestore(&rxq->lock, flags);
4357                 rc = iwl_rx_queue_update_write_ptr(priv, rxq);
4358                 if (rc)
4359                         return rc;
4360         }
4361
4362         return 0;
4363 }
4364
4365 /**
4366  * iwl_rx_replenish - Move all used packets from rx_used to rx_free
4367  *
4368  * When moving to rx_free an SKB is allocated for the slot.
4369  *
4370  * Also restock the Rx queue via iwl_rx_queue_restock.
4371  * This is called as a scheduled work item (except during initialization).
4372  */
4373 void iwl_rx_replenish(void *data)
4374 {
4375         struct iwl_priv *priv = data;
4376         struct iwl_rx_queue *rxq = &priv->rxq;
4377         struct list_head *element;
4378         struct iwl_rx_mem_buffer *rxb;
4379         unsigned long flags;
4380         spin_lock_irqsave(&rxq->lock, flags);
4381         while (!list_empty(&rxq->rx_used)) {
4382                 element = rxq->rx_used.next;
4383                 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
4384                 rxb->skb =
4385                     alloc_skb(IWL_RX_BUF_SIZE, __GFP_NOWARN | GFP_ATOMIC);
4386                 if (!rxb->skb) {
4387                         if (net_ratelimit())
4388                                 printk(KERN_CRIT DRV_NAME
4389                                        ": Can not allocate SKB buffers\n");
4390                         /* We don't reschedule replenish work here -- we will
4391                          * call the restock method and if it still needs
4392                          * more buffers it will schedule replenish */
4393                         break;
4394                 }
4395                 priv->alloc_rxb_skb++;
4396                 list_del(element);
4397                 rxb->dma_addr =
4398                     pci_map_single(priv->pci_dev, rxb->skb->data,
4399                                    IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
4400                 list_add_tail(&rxb->list, &rxq->rx_free);
4401                 rxq->free_count++;
4402         }
4403         spin_unlock_irqrestore(&rxq->lock, flags);
4404
4405         spin_lock_irqsave(&priv->lock, flags);
4406         iwl_rx_queue_restock(priv);
4407         spin_unlock_irqrestore(&priv->lock, flags);
4408 }
4409
4410 /* Assumes that the skb field of the buffers in 'pool' is kept accurate.
4411  * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
4412  * This free routine walks the list of POOL entries and if SKB is set to
4413  * non-NULL it is unmapped and freed.
4414  */
4415 void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
4416 {
4417         int i;
4418         for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
4419                 if (rxq->pool[i].skb != NULL) {
4420                         pci_unmap_single(priv->pci_dev,
4421                                          rxq->pool[i].dma_addr,
4422                                          IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
4423                         dev_kfree_skb(rxq->pool[i].skb);
4424                 }
4425         }
4426
4427         pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
4428                             rxq->dma_addr);
4429         rxq->bd = NULL;
4430 }
4431
4432 int iwl_rx_queue_alloc(struct iwl_priv *priv)
4433 {
4434         struct iwl_rx_queue *rxq = &priv->rxq;
4435         struct pci_dev *dev = priv->pci_dev;
4436         int i;
4437
4438         spin_lock_init(&rxq->lock);
4439         INIT_LIST_HEAD(&rxq->rx_free);
4440         INIT_LIST_HEAD(&rxq->rx_used);
4441         rxq->bd = pci_alloc_consistent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr);
4442         if (!rxq->bd)
4443                 return -ENOMEM;
4444         /* Fill the rx_used queue with _all_ of the Rx buffers */
4445         for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
4446                 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
4447         /* Set us so that we have processed and used all buffers, but have
4448          * not restocked the Rx queue with fresh buffers */
4449         rxq->read = rxq->write = 0;
4450         rxq->free_count = 0;
4451         rxq->need_update = 0;
4452         return 0;
4453 }
4454
4455 void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
4456 {
4457         unsigned long flags;
4458         int i;
4459         spin_lock_irqsave(&rxq->lock, flags);
4460         INIT_LIST_HEAD(&rxq->rx_free);
4461         INIT_LIST_HEAD(&rxq->rx_used);
4462         /* Fill the rx_used queue with _all_ of the Rx buffers */
4463         for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
4464                 /* In the reset function, these buffers may have been allocated
4465                  * to an SKB, so we need to unmap and free potential storage */
4466                 if (rxq->pool[i].skb != NULL) {
4467                         pci_unmap_single(priv->pci_dev,
4468                                          rxq->pool[i].dma_addr,
4469                                          IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
4470                         priv->alloc_rxb_skb--;
4471                         dev_kfree_skb(rxq->pool[i].skb);
4472                         rxq->pool[i].skb = NULL;
4473                 }
4474                 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
4475         }
4476
4477         /* Set us so that we have processed and used all buffers, but have
4478          * not restocked the Rx queue with fresh buffers */
4479         rxq->read = rxq->write = 0;
4480         rxq->free_count = 0;
4481         spin_unlock_irqrestore(&rxq->lock, flags);
4482 }
4483
4484 /* Convert linear signal-to-noise ratio into dB */
4485 static u8 ratio2dB[100] = {
4486 /*       0   1   2   3   4   5   6   7   8   9 */
4487          0,  0,  6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */
4488         20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */
4489         26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */
4490         29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */
4491         32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */
4492         34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */
4493         36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */
4494         37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */
4495         38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */
4496         39, 39, 39, 39, 39, 40, 40, 40, 40, 40  /* 90 - 99 */
4497 };
4498
4499 /* Calculates a relative dB value from a ratio of linear
4500  *   (i.e. not dB) signal levels.
4501  * Conversion assumes that levels are voltages (20*log), not powers (10*log). */
4502 int iwl_calc_db_from_ratio(int sig_ratio)
4503 {
4504         /* Anything above 1000:1 just report as 60 dB */
4505         if (sig_ratio > 1000)
4506                 return 60;
4507
4508         /* Above 100:1, divide by 10 and use table,
4509          *   add 20 dB to make up for divide by 10 */
4510         if (sig_ratio > 100)
4511                 return (20 + (int)ratio2dB[sig_ratio/10]);
4512
4513         /* We shouldn't see this */
4514         if (sig_ratio < 1)
4515                 return 0;
4516
4517         /* Use table for ratios 1:1 - 99:1 */
4518         return (int)ratio2dB[sig_ratio];
4519 }
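/* Worked example (derived from the table above, for illustration only):
 * a linear ratio of 250:1 is above 100, so it is divided by 10 and looked
 * up -- ratio2dB[25] = 28 -- and 20 dB is added back, giving 48 dB.
 * A ratio of 50:1 is looked up directly: ratio2dB[50] = 34 dB. */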
4520
4521 #define PERFECT_RSSI (-20) /* dBm */
4522 #define WORST_RSSI (-95)   /* dBm */
4523 #define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI)
4524
4525 /* Calculate an indication of rx signal quality (a percentage, not dBm!).
4526  * See http://www.ces.clemson.edu/linux/signal_quality.shtml for info
4527  *   about formulas used below. */
4528 int iwl_calc_sig_qual(int rssi_dbm, int noise_dbm)
4529 {
4530         int sig_qual;
4531         int degradation = PERFECT_RSSI - rssi_dbm;
4532
4533         /* If we get a noise measurement, use signal-to-noise ratio (SNR)
4534          * as indicator; formula is (signal dbm - noise dbm).
4535          * SNR at or above 40 is a great signal (100%).
4536          * Below that, scale to fit SNR of 0 - 40 dB within 0 - 100% indicator.
4537          * Weakest usable signal is usually 10 - 15 dB SNR. */
4538         if (noise_dbm) {
4539                 if (rssi_dbm - noise_dbm >= 40)
4540                         return 100;
4541                 else if (rssi_dbm < noise_dbm)
4542                         return 0;
4543                 sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2;
4544
4545         /* Else use just the signal level.
4546          * This formula is a least squares fit of data points collected and
4547          *   compared with a reference system that had a percentage (%) display
4548          *   for signal quality. */
4549         } else
4550                 sig_qual = (100 * (RSSI_RANGE * RSSI_RANGE) - degradation *
4551                             (15 * RSSI_RANGE + 62 * degradation)) /
4552                            (RSSI_RANGE * RSSI_RANGE);
4553
4554         if (sig_qual > 100)
4555                 sig_qual = 100;
4556         else if (sig_qual < 1)
4557                 sig_qual = 0;
4558
4559         return sig_qual;
4560 }
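/* Worked example (illustration of the math above): with rssi_dbm = -60 and
 * noise_dbm = -85 the SNR is 25 dB, so sig_qual = (25 * 5) / 2 = 62%.
 * An SNR of 40 dB or better reports the full 100%. */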
4561
4562 /**
4563  * iwl_rx_handle - Main entry function for receiving responses from the uCode
4564  *
4565  * Uses the priv->rx_handlers callback function array to invoke
4566  * the appropriate handlers, including command responses,
4567  * frame-received notifications, and other notifications.
4568  */
4569 static void iwl_rx_handle(struct iwl_priv *priv)
4570 {
4571         struct iwl_rx_mem_buffer *rxb;
4572         struct iwl_rx_packet *pkt;
4573         struct iwl_rx_queue *rxq = &priv->rxq;
4574         u32 r, i;
4575         int reclaim;
4576         unsigned long flags;
4577
4578         r = iwl_hw_get_rx_read(priv);
4579         i = rxq->read;
4580
4581         /* Rx interrupt, but nothing sent from uCode */
4582         if (i == r)
4583                 IWL_DEBUG(IWL_DL_RX | IWL_DL_ISR, "r = %d, i = %d\n", r, i);
4584
4585         while (i != r) {
4586                 rxb = rxq->queue[i];
4587
4588                 /* If an RXB doesn't have a queue slot associated with it
4589                  * then a bug has been introduced in the queue refilling
4590                  * routines -- catch it here */
4591                 BUG_ON(rxb == NULL);
4592
4593                 rxq->queue[i] = NULL;
4594
4595                 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
4596                                             IWL_RX_BUF_SIZE,
4597                                             PCI_DMA_FROMDEVICE);
4598                 pkt = (struct iwl_rx_packet *)rxb->skb->data;
4599
4600                 /* Reclaim a command buffer only if this packet is a response
4601                  *   to a (driver-originated) command.
4602                  * If the packet (e.g. Rx frame) originated from uCode,
4603                  *   there is no command buffer to reclaim.
4604                  * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
4605                  *   but apparently a few don't get set; catch them here. */
4606                 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
4607                         (pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
4608                         (pkt->hdr.cmd != REPLY_4965_RX) &&
4609                         (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
4610                         (pkt->hdr.cmd != REPLY_TX);
4611
4612                 /* Based on type of command response or notification,
4613                  *   handle those that need handling via function in
4614                  *   rx_handlers table.  See iwl_setup_rx_handlers() */
4615                 if (priv->rx_handlers[pkt->hdr.cmd]) {
4616                         IWL_DEBUG(IWL_DL_HOST_COMMAND | IWL_DL_RX | IWL_DL_ISR,
4617                                 "r = %d, i = %d, %s, 0x%02x\n", r, i,
4618                                 get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
4619                         priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
4620                 } else {
4621                         /* No handling needed */
4622                         IWL_DEBUG(IWL_DL_HOST_COMMAND | IWL_DL_RX | IWL_DL_ISR,
4623                                 "r %d i %d No handler needed for %s, 0x%02x\n",
4624                                 r, i, get_cmd_string(pkt->hdr.cmd),
4625                                 pkt->hdr.cmd);
4626                 }
4627
4628                 if (reclaim) {
4629                         /* Invoke any callbacks, transfer the skb to caller,
4630                          * and fire off the (possibly) blocking iwl_send_cmd()
4631                          * as we reclaim the driver command queue */
4632                         if (rxb && rxb->skb)
4633                                 iwl_tx_cmd_complete(priv, rxb);
4634                         else
4635                                 IWL_WARNING("Claim null rxb?\n");
4636                 }
4637
4638                 /* For now we just don't re-use anything.  We can tweak this
4639                  * later to try and re-use notification packets and SKBs that
4640                  * fail to Rx correctly */
4641                 if (rxb->skb != NULL) {
4642                         priv->alloc_rxb_skb--;
4643                         dev_kfree_skb_any(rxb->skb);
4644                         rxb->skb = NULL;
4645                 }
4646
4647                 pci_unmap_single(priv->pci_dev, rxb->dma_addr,
4648                                  IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
4649                 spin_lock_irqsave(&rxq->lock, flags);
4650                 list_add_tail(&rxb->list, &priv->rxq.rx_used);
4651                 spin_unlock_irqrestore(&rxq->lock, flags);
4652                 i = (i + 1) & RX_QUEUE_MASK;
4653         }
4654
4655         /* Update the driver's Rx queue read pointer to where we left off */
4656         priv->rxq.read = i;
4657         iwl_rx_queue_restock(priv);
4658 }
4659
4660 int iwl_tx_queue_update_write_ptr(struct iwl_priv *priv,
4661                                   struct iwl_tx_queue *txq)
4662 {
4663         u32 reg = 0;
4664         int rc = 0;
4665         int txq_id = txq->q.id;
4666
4667         if (txq->need_update == 0)
4668                 return rc;
4669
4670         /* if we're trying to save power */
4671         if (test_bit(STATUS_POWER_PMI, &priv->status)) {
4672                 /* wake up nic if it's powered down ...
4673                  * uCode will wake up, and interrupt us again, so next
4674                  * time we'll skip this part. */
4675                 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
4676
4677                 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
4678                         IWL_DEBUG_INFO("Requesting wakeup, GP1 = 0x%x\n", reg);
4679                         iwl_set_bit(priv, CSR_GP_CNTRL,
4680                                     CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
4681                         return rc;
4682                 }
4683
4684                 /* restore this queue's parameters in nic hardware. */
4685                 rc = iwl_grab_restricted_access(priv);
4686                 if (rc)
4687                         return rc;
4688                 iwl_write_restricted(priv, HBUS_TARG_WRPTR,
4689                                      txq->q.first_empty | (txq_id << 8));
4690                 iwl_release_restricted_access(priv);
4691
4692         /* else not in power-save mode, uCode will never sleep when we're
4693          * trying to tx (during RFKILL, we're not trying to tx). */
4694         } else
4695                 iwl_write32(priv, HBUS_TARG_WRPTR,
4696                             txq->q.first_empty | (txq_id << 8));
4697
4698         txq->need_update = 0;
4699
4700         return rc;
4701 }
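/* For illustration: the value written to HBUS_TARG_WRPTR above combines the
 * write index with the queue id shifted into bits 8 and up, e.g. queue 4
 * with first_empty = 10 is written as 10 | (4 << 8) = 0x40A. */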
4702
4703 #ifdef CONFIG_IWLWIFI_DEBUG
4704 static void iwl_print_rx_config_cmd(struct iwl_rxon_cmd *rxon)
4705 {
4706         IWL_DEBUG_RADIO("RX CONFIG:\n");
4707         iwl_print_hex_dump(IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
4708         IWL_DEBUG_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
4709         IWL_DEBUG_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
4710         IWL_DEBUG_RADIO("u32 filter_flags: 0x%08x\n",
4711                         le32_to_cpu(rxon->filter_flags));
4712         IWL_DEBUG_RADIO("u8 dev_type: 0x%x\n", rxon->dev_type);
4713         IWL_DEBUG_RADIO("u8 ofdm_basic_rates: 0x%02x\n",
4714                         rxon->ofdm_basic_rates);
4715         IWL_DEBUG_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates);
4716         IWL_DEBUG_RADIO("u8[6] node_addr: " MAC_FMT "\n",
4717                         MAC_ARG(rxon->node_addr));
4718         IWL_DEBUG_RADIO("u8[6] bssid_addr: " MAC_FMT "\n",
4719                         MAC_ARG(rxon->bssid_addr));
4720         IWL_DEBUG_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
4721 }
4722 #endif
4723
4724 static void iwl_enable_interrupts(struct iwl_priv *priv)
4725 {
4726         IWL_DEBUG_ISR("Enabling interrupts\n");
4727         set_bit(STATUS_INT_ENABLED, &priv->status);
4728         iwl_write32(priv, CSR_INT_MASK, CSR_INI_SET_MASK);
4729 }
4730
4731 static inline void iwl_disable_interrupts(struct iwl_priv *priv)
4732 {
4733         clear_bit(STATUS_INT_ENABLED, &priv->status);
4734
4735         /* disable interrupts from uCode/NIC to host */
4736         iwl_write32(priv, CSR_INT_MASK, 0x00000000);
4737
4738         /* acknowledge/clear/reset any interrupts still pending
4739          * from uCode or flow handler (Rx/Tx DMA) */
4740         iwl_write32(priv, CSR_INT, 0xffffffff);
4741         iwl_write32(priv, CSR_FH_INT_STATUS, 0xffffffff);
4742         IWL_DEBUG_ISR("Disabled interrupts\n");
4743 }
4744
4745 static const char *desc_lookup(int i)
4746 {
4747         switch (i) {
4748         case 1:
4749                 return "FAIL";
4750         case 2:
4751                 return "BAD_PARAM";
4752         case 3:
4753                 return "BAD_CHECKSUM";
4754         case 4:
4755                 return "NMI_INTERRUPT";
4756         case 5:
4757                 return "SYSASSERT";
4758         case 6:
4759                 return "FATAL_ERROR";
4760         }
4761
4762         return "UNKNOWN";
4763 }
4764
4765 #define ERROR_START_OFFSET  (1 * sizeof(u32))
4766 #define ERROR_ELEM_SIZE     (7 * sizeof(u32))
4767
4768 static void iwl_dump_nic_error_log(struct iwl_priv *priv)
4769 {
4770         u32 data2, line;
4771         u32 desc, time, count, base, data1;
4772         u32 blink1, blink2, ilink1, ilink2;
4773         int rc;
4774
4775         base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
4776
4777         if (!iwl_hw_valid_rtc_data_addr(base)) {
4778                 IWL_ERROR("Not valid error log pointer 0x%08X\n", base);
4779                 return;
4780         }
4781
4782         rc = iwl_grab_restricted_access(priv);
4783         if (rc) {
4784                 IWL_WARNING("Can not read from adapter at this time.\n");
4785                 return;
4786         }
4787
4788         count = iwl_read_restricted_mem(priv, base);
4789
4790         if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
4791                 IWL_ERROR("Start IWL Error Log Dump:\n");
4792                 IWL_ERROR("Status: 0x%08lX, Config: %08X count: %d\n",
4793                           priv->status, priv->config, count);
4794         }
4795
4796         desc = iwl_read_restricted_mem(priv, base + 1 * sizeof(u32));
4797         blink1 = iwl_read_restricted_mem(priv, base + 3 * sizeof(u32));
4798         blink2 = iwl_read_restricted_mem(priv, base + 4 * sizeof(u32));
4799         ilink1 = iwl_read_restricted_mem(priv, base + 5 * sizeof(u32));
4800         ilink2 = iwl_read_restricted_mem(priv, base + 6 * sizeof(u32));
4801         data1 = iwl_read_restricted_mem(priv, base + 7 * sizeof(u32));
4802         data2 = iwl_read_restricted_mem(priv, base + 8 * sizeof(u32));
4803         line = iwl_read_restricted_mem(priv, base + 9 * sizeof(u32));
4804         time = iwl_read_restricted_mem(priv, base + 11 * sizeof(u32));
4805
4806         IWL_ERROR("Desc               Time       "
4807                   "data1      data2      line\n");
4808         IWL_ERROR("%-13s (#%d) %010u 0x%08X 0x%08X %u\n",
4809                   desc_lookup(desc), desc, time, data1, data2, line);
4810         IWL_ERROR("blink1  blink2  ilink1  ilink2\n");
4811         IWL_ERROR("0x%05X 0x%05X 0x%05X 0x%05X\n", blink1, blink2,
4812                   ilink1, ilink2);
4813
4814         iwl_release_restricted_access(priv);
4815 }
4816
4817 #define EVENT_START_OFFSET  (4 * sizeof(u32))
4818
4819 /**
4820  * iwl_print_event_log - Dump error event log to syslog
4821  *
4822  * NOTE: Must be called with iwl_grab_restricted_access() already obtained!
4823  */
4824 static void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
4825                                 u32 num_events, u32 mode)
4826 {
4827         u32 i;
4828         u32 base;       /* SRAM byte address of event log header */
4829         u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
4830         u32 ptr;        /* SRAM byte address of log data */
4831         u32 ev, time, data; /* event log data */
4832
4833         if (num_events == 0)
4834                 return;
4835
4836         base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
4837
4838         if (mode == 0)
4839                 event_size = 2 * sizeof(u32);
4840         else
4841                 event_size = 3 * sizeof(u32);
4842
4843         ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
4844
4845         /* "time" is actually "data" for mode 0 (no timestamp).
4846          * place event id # at far right for easier visual parsing. */
4847         for (i = 0; i < num_events; i++) {
4848                 ev = iwl_read_restricted_mem(priv, ptr);
4849                 ptr += sizeof(u32);
4850                 time = iwl_read_restricted_mem(priv, ptr);
4851                 ptr += sizeof(u32);
4852                 if (mode == 0)
4853                         IWL_ERROR("0x%08x\t%04u\n", time, ev); /* data, ev */
4854                 else {
4855                         data = iwl_read_restricted_mem(priv, ptr);
4856                         ptr += sizeof(u32);
4857                         IWL_ERROR("%010u\t0x%08x\t%04u\n", time, data, ev);
4858                 }
4859         }
4860 }
4861
4862 static void iwl_dump_nic_event_log(struct iwl_priv *priv)
4863 {
4864         int rc;
4865         u32 base;       /* SRAM byte address of event log header */
4866         u32 capacity;   /* event log capacity in # entries */
4867         u32 mode;       /* 0 - no timestamp, 1 - timestamp recorded */
4868         u32 num_wraps;  /* # times uCode wrapped to top of log */
4869         u32 next_entry; /* index of next entry to be written by uCode */
4870         u32 size;       /* # entries that we'll print */
4871
4872         base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
4873         if (!iwl_hw_valid_rtc_data_addr(base)) {
4874                 IWL_ERROR("Invalid event log pointer 0x%08X\n", base);
4875                 return;
4876         }
4877
4878         rc = iwl_grab_restricted_access(priv);
4879         if (rc) {
4880                 IWL_WARNING("Can not read from adapter at this time.\n");
4881                 return;
4882         }
4883
4884         /* event log header */
4885         capacity = iwl_read_restricted_mem(priv, base);
4886         mode = iwl_read_restricted_mem(priv, base + (1 * sizeof(u32)));
4887         num_wraps = iwl_read_restricted_mem(priv, base + (2 * sizeof(u32)));
4888         next_entry = iwl_read_restricted_mem(priv, base + (3 * sizeof(u32)));
4889
4890         size = num_wraps ? capacity : next_entry;
4891
4892         /* bail out if nothing in log */
4893         if (size == 0) {
4894                 IWL_ERROR("Start IPW Event Log Dump: nothing in log\n");
4895                 iwl_release_restricted_access(priv);
4896                 return;
4897         }
4898
4899         IWL_ERROR("Start IPW Event Log Dump: display count %d, wraps %d\n",
4900                   size, num_wraps);
4901
4902         /* if uCode has wrapped back to top of log, start at the oldest entry,
4903  * i.e. the next one that uCode would fill.
4904         if (num_wraps)
4905                 iwl_print_event_log(priv, next_entry,
4906                                     capacity - next_entry, mode);
4907
4908         /* (then/else) start at top of log */
4909         iwl_print_event_log(priv, 0, next_entry, mode);
4910
4911         iwl_release_restricted_access(priv);
4912 }
4913
4914 /**
4915  * iwl_irq_handle_error - called for HW or SW error interrupt from card
4916  */
4917 static void iwl_irq_handle_error(struct iwl_priv *priv)
4918 {
4919         /* Set the FW error flag -- cleared on iwl_down */
4920         set_bit(STATUS_FW_ERROR, &priv->status);
4921
4922         /* Cancel currently queued command. */
4923         clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
4924
4925 #ifdef CONFIG_IWLWIFI_DEBUG
4926         if (iwl_debug_level & IWL_DL_FW_ERRORS) {
4927                 iwl_dump_nic_error_log(priv);
4928                 iwl_dump_nic_event_log(priv);
4929                 iwl_print_rx_config_cmd(&priv->staging_rxon);
4930         }
4931 #endif
4932
4933         wake_up_interruptible(&priv->wait_command_queue);
4934
4935         /* Keep the restart process from trying to send host
4936          * commands by clearing the INIT status bit */
4937         clear_bit(STATUS_READY, &priv->status);
4938
4939         if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
4940                 IWL_DEBUG(IWL_DL_INFO | IWL_DL_FW_ERRORS,
4941                           "Restarting adapter due to uCode error.\n");
4942
4943                 if (iwl_is_associated(priv)) {
4944                         memcpy(&priv->recovery_rxon, &priv->active_rxon,
4945                                sizeof(priv->recovery_rxon));
4946                         priv->error_recovering = 1;
4947                 }
4948                 queue_work(priv->workqueue, &priv->restart);
4949         }
4950 }
4951
4952 static void iwl_error_recovery(struct iwl_priv *priv)
4953 {
4954         unsigned long flags;
4955
4956         memcpy(&priv->staging_rxon, &priv->recovery_rxon,
4957                sizeof(priv->staging_rxon));
4958         priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
4959         iwl_commit_rxon(priv);
4960
4961         iwl_rxon_add_station(priv, priv->bssid, 1);
4962
4963         spin_lock_irqsave(&priv->lock, flags);
4964         priv->assoc_id = le16_to_cpu(priv->staging_rxon.assoc_id);
4965         priv->error_recovering = 0;
4966         spin_unlock_irqrestore(&priv->lock, flags);
4967 }
4968
4969 static void iwl_irq_tasklet(struct iwl_priv *priv)
4970 {
4971         u32 inta, handled = 0;
4972         u32 inta_fh;
4973         unsigned long flags;
4974 #ifdef CONFIG_IWLWIFI_DEBUG
4975         u32 inta_mask;
4976 #endif
4977
4978         spin_lock_irqsave(&priv->lock, flags);
4979
4980         /* Ack/clear/reset pending uCode interrupts.
4981          * Note:  Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
4982          *  and will clear only when CSR_FH_INT_STATUS gets cleared. */
4983         inta = iwl_read32(priv, CSR_INT);
4984         iwl_write32(priv, CSR_INT, inta);
4985
4986         /* Ack/clear/reset pending flow-handler (DMA) interrupts.
4987          * Any new interrupts that happen after this, either while we're
4988          * in this tasklet, or later, will show up in next ISR/tasklet. */
4989         inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
4990         iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);
4991
4992 #ifdef CONFIG_IWLWIFI_DEBUG
4993         if (iwl_debug_level & IWL_DL_ISR) {
4994                 inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
4995                 IWL_DEBUG_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
4996                               inta, inta_mask, inta_fh);
4997         }
4998 #endif
4999
5000         /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
5001          * atomic, make sure that inta covers all the interrupts that
5002          * we've discovered, even if FH interrupt came in just after
5003          * reading CSR_INT. */
5004         if (inta_fh & CSR_FH_INT_RX_MASK)
5005                 inta |= CSR_INT_BIT_FH_RX;
5006         if (inta_fh & CSR_FH_INT_TX_MASK)
5007                 inta |= CSR_INT_BIT_FH_TX;
5008
5009         /* Now service all interrupt bits discovered above. */
5010         if (inta & CSR_INT_BIT_HW_ERR) {
5011                 IWL_ERROR("Microcode HW error detected.  Restarting.\n");
5012
5013                 /* Tell the device to stop sending interrupts */
5014                 iwl_disable_interrupts(priv);
5015
5016                 iwl_irq_handle_error(priv);
5017
5018                 handled |= CSR_INT_BIT_HW_ERR;
5019
5020                 spin_unlock_irqrestore(&priv->lock, flags);
5021
5022                 return;
5023         }
5024
5025 #ifdef CONFIG_IWLWIFI_DEBUG
5026         if (iwl_debug_level & (IWL_DL_ISR)) {
5027                 /* NIC fires this, but we don't use it, redundant with WAKEUP */
5028                 if (inta & CSR_INT_BIT_MAC_CLK_ACTV)
5029                         IWL_DEBUG_ISR("Microcode started or stopped.\n");
5030
5031                 /* Alive notification via Rx interrupt will do the real work */
5032                 if (inta & CSR_INT_BIT_ALIVE)
5033                         IWL_DEBUG_ISR("Alive interrupt\n");
5034         }
5035 #endif
5036         /* Safely ignore these bits for debug checks below */
5037         inta &= ~(CSR_INT_BIT_MAC_CLK_ACTV | CSR_INT_BIT_ALIVE);
5038
5039         /* HW RF KILL switch toggled (4965 only) */
5040         if (inta & CSR_INT_BIT_RF_KILL) {
5041                 int hw_rf_kill = 0;
5042                 if (!(iwl_read32(priv, CSR_GP_CNTRL) &
5043                                 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
5044                         hw_rf_kill = 1;
5045
5046                 IWL_DEBUG(IWL_DL_INFO | IWL_DL_RF_KILL | IWL_DL_ISR,
5047                                 "RF_KILL bit toggled to %s.\n",
5048                                 hw_rf_kill ? "disable radio":"enable radio");
5049
5050                 /* Queue restart only if RF_KILL switch was set to "kill"
5051                  *   when we loaded driver, and is now set to "enable".
5052                  * After we're Alive, RF_KILL gets handled by
5053                  *   iwl_rx_card_state_notif() */
5054                 if (!hw_rf_kill && !test_bit(STATUS_ALIVE, &priv->status))
5055                         queue_work(priv->workqueue, &priv->restart);
5056
5057                 handled |= CSR_INT_BIT_RF_KILL;
5058         }
5059
5060         /* Chip got too hot and stopped itself (4965 only) */
5061         if (inta & CSR_INT_BIT_CT_KILL) {
5062                 IWL_ERROR("Microcode CT kill error detected.\n");
5063                 handled |= CSR_INT_BIT_CT_KILL;
5064         }
5065
5066         /* Error detected by uCode */
5067         if (inta & CSR_INT_BIT_SW_ERR) {
5068                 IWL_ERROR("Microcode SW error detected.  Restarting 0x%X.\n",
5069                           inta);
5070                 iwl_irq_handle_error(priv);
5071                 handled |= CSR_INT_BIT_SW_ERR;
5072         }
5073
5074         /* uCode wakes up after power-down sleep */
5075         if (inta & CSR_INT_BIT_WAKEUP) {
5076                 IWL_DEBUG_ISR("Wakeup interrupt\n");
5077                 iwl_rx_queue_update_write_ptr(priv, &priv->rxq);
5078                 iwl_tx_queue_update_write_ptr(priv, &priv->txq[0]);
5079                 iwl_tx_queue_update_write_ptr(priv, &priv->txq[1]);
5080                 iwl_tx_queue_update_write_ptr(priv, &priv->txq[2]);
5081                 iwl_tx_queue_update_write_ptr(priv, &priv->txq[3]);
5082                 iwl_tx_queue_update_write_ptr(priv, &priv->txq[4]);
5083                 iwl_tx_queue_update_write_ptr(priv, &priv->txq[5]);
5084
5085                 handled |= CSR_INT_BIT_WAKEUP;
5086         }
5087
5088         /* All uCode command responses, including Tx command responses,
5089          * Rx "responses" (frame-received notification), and other
5090          * notifications from uCode come through here. */
5091         if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
5092                 iwl_rx_handle(priv);
5093                 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
5094         }
5095
5096         if (inta & CSR_INT_BIT_FH_TX) {
5097                 IWL_DEBUG_ISR("Tx interrupt\n");
5098                 handled |= CSR_INT_BIT_FH_TX;
5099         }
5100
5101         if (inta & ~handled)
5102                 IWL_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
5103
5104         if (inta & ~CSR_INI_SET_MASK) {
5105                 IWL_WARNING("Disabled INTA bits 0x%08x were pending\n",
5106                          inta & ~CSR_INI_SET_MASK);
5107                 IWL_WARNING("   with FH_INT = 0x%08x\n", inta_fh);
5108         }
5109
5110         /* Re-enable all interrupts */
5111         iwl_enable_interrupts(priv);
5112
5113 #ifdef CONFIG_IWLWIFI_DEBUG
5114         if (iwl_debug_level & (IWL_DL_ISR)) {
5115                 inta = iwl_read32(priv, CSR_INT);
5116                 inta_mask = iwl_read32(priv, CSR_INT_MASK);
5117                 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
5118                 IWL_DEBUG_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
5119                         "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
5120         }
5121 #endif
5122         spin_unlock_irqrestore(&priv->lock, flags);
5123 }
5124
5125 static irqreturn_t iwl_isr(int irq, void *data)
5126 {
5127         struct iwl_priv *priv = data;
5128         u32 inta, inta_mask;
5129         u32 inta_fh;
5130         if (!priv)
5131                 return IRQ_NONE;
5132
5133         spin_lock(&priv->lock);
5134
5135         /* Disable (but don't clear!) interrupts here to avoid
5136          *    back-to-back ISRs and sporadic interrupts from our NIC.
5137          * If we have something to service, the tasklet will re-enable ints.
5138          * If we *don't* have something, we'll re-enable before leaving here. */
5139         inta_mask = iwl_read32(priv, CSR_INT_MASK);  /* just for debug */
5140         iwl_write32(priv, CSR_INT_MASK, 0x00000000);
5141
5142         /* Discover which interrupts are active/pending */
5143         inta = iwl_read32(priv, CSR_INT);
5144         inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
5145
5146         /* Ignore interrupt if there's nothing in NIC to service.
5147          * This may be due to IRQ shared with another device,
5148          * or due to sporadic interrupts thrown from our NIC. */
5149         if (!inta && !inta_fh) {
5150                 IWL_DEBUG_ISR("Ignore interrupt, inta == 0, inta_fh == 0\n");
5151                 goto none;
5152         }
5153
5154         if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
5155                 /* Hardware disappeared */
5156                 IWL_WARNING("HARDWARE GONE?? INTA == 0x%08x\n", inta);
5157                 goto none;
5158         }
5159
5160         IWL_DEBUG_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
5161                       inta, inta_mask, inta_fh);
5162
5163         /* iwl_irq_tasklet() will service interrupts and re-enable them */
5164         tasklet_schedule(&priv->irq_tasklet);
5165         spin_unlock(&priv->lock);
5166
5167         return IRQ_HANDLED;
5168
5169  none:
5170         /* re-enable interrupts here since we don't have anything to service. */
5171         iwl_enable_interrupts(priv);
5172         spin_unlock(&priv->lock);
5173         return IRQ_NONE;
5174 }
5175
5176 /************************** EEPROM BANDS ****************************
5177  *
5178  * The iwl_eeprom_band definitions below provide the mapping from the
5179  * EEPROM contents to the specific channel number supported for each
5180  * band.
5181  *
5182  * For example, iwl_priv->eeprom.band_3_channels[4] from the band_3
5183  * definition below maps to physical channel 42 in the 5.2GHz spectrum.
5184  * The specific geography and calibration information for that channel
5185  * is contained in the eeprom map itself.
5186  *
5187  * During init, we copy the eeprom information and channel map
5188  * information into priv->channel_info_24/52 and priv->channel_map_24/52
5189  *
5190  * channel_map_24/52 provides the index in the channel_info array for a
5191  * given channel.  We have to have two separate maps as there is channel
5192  * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
5193  * band_2.
5194  *
5195  * A value of 0xff stored in the channel_map indicates that the channel
5196  * is not supported by the hardware at all.
5197  *
5198  * A value of 0xfe in the channel_map indicates that the channel is not
5199  * valid for Tx with the current hardware.  This means that
5200  * while the system can tune and receive on a given channel, it may not
5201  * be able to associate or transmit any frames on that
5202  * channel.  There is no corresponding channel information for that
5203  * entry.
5204  *
5205  *********************************************************************/
5206
5207 /* 2.4 GHz */
5208 static const u8 iwl_eeprom_band_1[14] = {
5209         1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
5210 };
5211
5212 /* 5.2 GHz bands */
5213 static const u8 iwl_eeprom_band_2[] = {
5214         183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
5215 };
5216
5217 static const u8 iwl_eeprom_band_3[] = { /* 5205-5320MHz */
5218         34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
5219 };
5220
5221 static const u8 iwl_eeprom_band_4[] = { /* 5500-5700MHz */
5222         100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
5223 };
5224
5225 static const u8 iwl_eeprom_band_5[] = { /* 5725-5825MHz */
5226         145, 149, 153, 157, 161, 165
5227 };
5228
5229 static u8 iwl_eeprom_band_6[] = {       /* 2.4 FAT channel */
5230         1, 2, 3, 4, 5, 6, 7
5231 };
5232
5233 static u8 iwl_eeprom_band_7[] = {       /* 5.2 FAT channel */
5234         36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
5235 };
5236
5237 static void iwl_init_band_reference(const struct iwl_priv *priv, int band,
5238                                     int *eeprom_ch_count,
5239                                     const struct iwl_eeprom_channel
5240                                     **eeprom_ch_info,
5241                                     const u8 **eeprom_ch_index)
5242 {
5243         switch (band) {
5244         case 1:         /* 2.4GHz band */
5245                 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1);
5246                 *eeprom_ch_info = priv->eeprom.band_1_channels;
5247                 *eeprom_ch_index = iwl_eeprom_band_1;
5248                 break;
5249         case 2:         /* 5.2GHz band */
5250                 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2);
5251                 *eeprom_ch_info = priv->eeprom.band_2_channels;
5252                 *eeprom_ch_index = iwl_eeprom_band_2;
5253                 break;
5254         case 3:         /* 5.2GHz band */
5255                 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3);
5256                 *eeprom_ch_info = priv->eeprom.band_3_channels;
5257                 *eeprom_ch_index = iwl_eeprom_band_3;
5258                 break;
5259         case 4:         /* 5.2GHz band */
5260                 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4);
5261                 *eeprom_ch_info = priv->eeprom.band_4_channels;
5262                 *eeprom_ch_index = iwl_eeprom_band_4;
5263                 break;
5264         case 5:         /* 5.2GHz band */
5265                 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5);
5266                 *eeprom_ch_info = priv->eeprom.band_5_channels;
5267                 *eeprom_ch_index = iwl_eeprom_band_5;
5268                 break;
5269         case 6:
5270                 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_6);
5271                 *eeprom_ch_info = priv->eeprom.band_24_channels;
5272                 *eeprom_ch_index = iwl_eeprom_band_6;
5273                 break;
5274         case 7:
5275                 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_7);
5276                 *eeprom_ch_info = priv->eeprom.band_52_channels;
5277                 *eeprom_ch_index = iwl_eeprom_band_7;
5278                 break;
5279         default:
5280                 BUG();
5281                 return;
5282         }
5283 }
5284
5285 const struct iwl_channel_info *iwl_get_channel_info(const struct iwl_priv *priv,
5286                                                     int phymode, u16 channel)
5287 {
5288         int i;
5289
5290         switch (phymode) {
5291         case MODE_IEEE80211A:
5292                 for (i = 14; i < priv->channel_count; i++) {
5293                         if (priv->channel_info[i].channel == channel)
5294                                 return &priv->channel_info[i];
5295                 }
5296                 break;
5297
5298         case MODE_IEEE80211B:
5299         case MODE_IEEE80211G:
5300                 if (channel >= 1 && channel <= 14)
5301                         return &priv->channel_info[channel - 1];
5302                 break;
5303
5304         }
5305
5306         return NULL;
5307 }
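/* Usage example: iwl_get_channel_info(priv, MODE_IEEE80211G, 6) returns
 * &priv->channel_info[5], since 2.4 GHz channels 1-14 occupy the first 14
 * slots of the channel_info array; 5.2 GHz channels are found by a linear
 * search of the remaining entries. */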
5308
5309 #define CHECK_AND_PRINT(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
5310                             ? # x " " : "")
5311
5312 static int iwl_init_channel_map(struct iwl_priv *priv)
5313 {
5314         int eeprom_ch_count = 0;
5315         const u8 *eeprom_ch_index = NULL;
5316         const struct iwl_eeprom_channel *eeprom_ch_info = NULL;
5317         int band, ch;
5318         struct iwl_channel_info *ch_info;
5319
5320         if (priv->channel_count) {
5321                 IWL_DEBUG_INFO("Channel map already initialized.\n");
5322                 return 0;
5323         }
5324
5325         if (priv->eeprom.version < 0x2f) {
5326                 IWL_WARNING("Unsupported EEPROM version: 0x%04X\n",
5327                             priv->eeprom.version);
5328                 return -EINVAL;
5329         }
5330
5331         IWL_DEBUG_INFO("Initializing regulatory info from EEPROM\n");
5332
5333         priv->channel_count =
5334             ARRAY_SIZE(iwl_eeprom_band_1) +
5335             ARRAY_SIZE(iwl_eeprom_band_2) +
5336             ARRAY_SIZE(iwl_eeprom_band_3) +
5337             ARRAY_SIZE(iwl_eeprom_band_4) +
5338             ARRAY_SIZE(iwl_eeprom_band_5);
5339
5340         IWL_DEBUG_INFO("Parsing data for %d channels.\n", priv->channel_count);
5341
5342         priv->channel_info = kzalloc(sizeof(struct iwl_channel_info) *
5343                                      priv->channel_count, GFP_KERNEL);
5344         if (!priv->channel_info) {
5345                 IWL_ERROR("Could not allocate channel_info\n");
5346                 priv->channel_count = 0;
5347                 return -ENOMEM;
5348         }
5349
5350         ch_info = priv->channel_info;
5351
5352         /* Loop through the 5 EEPROM bands adding them in order to the
5353          * channel map we maintain (which contains additional information
5354          * beyond what is stored in the EEPROM) */
5355         for (band = 1; band <= 5; band++) {
5356
5357                 iwl_init_band_reference(priv, band, &eeprom_ch_count,
5358                                         &eeprom_ch_info, &eeprom_ch_index);
5359
5360                 /* Loop through each band adding each of the channels */
5361                 for (ch = 0; ch < eeprom_ch_count; ch++) {
5362                         ch_info->channel = eeprom_ch_index[ch];
5363                         ch_info->phymode = (band == 1) ? MODE_IEEE80211B :
5364                             MODE_IEEE80211A;
5365
5366                         /* permanently store EEPROM's channel regulatory flags
5367                          *   and max power in channel info database. */
5368                         ch_info->eeprom = eeprom_ch_info[ch];
5369
5370                         /* Copy the run-time flags so they are there even on
5371                          * invalid channels */
5372                         ch_info->flags = eeprom_ch_info[ch].flags;
5373
5374                         if (!(is_channel_valid(ch_info))) {
5375                                 IWL_DEBUG_INFO("Ch. %d Flags %x [%sGHz] - "
5376                                                "No traffic\n",
5377                                                ch_info->channel,
5378                                                ch_info->flags,
5379                                                is_channel_a_band(ch_info) ?
5380                                                "5.2" : "2.4");
5381                                 ch_info++;
5382                                 continue;
5383                         }
5384
5385                         /* Initialize regulatory-based run-time data */
5386                         ch_info->max_power_avg = ch_info->curr_txpow =
5387                             eeprom_ch_info[ch].max_power_avg;
5388                         ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
5389                         ch_info->min_power = 0;
5390
5391                         IWL_DEBUG_INFO("Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x"
5392                                        " %ddBm): Ad-Hoc %ssupported\n",
5393                                        ch_info->channel,
5394                                        is_channel_a_band(ch_info) ?
5395                                        "5.2" : "2.4",
5396                                        CHECK_AND_PRINT(IBSS),
5397                                        CHECK_AND_PRINT(ACTIVE),
5398                                        CHECK_AND_PRINT(RADAR),
5399                                        CHECK_AND_PRINT(WIDE),
5400                                        CHECK_AND_PRINT(NARROW),
5401                                        CHECK_AND_PRINT(DFS),
5402                                        eeprom_ch_info[ch].flags,
5403                                        eeprom_ch_info[ch].max_power_avg,
5404                                        ((eeprom_ch_info[ch].
5405                                          flags & EEPROM_CHANNEL_IBSS)
5406                                         && !(eeprom_ch_info[ch].
5407                                              flags & EEPROM_CHANNEL_RADAR))
5408                                        ? "" : "not ");
5409
5410                         /* Set the user_txpower_limit to the highest power
5411                          * supported by any channel */
5412                         if (eeprom_ch_info[ch].max_power_avg >
5413                             priv->user_txpower_limit)
5414                                 priv->user_txpower_limit =
5415                                     eeprom_ch_info[ch].max_power_avg;
5416
5417                         ch_info++;
5418                 }
5419         }
5420
5421         for (band = 6; band <= 7; band++) {
5422                 int phymode;
5423                 u8 fat_extension_chan;
5424
5425                 iwl_init_band_reference(priv, band, &eeprom_ch_count,
5426                                         &eeprom_ch_info, &eeprom_ch_index);
5427
5428                 phymode = (band == 6) ? MODE_IEEE80211B : MODE_IEEE80211A;
5429                 /* Loop through each band adding each of the channels */
5430                 for (ch = 0; ch < eeprom_ch_count; ch++) {
5431
5432                         if ((band == 6) &&
5433                             ((eeprom_ch_index[ch] == 5) ||
5434                             (eeprom_ch_index[ch] == 6) ||
5435                             (eeprom_ch_index[ch] == 7)))
5436                                fat_extension_chan = HT_IE_EXT_CHANNEL_MAX;
5437                         else
5438                                 fat_extension_chan = HT_IE_EXT_CHANNEL_ABOVE;
5439
5440                         iwl4965_set_fat_chan_info(priv, phymode,
5441                                                   eeprom_ch_index[ch],
5442                                                   &(eeprom_ch_info[ch]),
5443                                                   fat_extension_chan);
5444
5445                         iwl4965_set_fat_chan_info(priv, phymode,
5446                                                   (eeprom_ch_index[ch] + 4),
5447                                                   &(eeprom_ch_info[ch]),
5448                                                   HT_IE_EXT_CHANNEL_BELOW);
5449                 }
5450         }
5451
5452         return 0;
5453 }
5454
5455 /* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
5456  * sending probe req.  This should be set long enough to hear probe responses
5457  * from more than one AP.  */
5458 #define IWL_ACTIVE_DWELL_TIME_24    (20)        /* all times in msec */
5459 #define IWL_ACTIVE_DWELL_TIME_52    (10)
5460
5461 /* For faster active scanning, scan will move to the next channel if fewer than
5462  * PLCP_QUIET_THRESH packets are heard on this channel within
5463  * ACTIVE_QUIET_TIME after sending probe request.  This shortens the dwell
5464  * time if it's a quiet channel (nothing responded to our probe, and there's
5465  * no other traffic).
5466  * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
5467 #define IWL_PLCP_QUIET_THRESH       __constant_cpu_to_le16(1)   /* packets */
5468 #define IWL_ACTIVE_QUIET_TIME       __constant_cpu_to_le16(5)   /* msec */
5469
5470 /* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
5471  * Must be set longer than active dwell time.
5472  * For the most reliable scan, set > AP beacon interval (typically 100msec). */
5473 #define IWL_PASSIVE_DWELL_TIME_24   (20)        /* all times in msec */
5474 #define IWL_PASSIVE_DWELL_TIME_52   (10)
5475 #define IWL_PASSIVE_DWELL_BASE      (100)
5476 #define IWL_CHANNEL_TUNE_TIME       5
5477
5478 static inline u16 iwl_get_active_dwell_time(struct iwl_priv *priv, int phymode)
5479 {
5480         if (phymode == MODE_IEEE80211A)
5481                 return IWL_ACTIVE_DWELL_TIME_52;
5482         else
5483                 return IWL_ACTIVE_DWELL_TIME_24;
5484 }
5485
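/* Worked example (illustrative only): when associated with a typical 100 msec
 * beacon interval, the passive dwell is capped at IWL_PASSIVE_DWELL_BASE (100),
 * reduced to 100 * 98 / 100 - 2 * IWL_CHANNEL_TUNE_TIME = 88 msec, and finally
 * forced to exceed the active dwell time by at least 1 msec. */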
5486 static u16 iwl_get_passive_dwell_time(struct iwl_priv *priv, int phymode)
5487 {
5488         u16 active = iwl_get_active_dwell_time(priv, phymode);
5489         u16 passive = (phymode != MODE_IEEE80211A) ?
5490             IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
5491             IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;
5492
5493         if (iwl_is_associated(priv)) {
5494                 /* If we're associated, limit the passive dwell to the
5495                  * beacon interval (capped at IWL_PASSIVE_DWELL_BASE), then
5496                  * use 98% of that, minus 2 * channel tune time */
5497                 passive = priv->beacon_int;
5498                 if ((passive > IWL_PASSIVE_DWELL_BASE) || !passive)
5499                         passive = IWL_PASSIVE_DWELL_BASE;
5500                 passive = (passive * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
5501         }
5502
5503         if (passive <= active)
5504                 passive = active + 1;
5505
5506         return passive;
5507 }
5508
5509 static int iwl_get_channels_for_scan(struct iwl_priv *priv, int phymode,
5510                                      u8 is_active, u8 direct_mask,
5511                                      struct iwl_scan_channel *scan_ch)
5512 {
5513         const struct ieee80211_channel *channels = NULL;
5514         const struct ieee80211_hw_mode *hw_mode;
5515         const struct iwl_channel_info *ch_info;
5516         u16 passive_dwell = 0;
5517         u16 active_dwell = 0;
5518         int added, i;
5519
5520         hw_mode = iwl_get_hw_mode(priv, phymode);
5521         if (!hw_mode)
5522                 return 0;
5523
5524         channels = hw_mode->channels;
5525
5526         active_dwell = iwl_get_active_dwell_time(priv, phymode);
5527         passive_dwell = iwl_get_passive_dwell_time(priv, phymode);
5528
5529         for (i = 0, added = 0; i < hw_mode->num_channels; i++) {
5530                 if (channels[i].chan ==
5531                     le16_to_cpu(priv->active_rxon.channel)) {
5532                         if (iwl_is_associated(priv)) {
5533                                 IWL_DEBUG_SCAN
5534                                     ("Skipping current channel %d\n",
5535                                      le16_to_cpu(priv->active_rxon.channel));
5536                                 continue;
5537                         }
5538                 } else if (priv->only_active_channel)
5539                         continue;
5540
5541                 scan_ch->channel = channels[i].chan;
5542
5543                 ch_info = iwl_get_channel_info(priv, phymode, scan_ch->channel);
5544                 if (!is_channel_valid(ch_info)) {
5545                         IWL_DEBUG_SCAN("Channel %d is INVALID for this SKU.\n",
5546                                        scan_ch->channel);
5547                         continue;
5548                 }
5549
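                /* scan_ch->type: bit 0 selects active (1) vs. passive (0) scan;
                 * for active channels the direct-probe mask is shifted into the
                 * bits above it, and bit 7 marks a narrow channel. */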
5550                 if (!is_active || is_channel_passive(ch_info) ||
5551                     !(channels[i].flag & IEEE80211_CHAN_W_ACTIVE_SCAN))
5552                         scan_ch->type = 0;      /* passive */
5553                 else
5554                         scan_ch->type = 1;      /* active */
5555
5556                 if (scan_ch->type & 1)
5557                         scan_ch->type |= (direct_mask << 1);
5558
5559                 if (is_channel_narrow(ch_info))
5560                         scan_ch->type |= (1 << 7);
5561
5562                 scan_ch->active_dwell = cpu_to_le16(active_dwell);
5563                 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
5564
5565                 /* Set power levels to defaults */
5566                 scan_ch->tpc.dsp_atten = 110;
5567                 /* scan_pwr_info->tpc.dsp_atten; */
5568
5569                 /* scan_pwr_info->tpc.tx_gain; */
5570                 if (phymode == MODE_IEEE80211A)
5571                         scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
5572                 else {
5573                         scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
5574                         /* NOTE: if we were doing 6Mb OFDM for scans we'd use
5575                          * power level
5576                          scan_ch->tpc.tx_gain = ((1<<5) | (2 << 3)) | 3;
5577                          */
5578                 }
5579
5580                 IWL_DEBUG_SCAN("Scanning %d [%s %d]\n",
5581                                scan_ch->channel,
5582                                (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE",
5583                                (scan_ch->type & 1) ?
5584                                active_dwell : passive_dwell);
5585
5586                 scan_ch++;
5587                 added++;
5588         }
5589
5590         IWL_DEBUG_SCAN("total channels to scan %d\n", added);
5591         return added;
5592 }
5593
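/* Restore every channel's 'flag' field from the 'val' snapshot taken in
 * iwl_init_geos(), undoing any run-time changes to the scan flags. */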
5594 static void iwl_reset_channel_flag(struct iwl_priv *priv)
5595 {
5596         int i, j;
5597         for (i = 0; i < 3; i++) {
5598                 struct ieee80211_hw_mode *hw_mode = (void *)&priv->modes[i];
5599                 for (j = 0; j < hw_mode->num_channels; j++)
5600                         hw_mode->channels[j].flag = hw_mode->channels[j].val;
5601         }
5602 }
5603
5604 static void iwl_init_hw_rates(struct iwl_priv *priv,
5605                               struct ieee80211_rate *rates)
5606 {
5607         int i;
5608
5609         for (i = 0; i < IWL_RATE_COUNT; i++) {
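                /* iwl_rates[].ieee is in 500 kbps units (802.11 supported-rates
                 * format); mac80211 expects units of 100 kbps, hence the * 5 */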
5610                 rates[i].rate = iwl_rates[i].ieee * 5;
5611                 rates[i].val = i; /* Rate scaling will work on indexes */
5612                 rates[i].val2 = i;
5613                 rates[i].flags = IEEE80211_RATE_SUPPORTED;
5614                 /* Only OFDM rates have the bits-per-symbol set */
5615                 if ((i <= IWL_LAST_OFDM_RATE) && (i >= IWL_FIRST_OFDM_RATE))
5616                         rates[i].flags |= IEEE80211_RATE_OFDM;
5617                 else {
5618                         /*
5619                          * If CCK 1M, set the rate flag to plain CCK; otherwise
5620                          * use CCK_2, which is CCK | PREAMBLE2
5621                          */
5622                         rates[i].flags |= (iwl_rates[i].plcp == 10) ?
5623                                 IEEE80211_RATE_CCK : IEEE80211_RATE_CCK_2;
5624                 }
5625
5626                 /* Set up which ones are basic rates... */
5627                 if (IWL_BASIC_RATES_MASK & (1 << i))
5628                         rates[i].flags |= IEEE80211_RATE_BASIC;
5629         }
5630
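        /* Hand off to the 4965-specific code to finish rate setup (presumably
         * the HT entries beyond the legacy CCK/OFDM rates). */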
5631         iwl4965_init_hw_rates(priv, rates);
5632 }
5633
5634 /**
5635  * iwl_init_geos - Initialize mac80211's geo/channel info from EEPROM
5636  */
5637 static int iwl_init_geos(struct iwl_priv *priv)
5638 {
5639         struct iwl_channel_info *ch;
5640         struct ieee80211_hw_mode *modes;
5641         struct ieee80211_channel *channels;
5642         struct ieee80211_channel *geo_ch;
5643         struct ieee80211_rate *rates;
5644         int i = 0;
5645         enum {
5646                 A = 0,
5647                 B = 1,
5648                 G = 2,
5649                 A_11N = 3,
5650                 G_11N = 4,
5651         };
5652         int mode_count = 5;
5653
5654         if (priv->modes) {
5655                 IWL_DEBUG_INFO("Geography modes already initialized.\n");
5656                 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
5657                 return 0;
5658         }
5659
5660         modes = kzalloc(sizeof(struct ieee80211_hw_mode) * mode_count,
5661                         GFP_KERNEL);
5662         if (!modes)
5663                 return -ENOMEM;
5664
5665         channels = kzalloc(sizeof(struct ieee80211_channel) *
5666                            priv->channel_count, GFP_KERNEL);
5667         if (!channels) {
5668                 kfree(modes);
5669                 return -ENOMEM;
5670         }
5671
5672         rates = kzalloc((sizeof(struct ieee80211_rate) * (IWL_MAX_RATES + 1)),
5673                         GFP_KERNEL);
5674         if (!rates) {
5675                 kfree(modes);
5676                 kfree(channels);
5677                 return -ENOMEM;
5678         }
5679
5680         /* 0 = 802.11a
5681          * 1 = 802.11b
5682          * 2 = 802.11g
5683          * 3 = 802.11a (11n), 4 = 802.11g (11n) */
5684
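        /* The shared rates[] table holds the 4 CCK rates first, then the 8
         * OFDM rates (see iwl_init_hw_rates); the extra entry allocated above
         * is presumably for the HT rate set up by iwl4965_init_hw_rates(). */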
5685         /* 5.2GHz channels start after the 2.4GHz channels */
5686         modes[A].mode = MODE_IEEE80211A;
5687         modes[A].channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)];
5688         /* The A band uses only the OFDM rates, which follow the 4 CCK rates */
5689         modes[A].rates = &rates[4];
5690         modes[A].num_rates = 8; /* just OFDM */
5691         modes[A].num_channels = 0;
5692
5693         modes[B].mode = MODE_IEEE80211B;
5694         modes[B].channels = channels;
5695         modes[B].rates = rates;
5696         modes[B].num_rates = 4; /* just CCK */
5697         modes[B].num_channels = 0;
5698
5699         modes[G].mode = MODE_IEEE80211G;
5700         modes[G].channels = channels;
5701         modes[G].rates = rates;
5702         modes[G].num_rates = 12;        /* OFDM & CCK */
5703         modes[G].num_channels = 0;
5704
5705         modes[G_11N].mode = MODE_IEEE80211G;
5706         modes[G_11N].channels = channels;
5707         modes[G_11N].num_rates = 13;        /* OFDM & CCK */
5708         modes[G_11N].rates = rates;
5709         modes[G_11N].num_channels = 0;
5710
5711         modes[A_11N].mode = MODE_IEEE80211A;
5712         modes[A_11N].channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)];
5713         modes[A_11N].rates = &rates[4];
5714         modes[A_11N].num_rates = 9; /* just OFDM */
5715         modes[A_11N].num_channels = 0;
5716
5717         priv->ieee_channels = channels;
5718         priv->ieee_rates = rates;
5719
5720         iwl_init_hw_rates(priv, rates);
5721
5722         for (i = 0, geo_ch = channels; i < priv->channel_count; i++) {
5723                 ch = &priv->channel_info[i];
5724
5725                 if (!is_channel_valid(ch)) {
5726                         IWL_DEBUG_INFO("Channel %d [%sGHz] is restricted -- "
5727                                     "skipping.\n",
5728                                     ch->channel, is_channel_a_band(ch) ?
5729                                     "5.2" : "2.4");
5730                         continue;
5731                 }
5732
5733                 if (is_channel_a_band(ch)) {
5734                         geo_ch = &modes[A].channels[modes[A].num_channels++];
5735                         modes[A_11N].num_channels++;
5736                 } else {
5737                         geo_ch = &modes[B].channels[modes[B].num_channels++];
5738                         modes[G].num_channels++;
5739                         modes[G_11N].num_channels++;
5740                 }
5741
5742                 geo_ch->freq = ieee80211chan2mhz(ch->channel);
5743                 geo_ch->chan = ch->channel;
5744                 geo_ch->power_level = ch->max_power_avg;
5745                 geo_ch->antenna_max = 0xff;
5746
5747                 if (is_channel_valid(ch)) {
5748                         geo_ch->flag = IEEE80211_CHAN_W_SCAN;
5749                         if (ch->flags & EEPROM_CHANNEL_IBSS)
5750                                 geo_ch->flag |= IEEE80211_CHAN_W_IBSS;
5751
5752                         if (ch->flags & EEPROM_CHANNEL_ACTIVE)
5753                                 geo_ch->flag |= IEEE80211_CHAN_W_ACTIVE_SCAN;
5754
5755                         if (ch->flags & EEPROM_CHANNEL_RADAR)
5756                                 geo_ch->flag |= IEEE80211_CHAN_W_RADAR_DETECT;
5757
5758                         if (ch->max_power_avg > priv->max_channel_txpower_limit)
5759                                 priv->max_channel_txpower_limit =
5760                                     ch->max_power_avg;
5761                 }
5762
5763                 geo_ch->val = geo_ch->flag;
5764         }
5765
5766         if ((modes[A].num_channels == 0) && priv->is_abg) {
5767                 printk(KERN_INFO DRV_NAME
5768                        ": Incorrectly detected BG card as ABG.  Please send "
5769                        "your PCI ID 0x%04X:0x%04X to maintainer.\n",
5770                        priv->pci_dev->device, priv->pci_dev->subsystem_device);
5771                 priv->is_abg = 0;
5772         }
5773
5774         printk(KERN_INFO DRV_NAME
5775                ": Tunable channels: %d 802.11bg, %d 802.11a channels\n",
5776                modes[G].num_channels, modes[A].num_channels);
5777
5778         /*
5779          * NOTE:  We register these in order of preference -- the
5780          * stack doesn't currently (as of 7.0.6 / Apr 24 '07) pick
5781          * a phymode based on rates or AP capabilities, but seems to
5782          * configure it purely on whether the channel being configured
5783          * is supported by a mode -- and the first match is taken
5784          */
5785
5786         if (modes[G].num_channels)
5787                 ieee80211_register_hwmode(priv->hw, &modes[G]);
5788         if (modes[B].num_channels)
5789                 ieee80211_register_hwmode(priv->hw, &modes[B]);
5790         if (modes[A].num_channels)
5791                 ieee80211_register_hwmode(priv->hw, &modes[A]);
5792
5793         priv->modes = modes;
5794         set_bit(STATUS_GEO_CONFIGURED, &priv->status);
5795
5796         return 0;
5797 }
5798
5799 /******************************************************************************
5800  *
5801  * uCode download functions
5802  *
5803  ******************************************************************************/
5804
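/* Free all PCI-coherent buffers holding uCode images: runtime code and data,
 * the data backup copy, the initialization image and data, and bootstrap. */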
5805 static void iwl_dealloc_ucode_pci(struct iwl_priv *priv)
5806 {
5807         if (priv->ucode_code.v_addr != NULL) {
5808                 pci_free_consistent(priv->pci_dev,
5809                                     priv->ucode_code.len,
5810                                     priv->ucode_code.v_addr,
5811                                     priv->ucode_code.p_addr);
5812                 priv->ucode_code.v_addr = NULL;
5813         }
5814         if (priv->ucode_data.v_addr != NULL) {
5815                 pci_free_consistent(priv->pci_dev,
5816                                     priv->ucode_data.len,
5817                                     priv->ucode_data.v_addr,
5818                                     priv->ucode_data.p_addr);
5819                 priv->ucode_data.v_addr = NULL;
5820         }
5821         if (priv->ucode_data_backup.v_addr != NULL) {
5822                 pci_free_consistent(priv->pci_dev,
5823                                     priv->ucode_data_backup.len,
5824                                     priv->ucode_data_backup.v_addr,
5825                                     priv->ucode_data_backup.p_addr);
5826                 priv->ucode_data_backup.v_addr = NULL;
5827         }
5828         if (priv->ucode_init.v_addr != NULL) {
5829                 pci_free_consistent(priv->pci_dev,
5830                                     priv->ucode_init.len,
5831                                     priv->ucode_init.v_addr,
5832                                     priv->ucode_init.p_addr);
5833                 priv->ucode_init.v_addr = NULL;
5834         }
5835         if (priv->ucode_init_data.v_addr != NULL) {
5836                 pci_free_consistent(priv->pci_dev,
5837                                     priv->ucode_init_data.len,
5838                                     priv->ucode_init_data.v_addr,
5839                                     priv->ucode_init_data.p_addr);
5840                 priv->ucode_init_data.v_addr = NULL;
5841         }
5842         if (priv->ucode_boot.v_addr != NULL) {
5843                 pci_free_consistent(priv->pci_dev,
5844                                     priv->ucode_boot.len,
5845                                     priv->ucode_boot.v_addr,
5846                                     priv->ucode_boot.p_addr);
5847                 priv->ucode_boot.v_addr = NULL;
5848         }
5849 }
5850
5851 /**
5852  * iwl_verify_inst_full - verify runtime uCode image in card vs. host,
5853  *     looking at all data.
5854  */
5855 static int iwl_verify_inst_full(struct iwl_priv *priv, __le32 *image, u32 len)
5856 {
5857         u32 val;
5858         u32 save_len = len;
5859         int rc = 0;
5860         u32 errcnt;
5861
5862         IWL_DEBUG_INFO("ucode inst image size is %u\n", len);
5863
5864         rc = iwl_grab_restricted_access(priv);
5865         if (rc)
5866                 return rc;
5867
5868         iwl_write_restricted(priv, HBUS_TARG_MEM_RADDR, RTC_INST_LOWER_BOUND);
5869
5870         errcnt = 0;
5871         for (; len > 0; len -= sizeof(u32), image++) {
5872                 /* read data comes through single port, auto-incr addr */
5873                 /* NOTE: Use the debugless read so we don't flood kernel log
5874                  * if IWL_DL_IO is set */
5875                 val = _iwl_read_restricted(priv, HBUS_TARG_MEM_RDAT);
5876                 if (val != le32_to_cpu(*image)) {
5877                         IWL_ERROR("uCode INST section is invalid at "
5878                                   "offset 0x%x, is 0x%x, s/b 0x%x\n",
5879                                   save_len - len, val, le32_to_cpu(*image));
5880                         rc = -EIO;
5881                         errcnt++;
5882                         if (errcnt >= 20)
5883                                 break;
5884                 }
5885         }
5886
5887         iwl_release_restricted_access(priv);
5888
5889         if (!errcnt)
5890                 IWL_DEBUG_INFO
5891                     ("ucode image in INSTRUCTION memory is good\n");
5892
5893         return rc;
5894 }
5895
5896
5897 /**
5898  * iwl_verify_inst_sparse - verify runtime uCode image in card vs. host,
5899  *   using sample data 100 bytes apart.  If these sample points are good,
5900  *   it's a pretty good bet that everything between them is good, too.
5901  */
5902 static int iwl_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
5903 {
5904         u32 val;
5905         int rc = 0;
5906         u32 errcnt = 0;
5907         u32 i;
5908
5909         IWL_DEBUG_INFO("ucode inst image size is %u\n", len);
5910
5911         rc = iwl_grab_restricted_access(priv);
5912         if (rc)
5913                 return rc;
5914
5915         for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
5916                 /* read data comes through single port, auto-incr addr */
5917                 /* NOTE: Use the debugless read so we don't flood kernel log
5918                  * if IWL_DL_IO is set */
5919                 iwl_write_restricted(priv, HBUS_TARG_MEM_RADDR,
5920                         i + RTC_INST_LOWER_BOUND);
5921                 val = _iwl_read_restricted(priv, HBUS_TARG_MEM_RDAT);
5922                 if (val != le32_to_cpu(*image)) {
5923 #if 0 /* Enable this if you want to see details */
5924                         IWL_ERROR("uCode INST section is invalid at "
5925                                   "offset 0x%x, is 0x%x, s/b 0x%x\n",
5926                                   i, val, *image);
5927 #endif
5928                         rc = -EIO;
5929                         errcnt++;
5930                         if (errcnt >= 3)
5931                                 break;
5932                 }
5933         }
5934
5935         iwl_release_restricted_access(priv);
5936
5937         return rc;
5938 }
5939
5940
5941 /**
5942  * iwl_verify_ucode - determine which instruction image is in SRAM,
5943  *    and verify its contents
5944  */
5945 static int iwl_verify_ucode(struct iwl_priv *priv)
5946 {
5947         __le32 *image;
5948         u32 len;
5949         int rc = 0;
5950
5951         /* Try bootstrap */
5952         image = (__le32 *)priv->ucode_boot.v_addr;
5953         len = priv->ucode_boot.len;
5954         rc = iwl_verify_inst_sparse(priv, image, len);
5955         if (rc == 0) {
5956                 IWL_DEBUG_INFO("Bootstrap uCode is good in inst SRAM\n");
5957                 return 0;
5958         }
5959
5960         /* Try initialize */
5961         image = (__le32 *)priv->ucode_init.v_addr;
5962         len = priv->ucode_init.len;
5963         rc = iwl_verify_inst_sparse(priv, image, len);
5964         if (rc == 0) {
5965                 IWL_DEBUG_INFO("Initialize uCode is good in inst SRAM\n");
5966                 return 0;
5967         }
5968
5969         /* Try runtime/protocol */
5970         image = (__le32 *)priv->ucode_code.v_addr;
5971         len = priv->ucode_code.len;
5972         rc = iwl_verify_inst_sparse(priv, image, len);
5973         if (rc == 0) {
5974                 IWL_DEBUG_INFO("Runtime uCode is good in inst SRAM\n");
5975                 return 0;
5976         }
5977
5978         IWL_ERROR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
5979
5980         /* Show first several data entries in instruction SRAM.
5981          * Selection of bootstrap image is arbitrary. */