/* drivers/net/wireless/iwlwifi/iwl-agn.c */
1 /******************************************************************************
2  *
3  * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
4  *
5  * Portions of this file are derived from the ipw3945 project, as well
6  * as portions of the ieee80211 subsystem header files.
7  *
8  * This program is free software; you can redistribute it and/or modify it
9  * under the terms of version 2 of the GNU General Public License as
10  * published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but WITHOUT
13  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
15  * more details.
16  *
17  * You should have received a copy of the GNU General Public License along with
18  * this program; if not, write to the Free Software Foundation, Inc.,
19  * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20  *
21  * The full GNU General Public License is included in this distribution in the
22  * file called LICENSE.
23  *
24  * Contact Information:
25  *  Intel Linux Wireless <ilw@linux.intel.com>
26  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27  *
28  *****************************************************************************/
29
30 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31
32 #include <linux/kernel.h>
33 #include <linux/module.h>
34 #include <linux/init.h>
35 /* TODO: remove the PCI*.h includes when PCI is no longer needed here */
36 #include <linux/pci.h>
37 #include <linux/pci-aspm.h>
38 #include <linux/slab.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/delay.h>
41 #include <linux/sched.h>
42 #include <linux/skbuff.h>
43 #include <linux/netdevice.h>
44 #include <linux/wireless.h>
45 #include <linux/firmware.h>
46 #include <linux/etherdevice.h>
47 #include <linux/if_arp.h>
48
49 #include <net/mac80211.h>
50
51 #include <asm/div64.h>
52
53 #include "iwl-eeprom.h"
54 #include "iwl-dev.h"
55 #include "iwl-core.h"
56 #include "iwl-io.h"
57 #include "iwl-helpers.h"
58 #include "iwl-sta.h"
59 #include "iwl-agn-calib.h"
60 #include "iwl-agn.h"
61 #include "iwl-pci.h"
62
63
64 /******************************************************************************
65  *
66  * module boilerplate
67  *
68  ******************************************************************************/
69
70 /*
71  * module name, copyright, version, etc.
72  */
73 #define DRV_DESCRIPTION "Intel(R) Wireless WiFi Link AGN driver for Linux"
74
75 #ifdef CONFIG_IWLWIFI_DEBUG
76 #define VD "d"
77 #else
78 #define VD
79 #endif
80
81 #define DRV_VERSION     IWLWIFI_VERSION VD
82
83
84 MODULE_DESCRIPTION(DRV_DESCRIPTION);
85 MODULE_VERSION(DRV_VERSION);
86 MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
87 MODULE_LICENSE("GPL");
88
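/* Bluetooth-coexistence tuning knobs: antenna coupling and BT channel
 * announcement, used by the BT coex paths below (presumably exposed as
 * module parameters). */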
89 static int iwlagn_ant_coupling;
90 static bool iwlagn_bt_ch_announce = true;
91
92 void iwl_update_chain_flags(struct iwl_priv *priv)
93 {
94         struct iwl_rxon_context *ctx;
95
96         if (priv->cfg->ops->hcmd->set_rxon_chain) {
97                 for_each_context(priv, ctx) {
98                         priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
99                         if (ctx->active.rx_chain != ctx->staging.rx_chain)
100                                 iwlagn_commit_rxon(priv, ctx);
101                 }
102         }
103 }
104
105 /* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */
106 static void iwl_set_beacon_tim(struct iwl_priv *priv,
107                                struct iwl_tx_beacon_cmd *tx_beacon_cmd,
108                                u8 *beacon, u32 frame_size)
109 {
110         u16 tim_idx;
111         struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;
112
113         /*
114          * The index is relative to frame start but we start looking at the
115          * variable-length part of the beacon.
116          */
117         tim_idx = mgmt->u.beacon.variable - beacon;
118
119         /* Parse variable-length elements of beacon to find WLAN_EID_TIM */
120         while ((tim_idx < (frame_size - 2)) &&
121                         (beacon[tim_idx] != WLAN_EID_TIM))
122                 tim_idx += beacon[tim_idx+1] + 2;
123
124         /* If TIM field was found, set variables */
125         if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
126                 tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
127                 tx_beacon_cmd->tim_size = beacon[tim_idx+1];
128         } else
129                 IWL_WARN(priv, "Unable to find TIM Element in beacon\n");
130 }
131
132 int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
133 {
134         struct iwl_tx_beacon_cmd *tx_beacon_cmd;
135         struct iwl_host_cmd cmd = {
136                 .id = REPLY_TX_BEACON,
137         };
138         u32 frame_size;
139         u32 rate_flags;
140         u32 rate;
141
142         /*
143          * We have to set up the TX command, the TX Beacon command, and the
144          * beacon contents.
145          */
146
147         lockdep_assert_held(&priv->mutex);
148
149         if (!priv->beacon_ctx) {
150                 IWL_ERR(priv, "trying to build beacon w/o beacon context!\n");
151                 return 0;
152         }
153
154         if (WARN_ON(!priv->beacon_skb))
155                 return -EINVAL;
156
157         /* Allocate beacon command */
158         if (!priv->beacon_cmd)
159                 priv->beacon_cmd = kzalloc(sizeof(*tx_beacon_cmd), GFP_KERNEL);
160         tx_beacon_cmd = priv->beacon_cmd;
161         if (!tx_beacon_cmd)
162                 return -ENOMEM;
163
164         frame_size = priv->beacon_skb->len;
165
166         /* Set up TX command fields */
167         tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
168         tx_beacon_cmd->tx.sta_id = priv->beacon_ctx->bcast_sta_id;
169         tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
170         tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK |
171                 TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK;
172
173         /* Set up TX beacon command fields */
174         iwl_set_beacon_tim(priv, tx_beacon_cmd, priv->beacon_skb->data,
175                            frame_size);
176
177         /* Set up packet rate and flags */
178         rate = iwl_rate_get_lowest_plcp(priv, priv->beacon_ctx);
179         priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
180                                               priv->hw_params.valid_tx_ant);
181         rate_flags = iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
182         if ((rate >= IWL_FIRST_CCK_RATE) && (rate <= IWL_LAST_CCK_RATE))
183                 rate_flags |= RATE_MCS_CCK_MSK;
184         tx_beacon_cmd->tx.rate_n_flags = iwl_hw_set_rate_n_flags(rate,
185                         rate_flags);
186
187         /* Submit command */
188         cmd.len[0] = sizeof(*tx_beacon_cmd);
189         cmd.data[0] = tx_beacon_cmd;
190         cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
191         cmd.len[1] = frame_size;
192         cmd.data[1] = priv->beacon_skb->data;
193         cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY;
194
195         return iwl_send_cmd_sync(priv, &cmd);
196 }
197
198 static void iwl_bg_beacon_update(struct work_struct *work)
199 {
200         struct iwl_priv *priv =
201                 container_of(work, struct iwl_priv, beacon_update);
202         struct sk_buff *beacon;
203
204         mutex_lock(&priv->mutex);
205         if (!priv->beacon_ctx) {
206                 IWL_ERR(priv, "updating beacon w/o beacon context!\n");
207                 goto out;
208         }
209
210         if (priv->beacon_ctx->vif->type != NL80211_IFTYPE_AP) {
211                 /*
212                  * The uCode will send beacon notifications even in
213                  * IBSS mode, but we don't want to process them. However,
214                  * we have to defer the type check to here because
215                  * accessing beacon_ctx requires locking.
216                  */
217                 goto out;
218         }
219
220         /* Pull the updated AP beacon from mac80211; this will fail if not in AP mode */
221         beacon = ieee80211_beacon_get(priv->hw, priv->beacon_ctx->vif);
222         if (!beacon) {
223                 IWL_ERR(priv, "update beacon failed -- keeping old\n");
224                 goto out;
225         }
226
227                 /* A new beacon skb is allocated every time; dispose of the previous one. */
228         dev_kfree_skb(priv->beacon_skb);
229
230         priv->beacon_skb = beacon;
231
232         iwlagn_send_beacon_cmd(priv);
233  out:
234         mutex_unlock(&priv->mutex);
235 }
236
237 static void iwl_bg_bt_runtime_config(struct work_struct *work)
238 {
239         struct iwl_priv *priv =
240                 container_of(work, struct iwl_priv, bt_runtime_config);
241
242         if (test_bit(STATUS_EXIT_PENDING, &priv->status))
243                 return;
244
245         /* don't send a host command if rf-kill is on */
246         if (!iwl_is_ready_rf(priv))
247                 return;
248         priv->cfg->ops->hcmd->send_bt_config(priv);
249 }
250
251 static void iwl_bg_bt_full_concurrency(struct work_struct *work)
252 {
253         struct iwl_priv *priv =
254                 container_of(work, struct iwl_priv, bt_full_concurrency);
255         struct iwl_rxon_context *ctx;
256
257         mutex_lock(&priv->mutex);
258
259         if (test_bit(STATUS_EXIT_PENDING, &priv->status))
260                 goto out;
261
262         /* dont send host command if rf-kill is on */
263         if (!iwl_is_ready_rf(priv))
264                 goto out;
265
266         IWL_DEBUG_INFO(priv, "BT coex in %s mode\n",
267                        priv->bt_full_concurrent ?
268                        "full concurrency" : "3-wire");
269
270         /*
271          * The updated LQ & RXON commands must be sent before the BT Config
272          * command to avoid 3-wire collisions
273          */
274         for_each_context(priv, ctx) {
275                 if (priv->cfg->ops->hcmd->set_rxon_chain)
276                         priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
277                 iwlagn_commit_rxon(priv, ctx);
278         }
279
280         priv->cfg->ops->hcmd->send_bt_config(priv);
281 out:
282         mutex_unlock(&priv->mutex);
283 }
284
285 /**
286  * iwl_bg_statistics_periodic - Timer callback to queue statistics
287  *
288  * This callback is provided in order to send a statistics request.
289  *
290  * This timer function is continually reset to execute within
291  * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION
292  * was received.  We need to ensure we receive the statistics in order
293  * to update the temperature used for calibrating the TXPOWER.
294  */
295 static void iwl_bg_statistics_periodic(unsigned long data)
296 {
297         struct iwl_priv *priv = (struct iwl_priv *)data;
298
299         if (test_bit(STATUS_EXIT_PENDING, &priv->status))
300                 return;
301
302         /* don't send a host command if rf-kill is on */
303         if (!iwl_is_ready_rf(priv))
304                 return;
305
306         iwl_send_statistics_request(priv, CMD_ASYNC, false);
307 }
308
309
310 static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base,
311                                         u32 start_idx, u32 num_events,
312                                         u32 mode)
313 {
314         u32 i;
315         u32 ptr;        /* SRAM byte address of log data */
316         u32 ev, time, data; /* event log data */
317         unsigned long reg_flags;
318
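        /* Each log entry is two u32s (event id + data) when there is no
         * timestamp (mode 0), or three u32s (event id, timestamp, data)
         * otherwise; skip the four-u32 log header and seek to start_idx. */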
319         if (mode == 0)
320                 ptr = base + (4 * sizeof(u32)) + (start_idx * 2 * sizeof(u32));
321         else
322                 ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32));
323
324         /* Make sure device is powered up for SRAM reads */
325         spin_lock_irqsave(&priv->reg_lock, reg_flags);
326         if (iwl_grab_nic_access(priv)) {
327                 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
328                 return;
329         }
330
331         /* Set starting address; reads will auto-increment */
332         iwl_write32(priv, HBUS_TARG_MEM_RADDR, ptr);
333         rmb();
334
335         /*
336          * "time" is actually "data" for mode 0 (no timestamp).
337          * place event id # at far right for easier visual parsing.
338          */
339         for (i = 0; i < num_events; i++) {
340                 ev = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
341                 time = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
342                 if (mode == 0) {
343                         trace_iwlwifi_dev_ucode_cont_event(priv,
344                                                         0, time, ev);
345                 } else {
346                         data = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
347                         trace_iwlwifi_dev_ucode_cont_event(priv,
348                                                 time, data, ev);
349                 }
350         }
351         /* Allow device to power down */
352         iwl_release_nic_access(priv);
353         spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
354 }
355
356 static void iwl_continuous_event_trace(struct iwl_priv *priv)
357 {
358         u32 capacity;   /* event log capacity in # entries */
359         u32 base;       /* SRAM byte address of event log header */
360         u32 mode;       /* 0 - no timestamp, 1 - timestamp recorded */
361         u32 num_wraps;  /* # times uCode wrapped to top of log */
362         u32 next_entry; /* index of next entry to be written by uCode */
363
364         base = priv->device_pointers.error_event_table;
365         if (priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
366                 capacity = iwl_read_targ_mem(priv, base);
367                 num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
368                 mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
369                 next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
370         } else
371                 return;
372
373         if (num_wraps == priv->event_log.num_wraps) {
374                 iwl_print_cont_event_trace(priv,
375                                        base, priv->event_log.next_entry,
376                                        next_entry - priv->event_log.next_entry,
377                                        mode);
378                 priv->event_log.non_wraps_count++;
379         } else {
380                 if ((num_wraps - priv->event_log.num_wraps) > 1)
381                         priv->event_log.wraps_more_count++;
382                 else
383                         priv->event_log.wraps_once_count++;
384                 trace_iwlwifi_dev_ucode_wrap_event(priv,
385                                 num_wraps - priv->event_log.num_wraps,
386                                 next_entry, priv->event_log.next_entry);
387                 if (next_entry < priv->event_log.next_entry) {
388                         iwl_print_cont_event_trace(priv, base,
389                                priv->event_log.next_entry,
390                                capacity - priv->event_log.next_entry,
391                                mode);
392
393                         iwl_print_cont_event_trace(priv, base, 0,
394                                 next_entry, mode);
395                 } else {
396                         iwl_print_cont_event_trace(priv, base,
397                                next_entry, capacity - next_entry,
398                                mode);
399
400                         iwl_print_cont_event_trace(priv, base, 0,
401                                 next_entry, mode);
402                 }
403         }
404         priv->event_log.num_wraps = num_wraps;
405         priv->event_log.next_entry = next_entry;
406 }
407
408 /**
409  * iwl_bg_ucode_trace - Timer callback to log ucode event
410  *
411  * The timer is continually rearmed to fire every UCODE_TRACE_PERIOD
412  * milliseconds after it last expired.
413  * This function performs the continuous uCode event logging
414  * operation, if it is enabled.
415  */
416 static void iwl_bg_ucode_trace(unsigned long data)
417 {
418         struct iwl_priv *priv = (struct iwl_priv *)data;
419
420         if (test_bit(STATUS_EXIT_PENDING, &priv->status))
421                 return;
422
423         if (priv->event_log.ucode_trace) {
424                 iwl_continuous_event_trace(priv);
425                 /* Reschedule the timer to occur in UCODE_TRACE_PERIOD */
426                 mod_timer(&priv->ucode_trace,
427                          jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD));
428         }
429 }
430
431 static void iwl_bg_tx_flush(struct work_struct *work)
432 {
433         struct iwl_priv *priv =
434                 container_of(work, struct iwl_priv, tx_flush);
435
436         if (test_bit(STATUS_EXIT_PENDING, &priv->status))
437                 return;
438
439         /* do nothing if rf-kill is on */
440         if (!iwl_is_ready_rf(priv))
441                 return;
442
443         IWL_DEBUG_INFO(priv, "device request: flush all tx frames\n");
444         iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL);
445 }
446
447 /**
448  * iwl_rx_handle - Main entry function for receiving responses from uCode
449  *
450  * Uses the priv->rx_handlers callback function array to invoke
451  * the appropriate handlers, including command responses,
452  * frame-received notifications, and other notifications.
453  */
454 static void iwl_rx_handle(struct iwl_priv *priv)
455 {
456         struct iwl_rx_mem_buffer *rxb;
457         struct iwl_rx_packet *pkt;
458         struct iwl_rx_queue *rxq = &priv->rxq;
459         u32 r, i;
460         int reclaim;
461         unsigned long flags;
462         u8 fill_rx = 0;
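        /* count starts at 8 so that, when fill_rx is set, the very first
         * handled buffer already triggers a replenish (see the loop below) */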
463         u32 count = 8;
464         int total_empty;
465
466         /* uCode's read index (stored in shared DRAM) indicates the last Rx
467          * buffer that the driver may process (last buffer filled by ucode). */
468         r = le16_to_cpu(rxq->rb_stts->closed_rb_num) &  0x0FFF;
469         i = rxq->read;
470
471         /* Rx interrupt, but nothing sent from uCode */
472         if (i == r)
473                 IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);
474
475         /* calculate total frames that need to be restocked after handling RX */
476         total_empty = r - rxq->write_actual;
477         if (total_empty < 0)
478                 total_empty += RX_QUEUE_SIZE;
479
480         if (total_empty > (RX_QUEUE_SIZE / 2))
481                 fill_rx = 1;
482
483         while (i != r) {
484                 int len;
485
486                 rxb = rxq->queue[i];
487
488                 /* If an RXB doesn't have a Rx queue slot associated with it,
489                  * then a bug has been introduced in the queue refilling
490                  * routines -- catch it here */
491                 if (WARN_ON(rxb == NULL)) {
492                         i = (i + 1) & RX_QUEUE_MASK;
493                         continue;
494                 }
495
496                 rxq->queue[i] = NULL;
497
498                 pci_unmap_page(priv->pci_dev, rxb->page_dma,
499                                PAGE_SIZE << priv->hw_params.rx_page_order,
500                                PCI_DMA_FROMDEVICE);
501                 pkt = rxb_addr(rxb);
502
503                 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
504                 len += sizeof(u32); /* account for status word */
505                 trace_iwlwifi_dev_rx(priv, pkt, len);
506
507                 /* Reclaim a command buffer only if this packet is a response
508                  *   to a (driver-originated) command.
509                  * If the packet (e.g. Rx frame) originated from uCode,
510                  *   there is no command buffer to reclaim.
511                  * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
512                  *   but apparently a few don't get set; catch them here. */
513                 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
514                         (pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
515                         (pkt->hdr.cmd != REPLY_RX) &&
516                         (pkt->hdr.cmd != REPLY_RX_MPDU_CMD) &&
517                         (pkt->hdr.cmd != REPLY_COMPRESSED_BA) &&
518                         (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
519                         (pkt->hdr.cmd != REPLY_TX);
520
521                 /*
522                  * Do the notification wait before RX handlers so
523                  * even if the RX handler consumes the RXB we have
524                  * access to it in the notification wait entry.
525                  */
526                 if (!list_empty(&priv->_agn.notif_waits)) {
527                         struct iwl_notification_wait *w;
528
529                         spin_lock(&priv->_agn.notif_wait_lock);
530                         list_for_each_entry(w, &priv->_agn.notif_waits, list) {
531                                 if (w->cmd == pkt->hdr.cmd) {
532                                         w->triggered = true;
533                                         if (w->fn)
534                                                 w->fn(priv, pkt, w->fn_data);
535                                 }
536                         }
537                         spin_unlock(&priv->_agn.notif_wait_lock);
538
539                         wake_up_all(&priv->_agn.notif_waitq);
540                 }
541                 if (priv->pre_rx_handler)
542                         priv->pre_rx_handler(priv, rxb);
543
544                 /* Based on type of command response or notification,
545                  *   handle those that need handling via function in
546                  *   rx_handlers table.  See iwl_setup_rx_handlers() */
547                 if (priv->rx_handlers[pkt->hdr.cmd]) {
548                         IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r,
549                                 i, get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
550                         priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
551                         priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
552                 } else {
553                         /* No handling needed */
554                         IWL_DEBUG_RX(priv,
555                                 "r %d i %d No handler needed for %s, 0x%02x\n",
556                                 r, i, get_cmd_string(pkt->hdr.cmd),
557                                 pkt->hdr.cmd);
558                 }
559
560                 /*
561                  * XXX: After here, we should always check rxb->page
562                  * against NULL before touching it or its virtual
563                  * memory (pkt), because some rx_handler might have
564                  * already taken or freed the pages.
565                  */
566
567                 if (reclaim) {
568                         /* Invoke any callbacks, transfer the buffer to caller,
569                          * and fire off the (possibly) blocking iwl_send_cmd()
570                          * as we reclaim the driver command queue */
571                         if (rxb->page)
572                                 iwl_tx_cmd_complete(priv, rxb);
573                         else
574                                 IWL_WARN(priv, "Claim null rxb?\n");
575                 }
576
577                 /* Reuse the page if possible. For notification packets and
578                  * SKBs that fail to Rx correctly, add them back into the
579                  * rx_free list for reuse later. */
580                 spin_lock_irqsave(&rxq->lock, flags);
581                 if (rxb->page != NULL) {
582                         rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page,
583                                 0, PAGE_SIZE << priv->hw_params.rx_page_order,
584                                 PCI_DMA_FROMDEVICE);
585                         list_add_tail(&rxb->list, &rxq->rx_free);
586                         rxq->free_count++;
587                 } else
588                         list_add_tail(&rxb->list, &rxq->rx_used);
589
590                 spin_unlock_irqrestore(&rxq->lock, flags);
591
592                 i = (i + 1) & RX_QUEUE_MASK;
593                 /* If there are a lot of unused frames,
594                  * restock the Rx queue so the uCode won't assert. */
595                 if (fill_rx) {
596                         count++;
597                         if (count >= 8) {
598                                 rxq->read = i;
599                                 iwlagn_rx_replenish_now(priv);
600                                 count = 0;
601                         }
602                 }
603         }
604
605         /* Backtrack one entry */
606         rxq->read = i;
607         if (fill_rx)
608                 iwlagn_rx_replenish_now(priv);
609         else
610                 iwlagn_rx_queue_restock(priv);
611 }
612
613 /* tasklet for iwlagn interrupt */
614 static void iwl_irq_tasklet(struct iwl_priv *priv)
615 {
616         u32 inta = 0;
617         u32 handled = 0;
618         unsigned long flags;
619         u32 i;
620 #ifdef CONFIG_IWLWIFI_DEBUG
621         u32 inta_mask;
622 #endif
623
624         spin_lock_irqsave(&priv->lock, flags);
625
626         /* Ack/clear/reset pending uCode interrupts.
627          * Note: Some bits in CSR_INT are the "OR" of bits in CSR_FH_INT_STATUS.
628          */
629         /* There is a hardware bug in the interrupt mask function: some
630          * interrupts (e.g. CSR_INT_BIT_SCD) can still be generated even if
631          * they are disabled in the CSR_INT_MASK register. Furthermore, the
632          * ICT interrupt handling mechanism has another bug that might cause
633          * these unmasked interrupts to go undetected. We work around the
634          * hardware bugs here by ACKing all the possible interrupts so that
635          * interrupt coalescing can still be achieved.
636          */
637         iwl_write32(priv, CSR_INT, priv->_agn.inta | ~priv->inta_mask);
638
639         inta = priv->_agn.inta;
640
641 #ifdef CONFIG_IWLWIFI_DEBUG
642         if (iwl_get_debug_level(priv) & IWL_DL_ISR) {
643                 /* just for debug */
644                 inta_mask = iwl_read32(priv, CSR_INT_MASK);
645                 IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x\n ",
646                                 inta, inta_mask);
647         }
648 #endif
649
650         spin_unlock_irqrestore(&priv->lock, flags);
651
652         /* The interrupt was saved in the inta variable; now we can reset priv->_agn.inta */
653         priv->_agn.inta = 0;
654
655         /* Now service all interrupt bits discovered above. */
656         if (inta & CSR_INT_BIT_HW_ERR) {
657                 IWL_ERR(priv, "Hardware error detected.  Restarting.\n");
658
659                 /* Tell the device to stop sending interrupts */
660                 iwl_disable_interrupts(priv);
661
662                 priv->isr_stats.hw++;
663                 iwl_irq_handle_error(priv);
664
665                 handled |= CSR_INT_BIT_HW_ERR;
666
667                 return;
668         }
669
670 #ifdef CONFIG_IWLWIFI_DEBUG
671         if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
672                 /* NIC fires this, but we don't use it, redundant with WAKEUP */
673                 if (inta & CSR_INT_BIT_SCD) {
674                         IWL_DEBUG_ISR(priv, "Scheduler finished transmitting "
675                                       "the frame(s).\n");
676                         priv->isr_stats.sch++;
677                 }
678
679                 /* Alive notification via Rx interrupt will do the real work */
680                 if (inta & CSR_INT_BIT_ALIVE) {
681                         IWL_DEBUG_ISR(priv, "Alive interrupt\n");
682                         priv->isr_stats.alive++;
683                 }
684         }
685 #endif
686         /* Safely ignore these bits for debug checks below */
687         inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
688
689         /* HW RF KILL switch toggled */
690         if (inta & CSR_INT_BIT_RF_KILL) {
691                 int hw_rf_kill = 0;
692                 if (!(iwl_read32(priv, CSR_GP_CNTRL) &
693                                 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
694                         hw_rf_kill = 1;
695
696                 IWL_WARN(priv, "RF_KILL bit toggled to %s.\n",
697                                 hw_rf_kill ? "disable radio" : "enable radio");
698
699                 priv->isr_stats.rfkill++;
700
701                 /* The driver only loads the uCode when the interface is
702                  * set up, and it allows loading the uCode even if the radio
703                  * is killed. Hence, update the killswitch state here. The
704                  * rfkill handler will take care of restarting if needed.
705                  */
706                 if (!test_bit(STATUS_ALIVE, &priv->status)) {
707                         if (hw_rf_kill)
708                                 set_bit(STATUS_RF_KILL_HW, &priv->status);
709                         else
710                                 clear_bit(STATUS_RF_KILL_HW, &priv->status);
711                         wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill);
712                 }
713
714                 handled |= CSR_INT_BIT_RF_KILL;
715         }
716
717         /* Chip got too hot and stopped itself */
718         if (inta & CSR_INT_BIT_CT_KILL) {
719                 IWL_ERR(priv, "Microcode CT kill error detected.\n");
720                 priv->isr_stats.ctkill++;
721                 handled |= CSR_INT_BIT_CT_KILL;
722         }
723
724         /* Error detected by uCode */
725         if (inta & CSR_INT_BIT_SW_ERR) {
726                 IWL_ERR(priv, "Microcode SW error detected. "
727                         " Restarting 0x%X.\n", inta);
728                 priv->isr_stats.sw++;
729                 iwl_irq_handle_error(priv);
730                 handled |= CSR_INT_BIT_SW_ERR;
731         }
732
733         /* uCode wakes up after power-down sleep */
734         if (inta & CSR_INT_BIT_WAKEUP) {
735                 IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
736                 iwl_rx_queue_update_write_ptr(priv, &priv->rxq);
737                 for (i = 0; i < priv->hw_params.max_txq_num; i++)
738                         iwl_txq_update_write_ptr(priv, &priv->txq[i]);
739
740                 priv->isr_stats.wakeup++;
741
742                 handled |= CSR_INT_BIT_WAKEUP;
743         }
744
745         /* All uCode command responses, including Tx command responses,
746          * Rx "responses" (frame-received notification), and other
747          * notifications from uCode come through here. */
748         if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
749                         CSR_INT_BIT_RX_PERIODIC)) {
750                 IWL_DEBUG_ISR(priv, "Rx interrupt\n");
751                 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
752                         handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
753                         iwl_write32(priv, CSR_FH_INT_STATUS,
754                                         CSR_FH_INT_RX_MASK);
755                 }
756                 if (inta & CSR_INT_BIT_RX_PERIODIC) {
757                         handled |= CSR_INT_BIT_RX_PERIODIC;
758                         iwl_write32(priv, CSR_INT, CSR_INT_BIT_RX_PERIODIC);
759                 }
760                 /* Sending an RX interrupt requires many steps to be done in
761                  * the device:
762                  * 1- write interrupt to current index in ICT table.
763                  * 2- dma RX frame.
764                  * 3- update RX shared data to indicate last write index.
765                  * 4- send interrupt.
766                  * This could lead to an RX race: the driver could receive an
767                  * RX interrupt before the shared data changes reflect it; the
768                  * periodic interrupt will detect any dangling Rx activity.
769                  */
770
771                 /* Disable periodic interrupt; we use it as just a one-shot. */
772                 iwl_write8(priv, CSR_INT_PERIODIC_REG,
773                             CSR_INT_PERIODIC_DIS);
774                 iwl_rx_handle(priv);
775
776                 /*
777                  * Enable periodic interrupt in 8 msec only if we received
778                  * real RX interrupt (instead of just periodic int), to catch
779                  * any dangling Rx interrupt.  If it was just the periodic
780                  * interrupt, there was no dangling Rx activity, and no need
781                  * to extend the periodic interrupt; one-shot is enough.
782                  */
783                 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
784                         iwl_write8(priv, CSR_INT_PERIODIC_REG,
785                                     CSR_INT_PERIODIC_ENA);
786
787                 priv->isr_stats.rx++;
788         }
789
790         /* This "Tx" DMA channel is used only for loading uCode */
791         if (inta & CSR_INT_BIT_FH_TX) {
792                 iwl_write32(priv, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
793                 IWL_DEBUG_ISR(priv, "uCode load interrupt\n");
794                 priv->isr_stats.tx++;
795                 handled |= CSR_INT_BIT_FH_TX;
796                 /* Wake up uCode load routine, now that load is complete */
797                 priv->ucode_write_complete = 1;
798                 wake_up_interruptible(&priv->wait_command_queue);
799         }
800
801         if (inta & ~handled) {
802                 IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
803                 priv->isr_stats.unhandled++;
804         }
805
806         if (inta & ~(priv->inta_mask)) {
807                 IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
808                          inta & ~priv->inta_mask);
809         }
810
811         /* Re-enable all interrupts */
812         /* only Re-enable if disabled by irq */
813         if (test_bit(STATUS_INT_ENABLED, &priv->status))
814                 iwl_enable_interrupts(priv);
815         /* Re-enable RF_KILL if it occurred */
816         else if (handled & CSR_INT_BIT_RF_KILL)
817                 iwl_enable_rfkill_int(priv);
818 }
819
820 /*****************************************************************************
821  *
822  * sysfs attributes
823  *
824  *****************************************************************************/
825
826 #ifdef CONFIG_IWLWIFI_DEBUG
827
828 /*
829  * The following adds a new attribute to the sysfs representation
830  * of this device driver (i.e. a new file in /sys/class/net/wlan0/device/)
831  * used for controlling the debug level.
832  *
833  * See the level definitions in iwl for details.
834  *
835  * The debug_level managed via sysfs below is a per-device debug level
836  * that is used instead of the global debug level whenever the
837  * per-device level is set.
838  */
839 static ssize_t show_debug_level(struct device *d,
840                                 struct device_attribute *attr, char *buf)
841 {
842         struct iwl_priv *priv = dev_get_drvdata(d);
843         return sprintf(buf, "0x%08X\n", iwl_get_debug_level(priv));
844 }
845 static ssize_t store_debug_level(struct device *d,
846                                 struct device_attribute *attr,
847                                  const char *buf, size_t count)
848 {
849         struct iwl_priv *priv = dev_get_drvdata(d);
850         unsigned long val;
851         int ret;
852
853         ret = strict_strtoul(buf, 0, &val);
854         if (ret)
855                 IWL_ERR(priv, "%s is not in hex or decimal form.\n", buf);
856         else {
857                 priv->debug_level = val;
858                 if (iwl_alloc_traffic_mem(priv))
859                         IWL_ERR(priv,
860                                 "Not enough memory to generate traffic log\n");
861         }
862         return strnlen(buf, count);
863 }
864
865 static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
866                         show_debug_level, store_debug_level);
867
868
869 #endif /* CONFIG_IWLWIFI_DEBUG */
870
871
872 static ssize_t show_temperature(struct device *d,
873                                 struct device_attribute *attr, char *buf)
874 {
875         struct iwl_priv *priv = dev_get_drvdata(d);
876
877         if (!iwl_is_alive(priv))
878                 return -EAGAIN;
879
880         return sprintf(buf, "%d\n", priv->temperature);
881 }
882
883 static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL);
884
885 static ssize_t show_tx_power(struct device *d,
886                              struct device_attribute *attr, char *buf)
887 {
888         struct iwl_priv *priv = dev_get_drvdata(d);
889
890         if (!iwl_is_ready_rf(priv))
891                 return sprintf(buf, "off\n");
892         else
893                 return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
894 }
895
896 static ssize_t store_tx_power(struct device *d,
897                               struct device_attribute *attr,
898                               const char *buf, size_t count)
899 {
900         struct iwl_priv *priv = dev_get_drvdata(d);
901         unsigned long val;
902         int ret;
903
904         ret = strict_strtoul(buf, 10, &val);
905         if (ret)
906                 IWL_INFO(priv, "%s is not in decimal form.\n", buf);
907         else {
908                 ret = iwl_set_tx_power(priv, val, false);
909                 if (ret)
910                         IWL_ERR(priv, "failed setting tx power (0x%d).\n",
911                                 ret);
912                 else
913                         ret = count;
914         }
915         return ret;
916 }
917
918 static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power);
919
920 static struct attribute *iwl_sysfs_entries[] = {
921         &dev_attr_temperature.attr,
922         &dev_attr_tx_power.attr,
923 #ifdef CONFIG_IWLWIFI_DEBUG
924         &dev_attr_debug_level.attr,
925 #endif
926         NULL
927 };
928
929 static struct attribute_group iwl_attribute_group = {
930         .name = NULL,           /* put in device directory */
931         .attrs = iwl_sysfs_entries,
932 };
933
934 /******************************************************************************
935  *
936  * uCode download functions
937  *
938  ******************************************************************************/
939
940 static void iwl_free_fw_desc(struct pci_dev *pci_dev, struct fw_desc *desc)
941 {
942         if (desc->v_addr)
943                 dma_free_coherent(&pci_dev->dev, desc->len,
944                                   desc->v_addr, desc->p_addr);
945         desc->v_addr = NULL;
946         desc->len = 0;
947 }
948
949 static void iwl_free_fw_img(struct pci_dev *pci_dev, struct fw_img *img)
950 {
951         iwl_free_fw_desc(pci_dev, &img->code);
952         iwl_free_fw_desc(pci_dev, &img->data);
953 }
954
955 static int iwl_alloc_fw_desc(struct pci_dev *pci_dev, struct fw_desc *desc,
956                              const void *data, size_t len)
957 {
958         if (!len) {
959                 desc->v_addr = NULL;
960                 return -EINVAL;
961         }
962
963         desc->v_addr = dma_alloc_coherent(&pci_dev->dev, len,
964                                           &desc->p_addr, GFP_KERNEL);
965         if (!desc->v_addr)
966                 return -ENOMEM;
967         desc->len = len;
968         memcpy(desc->v_addr, data, len);
969         return 0;
970 }
971
972 static void iwl_dealloc_ucode_pci(struct iwl_priv *priv)
973 {
974         iwl_free_fw_img(priv->pci_dev, &priv->ucode_rt);
975         iwl_free_fw_img(priv->pci_dev, &priv->ucode_init);
976 }
977
978 struct iwlagn_ucode_capabilities {
979         u32 max_probe_length;
980         u32 standard_phy_calibration_size;
981         u32 flags;
982 };
983
984 static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context);
985 static int iwl_mac_setup_register(struct iwl_priv *priv,
986                                   struct iwlagn_ucode_capabilities *capa);
987
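/*
 * Sentinel firmware "index" and filename tag used to request an experimental
 * uCode image first when CONFIG_IWLWIFI_DEBUG_EXPERIMENTAL_UCODE is enabled.
 */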
988 #define UCODE_EXPERIMENTAL_INDEX        100
989 #define UCODE_EXPERIMENTAL_TAG          "exp"
990
991 static int __must_check iwl_request_firmware(struct iwl_priv *priv, bool first)
992 {
993         const char *name_pre = priv->cfg->fw_name_pre;
994         char tag[8];
995
996         if (first) {
997 #ifdef CONFIG_IWLWIFI_DEBUG_EXPERIMENTAL_UCODE
998                 priv->fw_index = UCODE_EXPERIMENTAL_INDEX;
999                 strcpy(tag, UCODE_EXPERIMENTAL_TAG);
1000         } else if (priv->fw_index == UCODE_EXPERIMENTAL_INDEX) {
1001 #endif
1002                 priv->fw_index = priv->cfg->ucode_api_max;
1003                 sprintf(tag, "%d", priv->fw_index);
1004         } else {
1005                 priv->fw_index--;
1006                 sprintf(tag, "%d", priv->fw_index);
1007         }
1008
1009         if (priv->fw_index < priv->cfg->ucode_api_min) {
1010                 IWL_ERR(priv, "no suitable firmware found!\n");
1011                 return -ENOENT;
1012         }
1013
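        /* The resulting name is "<fw_name_pre><tag>.ucode", where the tag is
         * either an API version number or "exp" for the experimental image. */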
1014         sprintf(priv->firmware_name, "%s%s%s", name_pre, tag, ".ucode");
1015
1016         IWL_DEBUG_INFO(priv, "attempting to load firmware %s'%s'\n",
1017                        (priv->fw_index == UCODE_EXPERIMENTAL_INDEX)
1018                                 ? "EXPERIMENTAL " : "",
1019                        priv->firmware_name);
1020
1021         return request_firmware_nowait(THIS_MODULE, 1, priv->firmware_name,
1022                                        &priv->pci_dev->dev, GFP_KERNEL, priv,
1023                                        iwl_ucode_callback);
1024 }
1025
1026 struct iwlagn_firmware_pieces {
1027         const void *inst, *data, *init, *init_data;
1028         size_t inst_size, data_size, init_size, init_data_size;
1029
1030         u32 build;
1031
1032         u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr;
1033         u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr;
1034 };
1035
1036 static int iwlagn_load_legacy_firmware(struct iwl_priv *priv,
1037                                        const struct firmware *ucode_raw,
1038                                        struct iwlagn_firmware_pieces *pieces)
1039 {
1040         struct iwl_ucode_header *ucode = (void *)ucode_raw->data;
1041         u32 api_ver, hdr_size;
1042         const u8 *src;
1043
1044         priv->ucode_ver = le32_to_cpu(ucode->ver);
1045         api_ver = IWL_UCODE_API(priv->ucode_ver);
1046
1047         switch (api_ver) {
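        /*
         * API version 3 and later use the v2 header layout (28 bytes);
         * versions 0, 1 and 2 use the original v1 layout (24 bytes).
         */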
1048         default:
1049                 hdr_size = 28;
1050                 if (ucode_raw->size < hdr_size) {
1051                         IWL_ERR(priv, "File size too small!\n");
1052                         return -EINVAL;
1053                 }
1054                 pieces->build = le32_to_cpu(ucode->u.v2.build);
1055                 pieces->inst_size = le32_to_cpu(ucode->u.v2.inst_size);
1056                 pieces->data_size = le32_to_cpu(ucode->u.v2.data_size);
1057                 pieces->init_size = le32_to_cpu(ucode->u.v2.init_size);
1058                 pieces->init_data_size = le32_to_cpu(ucode->u.v2.init_data_size);
1059                 src = ucode->u.v2.data;
1060                 break;
1061         case 0:
1062         case 1:
1063         case 2:
1064                 hdr_size = 24;
1065                 if (ucode_raw->size < hdr_size) {
1066                         IWL_ERR(priv, "File size too small!\n");
1067                         return -EINVAL;
1068                 }
1069                 pieces->build = 0;
1070                 pieces->inst_size = le32_to_cpu(ucode->u.v1.inst_size);
1071                 pieces->data_size = le32_to_cpu(ucode->u.v1.data_size);
1072                 pieces->init_size = le32_to_cpu(ucode->u.v1.init_size);
1073                 pieces->init_data_size = le32_to_cpu(ucode->u.v1.init_data_size);
1074                 src = ucode->u.v1.data;
1075                 break;
1076         }
1077
1078         /* Verify size of file vs. image size info in file's header */
1079         if (ucode_raw->size != hdr_size + pieces->inst_size +
1080                                 pieces->data_size + pieces->init_size +
1081                                 pieces->init_data_size) {
1082
1083                 IWL_ERR(priv,
1084                         "uCode file size %d does not match expected size\n",
1085                         (int)ucode_raw->size);
1086                 return -EINVAL;
1087         }
1088
1089         pieces->inst = src;
1090         src += pieces->inst_size;
1091         pieces->data = src;
1092         src += pieces->data_size;
1093         pieces->init = src;
1094         src += pieces->init_size;
1095         pieces->init_data = src;
1096         src += pieces->init_data_size;
1097
1098         return 0;
1099 }
1100
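/*
 * Which uCode "alternative" the driver prefers from a TLV firmware's
 * alternatives bitmap; alternative-0 TLVs are always accepted (see below).
 */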
1101 static int iwlagn_wanted_ucode_alternative = 1;
1102
1103 static int iwlagn_load_firmware(struct iwl_priv *priv,
1104                                 const struct firmware *ucode_raw,
1105                                 struct iwlagn_firmware_pieces *pieces,
1106                                 struct iwlagn_ucode_capabilities *capa)
1107 {
1108         struct iwl_tlv_ucode_header *ucode = (void *)ucode_raw->data;
1109         struct iwl_ucode_tlv *tlv;
1110         size_t len = ucode_raw->size;
1111         const u8 *data;
1112         int wanted_alternative = iwlagn_wanted_ucode_alternative, tmp;
1113         u64 alternatives;
1114         u32 tlv_len;
1115         enum iwl_ucode_tlv_type tlv_type;
1116         const u8 *tlv_data;
1117
1118         if (len < sizeof(*ucode)) {
1119                 IWL_ERR(priv, "uCode has invalid length: %zd\n", len);
1120                 return -EINVAL;
1121         }
1122
1123         if (ucode->magic != cpu_to_le32(IWL_TLV_UCODE_MAGIC)) {
1124                 IWL_ERR(priv, "invalid uCode magic: 0X%x\n",
1125                         le32_to_cpu(ucode->magic));
1126                 return -EINVAL;
1127         }
1128
1129         /*
1130          * Check which alternatives are present, and "downgrade"
1131          * when the chosen alternative is not present, warning
1132          * the user when that happens. Some files may not have
1133          * any alternatives, so don't warn in that case.
1134          */
1135         alternatives = le64_to_cpu(ucode->alternatives);
1136         tmp = wanted_alternative;
1137         if (wanted_alternative > 63)
1138                 wanted_alternative = 63;
1139         while (wanted_alternative && !(alternatives & BIT(wanted_alternative)))
1140                 wanted_alternative--;
1141         if (wanted_alternative && wanted_alternative != tmp)
1142                 IWL_WARN(priv,
1143                          "uCode alternative %d not available, choosing %d\n",
1144                          tmp, wanted_alternative);
1145
1146         priv->ucode_ver = le32_to_cpu(ucode->ver);
1147         pieces->build = le32_to_cpu(ucode->build);
1148         data = ucode->data;
1149
1150         len -= sizeof(*ucode);
1151
1152         while (len >= sizeof(*tlv)) {
1153                 u16 tlv_alt;
1154
1155                 len -= sizeof(*tlv);
1156                 tlv = (void *)data;
1157
1158                 tlv_len = le32_to_cpu(tlv->length);
1159                 tlv_type = le16_to_cpu(tlv->type);
1160                 tlv_alt = le16_to_cpu(tlv->alternative);
1161                 tlv_data = tlv->data;
1162
1163                 if (len < tlv_len) {
1164                         IWL_ERR(priv, "invalid TLV len: %zd/%u\n",
1165                                 len, tlv_len);
1166                         return -EINVAL;
1167                 }
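                /* TLV payloads are padded to a 4-byte boundary in the file */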
1168                 len -= ALIGN(tlv_len, 4);
1169                 data += sizeof(*tlv) + ALIGN(tlv_len, 4);
1170
1171                 /*
1172                  * Alternative 0 is always valid.
1173                  *
1174                  * Skip alternative TLVs that are not selected.
1175                  */
1176                 if (tlv_alt != 0 && tlv_alt != wanted_alternative)
1177                         continue;
1178
1179                 switch (tlv_type) {
1180                 case IWL_UCODE_TLV_INST:
1181                         pieces->inst = tlv_data;
1182                         pieces->inst_size = tlv_len;
1183                         break;
1184                 case IWL_UCODE_TLV_DATA:
1185                         pieces->data = tlv_data;
1186                         pieces->data_size = tlv_len;
1187                         break;
1188                 case IWL_UCODE_TLV_INIT:
1189                         pieces->init = tlv_data;
1190                         pieces->init_size = tlv_len;
1191                         break;
1192                 case IWL_UCODE_TLV_INIT_DATA:
1193                         pieces->init_data = tlv_data;
1194                         pieces->init_data_size = tlv_len;
1195                         break;
1196                 case IWL_UCODE_TLV_BOOT:
1197                         IWL_ERR(priv, "Found unexpected BOOT ucode\n");
1198                         break;
1199                 case IWL_UCODE_TLV_PROBE_MAX_LEN:
1200                         if (tlv_len != sizeof(u32))
1201                                 goto invalid_tlv_len;
1202                         capa->max_probe_length =
1203                                         le32_to_cpup((__le32 *)tlv_data);
1204                         break;
1205                 case IWL_UCODE_TLV_PAN:
1206                         if (tlv_len)
1207                                 goto invalid_tlv_len;
1208                         capa->flags |= IWL_UCODE_TLV_FLAGS_PAN;
1209                         break;
1210                 case IWL_UCODE_TLV_FLAGS:
1211                         /* must be at least one u32 */
1212                         if (tlv_len < sizeof(u32))
1213                                 goto invalid_tlv_len;
1214                         /* and a proper number of u32s */
1215                         if (tlv_len % sizeof(u32))
1216                                 goto invalid_tlv_len;
1217                         /*
1218                          * This driver only reads the first u32, as no
1219                          * more feature flags are defined right now.
1220                          * If that changes, then either the driver
1221                          * will not work with the new firmware, or
1222                          * it will not take advantage of new features.
1223                          */
1224                         capa->flags = le32_to_cpup((__le32 *)tlv_data);
1225                         break;
1226                 case IWL_UCODE_TLV_INIT_EVTLOG_PTR:
1227                         if (tlv_len != sizeof(u32))
1228                                 goto invalid_tlv_len;
1229                         pieces->init_evtlog_ptr =
1230                                         le32_to_cpup((__le32 *)tlv_data);
1231                         break;
1232                 case IWL_UCODE_TLV_INIT_EVTLOG_SIZE:
1233                         if (tlv_len != sizeof(u32))
1234                                 goto invalid_tlv_len;
1235                         pieces->init_evtlog_size =
1236                                         le32_to_cpup((__le32 *)tlv_data);
1237                         break;
1238                 case IWL_UCODE_TLV_INIT_ERRLOG_PTR:
1239                         if (tlv_len != sizeof(u32))
1240                                 goto invalid_tlv_len;
1241                         pieces->init_errlog_ptr =
1242                                         le32_to_cpup((__le32 *)tlv_data);
1243                         break;
1244                 case IWL_UCODE_TLV_RUNT_EVTLOG_PTR:
1245                         if (tlv_len != sizeof(u32))
1246                                 goto invalid_tlv_len;
1247                         pieces->inst_evtlog_ptr =
1248                                         le32_to_cpup((__le32 *)tlv_data);
1249                         break;
1250                 case IWL_UCODE_TLV_RUNT_EVTLOG_SIZE:
1251                         if (tlv_len != sizeof(u32))
1252                                 goto invalid_tlv_len;
1253                         pieces->inst_evtlog_size =
1254                                         le32_to_cpup((__le32 *)tlv_data);
1255                         break;
1256                 case IWL_UCODE_TLV_RUNT_ERRLOG_PTR:
1257                         if (tlv_len != sizeof(u32))
1258                                 goto invalid_tlv_len;
1259                         pieces->inst_errlog_ptr =
1260                                         le32_to_cpup((__le32 *)tlv_data);
1261                         break;
1262                 case IWL_UCODE_TLV_ENHANCE_SENS_TBL:
1263                         if (tlv_len)
1264                                 goto invalid_tlv_len;
1265                         priv->enhance_sensitivity_table = true;
1266                         break;
1267                 case IWL_UCODE_TLV_PHY_CALIBRATION_SIZE:
1268                         if (tlv_len != sizeof(u32))
1269                                 goto invalid_tlv_len;
1270                         capa->standard_phy_calibration_size =
1271                                         le32_to_cpup((__le32 *)tlv_data);
1272                         break;
1273                 default:
1274                         IWL_DEBUG_INFO(priv, "unknown TLV: %d\n", tlv_type);
1275                         break;
1276                 }
1277         }
1278
1279         if (len) {
1280                 IWL_ERR(priv, "invalid TLV after parsing: %zd\n", len);
1281                 iwl_print_hex_dump(priv, IWL_DL_FW, (u8 *)data, len);
1282                 return -EINVAL;
1283         }
1284
1285         return 0;
1286
1287  invalid_tlv_len:
1288         IWL_ERR(priv, "TLV %d has invalid size: %u\n", tlv_type, tlv_len);
1289         iwl_print_hex_dump(priv, IWL_DL_FW, tlv_data, tlv_len);
1290
1291         return -EINVAL;
1292 }
1293
1294 /**
1295  * iwl_ucode_callback - callback when firmware was loaded
1296  *
1297  * If loaded successfully, copies the firmware into buffers
1298  * for the card to fetch (via DMA).
1299  */
1300 static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
1301 {
1302         struct iwl_priv *priv = context;
1303         struct iwl_ucode_header *ucode;
1304         int err;
1305         struct iwlagn_firmware_pieces pieces;
1306         const unsigned int api_max = priv->cfg->ucode_api_max;
1307         const unsigned int api_min = priv->cfg->ucode_api_min;
1308         u32 api_ver;
1309         char buildstr[25];
1310         u32 build;
1311         struct iwlagn_ucode_capabilities ucode_capa = {
1312                 .max_probe_length = 200,
1313                 .standard_phy_calibration_size =
1314                         IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE,
1315         };
1316
1317         memset(&pieces, 0, sizeof(pieces));
1318
1319         if (!ucode_raw) {
1320                 if (priv->fw_index <= priv->cfg->ucode_api_max)
1321                         IWL_ERR(priv,
1322                                 "request for firmware file '%s' failed.\n",
1323                                 priv->firmware_name);
1324                 goto try_again;
1325         }
1326
1327         IWL_DEBUG_INFO(priv, "Loaded firmware file '%s' (%zd bytes).\n",
1328                        priv->firmware_name, ucode_raw->size);
1329
1330         /* Make sure that we got at least the API version number */
1331         if (ucode_raw->size < 4) {
1332                 IWL_ERR(priv, "File size way too small!\n");
1333                 goto try_again;
1334         }
1335
1336         /* Data from ucode file:  header followed by uCode images */
1337         ucode = (struct iwl_ucode_header *)ucode_raw->data;
1338
1339         if (ucode->ver)
1340                 err = iwlagn_load_legacy_firmware(priv, ucode_raw, &pieces);
1341         else
1342                 err = iwlagn_load_firmware(priv, ucode_raw, &pieces,
1343                                            &ucode_capa);
1344
1345         if (err)
1346                 goto try_again;
1347
1348         api_ver = IWL_UCODE_API(priv->ucode_ver);
1349         build = pieces.build;
1350
1351         /*
1352          * api_ver should match the API version that forms part of the
1353          * firmware filename ... but we don't check for that; from here on
1354          * we rely only on the API version read from the firmware header.
1355          */
1356         /* no api version check required for experimental uCode */
1357         if (priv->fw_index != UCODE_EXPERIMENTAL_INDEX) {
1358                 if (api_ver < api_min || api_ver > api_max) {
1359                         IWL_ERR(priv,
1360                                 "Driver unable to support your firmware API. "
1361                                 "Driver supports v%u, firmware is v%u.\n",
1362                                 api_max, api_ver);
1363                         goto try_again;
1364                 }
1365
1366                 if (api_ver != api_max)
1367                         IWL_ERR(priv,
1368                                 "Firmware has old API version. Expected v%u, "
1369                                 "got v%u. New firmware can be obtained "
1370                                 "from http://www.intellinuxwireless.org.\n",
1371                                 api_max, api_ver);
1372         }
1373
1374         if (build)
1375                 sprintf(buildstr, " build %u%s", build,
1376                        (priv->fw_index == UCODE_EXPERIMENTAL_INDEX)
1377                                 ? " (EXP)" : "");
1378         else
1379                 buildstr[0] = '\0';
1380
1381         IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u%s\n",
1382                  IWL_UCODE_MAJOR(priv->ucode_ver),
1383                  IWL_UCODE_MINOR(priv->ucode_ver),
1384                  IWL_UCODE_API(priv->ucode_ver),
1385                  IWL_UCODE_SERIAL(priv->ucode_ver),
1386                  buildstr);
1387
1388         snprintf(priv->hw->wiphy->fw_version,
1389                  sizeof(priv->hw->wiphy->fw_version),
1390                  "%u.%u.%u.%u%s",
1391                  IWL_UCODE_MAJOR(priv->ucode_ver),
1392                  IWL_UCODE_MINOR(priv->ucode_ver),
1393                  IWL_UCODE_API(priv->ucode_ver),
1394                  IWL_UCODE_SERIAL(priv->ucode_ver),
1395                  buildstr);
1396
1397         /*
1398          * For any of the failures below (before allocating pci memory)
1399          * we will try to load a version with a smaller API -- maybe the
1400          * user just got a corrupted version of the latest API.
1401          */
1402
1403         IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
1404                        priv->ucode_ver);
1405         IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %Zd\n",
1406                        pieces.inst_size);
1407         IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %Zd\n",
1408                        pieces.data_size);
1409         IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %Zd\n",
1410                        pieces.init_size);
1411         IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %Zd\n",
1412                        pieces.init_data_size);
1413
1414         /* Verify that uCode images will fit in card's SRAM */
1415         if (pieces.inst_size > priv->hw_params.max_inst_size) {
1416                 IWL_ERR(priv, "uCode instr len %Zd too large to fit in\n",
1417                         pieces.inst_size);
1418                 goto try_again;
1419         }
1420
1421         if (pieces.data_size > priv->hw_params.max_data_size) {
1422                 IWL_ERR(priv, "uCode data len %Zd too large to fit in\n",
1423                         pieces.data_size);
1424                 goto try_again;
1425         }
1426
1427         if (pieces.init_size > priv->hw_params.max_inst_size) {
1428                 IWL_ERR(priv, "uCode init instr len %Zd too large to fit in\n",
1429                         pieces.init_size);
1430                 goto try_again;
1431         }
1432
1433         if (pieces.init_data_size > priv->hw_params.max_data_size) {
1434                 IWL_ERR(priv, "uCode init data len %Zd too large to fit in\n",
1435                         pieces.init_data_size);
1436                 goto try_again;
1437         }
1438
1439         /* Allocate ucode buffers for card's bus-master loading ... */
1440
1441         /* Runtime instructions and 2 copies of data:
1442          * 1) unmodified from disk
1443          * 2) backup cache for save/restore during power-downs */
1444         if (iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_rt.code,
1445                               pieces.inst, pieces.inst_size))
1446                 goto err_pci_alloc;
1447         if (iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_rt.data,
1448                               pieces.data, pieces.data_size))
1449                 goto err_pci_alloc;
1450
1451         /* Initialization instructions and data */
1452         if (pieces.init_size && pieces.init_data_size) {
1453                 if (iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init.code,
1454                                       pieces.init, pieces.init_size))
1455                         goto err_pci_alloc;
1456                 if (iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init.data,
1457                                       pieces.init_data, pieces.init_data_size))
1458                         goto err_pci_alloc;
1459         }
1460
1461         /* Now that we can no longer fail, copy information */
1462
1463         /*
1464          * The (size - 16) / 12 formula is based on the information recorded
1465          * for each event, which is of mode 1 (including timestamp) for all
1466          * new microcodes that include this information.
1467          */
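             /*
              * For example, assuming the evtlog sizes handled below are byte
              * counts: the 16 is the four-u32 event log header (cf.
              * EVENT_START_OFFSET) and the 12 is one mode-1 entry of three
              * u32s (event id, timestamp, data), so a 4096-byte log area
              * yields (4096 - 16) / 12 = 340 entries.
              */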
1468         priv->_agn.init_evtlog_ptr = pieces.init_evtlog_ptr;
1469         if (pieces.init_evtlog_size)
1470                 priv->_agn.init_evtlog_size = (pieces.init_evtlog_size - 16)/12;
1471         else
1472                 priv->_agn.init_evtlog_size =
1473                         priv->cfg->base_params->max_event_log_size;
1474         priv->_agn.init_errlog_ptr = pieces.init_errlog_ptr;
1475         priv->_agn.inst_evtlog_ptr = pieces.inst_evtlog_ptr;
1476         if (pieces.inst_evtlog_size)
1477                 priv->_agn.inst_evtlog_size = (pieces.inst_evtlog_size - 16)/12;
1478         else
1479                 priv->_agn.inst_evtlog_size =
1480                         priv->cfg->base_params->max_event_log_size;
1481         priv->_agn.inst_errlog_ptr = pieces.inst_errlog_ptr;
1482
1483         priv->new_scan_threshold_behaviour =
1484                 !!(ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWSCAN);
1485
1486         if ((priv->cfg->sku & EEPROM_SKU_CAP_IPAN_ENABLE) &&
1487             (ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PAN)) {
1488                 priv->valid_contexts |= BIT(IWL_RXON_CTX_PAN);
1489                 priv->sta_key_max_num = STA_KEY_MAX_NUM_PAN;
1490         } else
1491                 priv->sta_key_max_num = STA_KEY_MAX_NUM;
1492
1493         if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))
1494                 priv->cmd_queue = IWL_IPAN_CMD_QUEUE_NUM;
1495         else
1496                 priv->cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
1497
1498         /*
1499          * figure out the offset of chain noise reset and gain commands
1500          * based on the size of the standard phy calibration commands table
1501          */
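             /*
              * For example, if the standard calibration table has 8 entries
              * (command indices 0..7), the chain noise reset command is
              * assigned index 8 and the gain command index 9 below.
              */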
1502         if (ucode_capa.standard_phy_calibration_size >
1503             IWL_MAX_PHY_CALIBRATE_TBL_SIZE)
1504                 ucode_capa.standard_phy_calibration_size =
1505                         IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE;
1506
1507         priv->_agn.phy_calib_chain_noise_reset_cmd =
1508                 ucode_capa.standard_phy_calibration_size;
1509         priv->_agn.phy_calib_chain_noise_gain_cmd =
1510                 ucode_capa.standard_phy_calibration_size + 1;
1511
1512         /**************************************************
1513          * This is still part of probe() in a sense...
1514          *
1515          * 9. Setup and register with mac80211 and debugfs
1516          **************************************************/
1517         err = iwl_mac_setup_register(priv, &ucode_capa);
1518         if (err)
1519                 goto out_unbind;
1520
1521         err = iwl_dbgfs_register(priv, DRV_NAME);
1522         if (err)
1523                 IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err);
1524
1525         err = sysfs_create_group(&priv->pci_dev->dev.kobj,
1526                                         &iwl_attribute_group);
1527         if (err) {
1528                 IWL_ERR(priv, "failed to create sysfs device attributes\n");
1529                 goto out_unbind;
1530         }
1531
1532         /* We have our copies now, allow the OS to release its copies */
1533         release_firmware(ucode_raw);
1534         complete(&priv->_agn.firmware_loading_complete);
1535         return;
1536
1537  try_again:
1538         /* try next, if any */
1539         if (iwl_request_firmware(priv, false))
1540                 goto out_unbind;
1541         release_firmware(ucode_raw);
1542         return;
1543
1544  err_pci_alloc:
1545         IWL_ERR(priv, "failed to allocate pci memory\n");
1546         iwl_dealloc_ucode_pci(priv);
1547  out_unbind:
1548         complete(&priv->_agn.firmware_loading_complete);
1549         device_release_driver(&priv->pci_dev->dev);
1550         release_firmware(ucode_raw);
1551 }
1552
1553 static const char *desc_lookup_text[] = {
1554         "OK",
1555         "FAIL",
1556         "BAD_PARAM",
1557         "BAD_CHECKSUM",
1558         "NMI_INTERRUPT_WDG",
1559         "SYSASSERT",
1560         "FATAL_ERROR",
1561         "BAD_COMMAND",
1562         "HW_ERROR_TUNE_LOCK",
1563         "HW_ERROR_TEMPERATURE",
1564         "ILLEGAL_CHAN_FREQ",
1565         "VCC_NOT_STABLE",
1566         "FH_ERROR",
1567         "NMI_INTERRUPT_HOST",
1568         "NMI_INTERRUPT_ACTION_PT",
1569         "NMI_INTERRUPT_UNKNOWN",
1570         "UCODE_VERSION_MISMATCH",
1571         "HW_ERROR_ABS_LOCK",
1572         "HW_ERROR_CAL_LOCK_FAIL",
1573         "NMI_INTERRUPT_INST_ACTION_PT",
1574         "NMI_INTERRUPT_DATA_ACTION_PT",
1575         "NMI_TRM_HW_ER",
1576         "NMI_INTERRUPT_TRM",
1577         "NMI_INTERRUPT_BREAK_POINT",
1578         "DEBUG_0",
1579         "DEBUG_1",
1580         "DEBUG_2",
1581         "DEBUG_3",
1582 };
1583
1584 static struct { char *name; u8 num; } advanced_lookup[] = {
1585         { "NMI_INTERRUPT_WDG", 0x34 },
1586         { "SYSASSERT", 0x35 },
1587         { "UCODE_VERSION_MISMATCH", 0x37 },
1588         { "BAD_COMMAND", 0x38 },
1589         { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
1590         { "FATAL_ERROR", 0x3D },
1591         { "NMI_TRM_HW_ERR", 0x46 },
1592         { "NMI_INTERRUPT_TRM", 0x4C },
1593         { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
1594         { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
1595         { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
1596         { "NMI_INTERRUPT_HOST", 0x66 },
1597         { "NMI_INTERRUPT_ACTION_PT", 0x7C },
1598         { "NMI_INTERRUPT_UNKNOWN", 0x84 },
1599         { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
1600         { "ADVANCED_SYSASSERT", 0 },
1601 };
1602
1603 static const char *desc_lookup(u32 num)
1604 {
1605         int i;
1606         int max = ARRAY_SIZE(desc_lookup_text);
1607
1608         if (num < max)
1609                 return desc_lookup_text[num];
1610
1611         max = ARRAY_SIZE(advanced_lookup) - 1;
1612         for (i = 0; i < max; i++) {
1613                 if (advanced_lookup[i].num == num)
1614                         break;
1615         }
1616         return advanced_lookup[i].name;
1617 }
1618
1619 #define ERROR_START_OFFSET  (1 * sizeof(u32))
1620 #define ERROR_ELEM_SIZE     (7 * sizeof(u32))
1621
1622 void iwl_dump_nic_error_log(struct iwl_priv *priv)
1623 {
1624         u32 base;
1625         struct iwl_error_event_table table;
1626
1627         base = priv->device_pointers.error_event_table;
1628         if (priv->ucode_type == IWL_UCODE_INIT) {
1629                 if (!base)
1630                         base = priv->_agn.init_errlog_ptr;
1631         } else {
1632                 if (!base)
1633                         base = priv->_agn.inst_errlog_ptr;
1634         }
1635
1636         if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
1637                 IWL_ERR(priv,
1638                         "Not valid error log pointer 0x%08X for %s uCode\n",
1639                         base,
1640                         (priv->ucode_type == IWL_UCODE_INIT)
1641                                         ? "Init" : "RT");
1642                 return;
1643         }
1644
1645         iwl_read_targ_mem_words(priv, base, &table, sizeof(table));
1646
1647         if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
1648                 IWL_ERR(priv, "Start IWL Error Log Dump:\n");
1649                 IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
1650                         priv->status, table.valid);
1651         }
1652
1653         priv->isr_stats.err_code = table.error_id;
1654
1655         trace_iwlwifi_dev_ucode_error(priv, table.error_id, table.tsf_low,
1656                                       table.data1, table.data2, table.line,
1657                                       table.blink1, table.blink2, table.ilink1,
1658                                       table.ilink2, table.bcon_time, table.gp1,
1659                                       table.gp2, table.gp3, table.ucode_ver,
1660                                       table.hw_ver, table.brd_ver);
1661         IWL_ERR(priv, "0x%08X | %-28s\n", table.error_id,
1662                 desc_lookup(table.error_id));
1663         IWL_ERR(priv, "0x%08X | uPc\n", table.pc);
1664         IWL_ERR(priv, "0x%08X | branchlink1\n", table.blink1);
1665         IWL_ERR(priv, "0x%08X | branchlink2\n", table.blink2);
1666         IWL_ERR(priv, "0x%08X | interruptlink1\n", table.ilink1);
1667         IWL_ERR(priv, "0x%08X | interruptlink2\n", table.ilink2);
1668         IWL_ERR(priv, "0x%08X | data1\n", table.data1);
1669         IWL_ERR(priv, "0x%08X | data2\n", table.data2);
1670         IWL_ERR(priv, "0x%08X | line\n", table.line);
1671         IWL_ERR(priv, "0x%08X | beacon time\n", table.bcon_time);
1672         IWL_ERR(priv, "0x%08X | tsf low\n", table.tsf_low);
1673         IWL_ERR(priv, "0x%08X | tsf hi\n", table.tsf_hi);
1674         IWL_ERR(priv, "0x%08X | time gp1\n", table.gp1);
1675         IWL_ERR(priv, "0x%08X | time gp2\n", table.gp2);
1676         IWL_ERR(priv, "0x%08X | time gp3\n", table.gp3);
1677         IWL_ERR(priv, "0x%08X | uCode version\n", table.ucode_ver);
1678         IWL_ERR(priv, "0x%08X | hw version\n", table.hw_ver);
1679         IWL_ERR(priv, "0x%08X | board version\n", table.brd_ver);
1680         IWL_ERR(priv, "0x%08X | hcmd\n", table.hcmd);
1681 }
1682
1683 #define EVENT_START_OFFSET  (4 * sizeof(u32))
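     /*
      * Sketch of the event log layout in SRAM, as implied by the reads in
      * iwl_dump_nic_event_log() and iwl_print_event_log() below:
      *
      *   u32 capacity;    total number of entries the log can hold
      *   u32 mode;        0: entries of 2 u32s, 1: entries of 3 u32s (timestamped)
      *   u32 num_wraps;   number of times uCode wrapped to the top of the log
      *   u32 next_entry;  index of the next entry uCode will write
      *   ... entries ...  each entry is 2 or 3 u32s depending on 'mode'
      *
      * EVENT_START_OFFSET above is the size of this four-word header.
      */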
1684
1685 /**
1686  * iwl_print_event_log - Dump error event log to syslog
1687  *
1688  */
1689 static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
1690                                u32 num_events, u32 mode,
1691                                int pos, char **buf, size_t bufsz)
1692 {
1693         u32 i;
1694         u32 base;       /* SRAM byte address of event log header */
1695         u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
1696         u32 ptr;        /* SRAM byte address of log data */
1697         u32 ev, time, data; /* event log data */
1698         unsigned long reg_flags;
1699
1700         if (num_events == 0)
1701                 return pos;
1702
1703         base = priv->device_pointers.log_event_table;
1704         if (priv->ucode_type == IWL_UCODE_INIT) {
1705                 if (!base)
1706                         base = priv->_agn.init_evtlog_ptr;
1707         } else {
1708                 if (!base)
1709                         base = priv->_agn.inst_evtlog_ptr;
1710         }
1711
1712         if (mode == 0)
1713                 event_size = 2 * sizeof(u32);
1714         else
1715                 event_size = 3 * sizeof(u32);
1716
1717         ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
1718
1719         /* Make sure device is powered up for SRAM reads */
1720         spin_lock_irqsave(&priv->reg_lock, reg_flags);
1721         iwl_grab_nic_access(priv);
1722
1723         /* Set starting address; reads will auto-increment */
1724         iwl_write32(priv, HBUS_TARG_MEM_RADDR, ptr);
1725         rmb();
1726
1727         /* "time" is actually "data" for mode 0 (no timestamp).
1728          * place event id # at far right for easier visual parsing. */
1729         for (i = 0; i < num_events; i++) {
1730                 ev = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
1731                 time = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
1732                 if (mode == 0) {
1733                         /* data, ev */
1734                         if (bufsz) {
1735                                 pos += scnprintf(*buf + pos, bufsz - pos,
1736                                                 "EVT_LOG:0x%08x:%04u\n",
1737                                                 time, ev);
1738                         } else {
1739                                 trace_iwlwifi_dev_ucode_event(priv, 0,
1740                                         time, ev);
1741                                 IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n",
1742                                         time, ev);
1743                         }
1744                 } else {
1745                         data = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
1746                         if (bufsz) {
1747                                 pos += scnprintf(*buf + pos, bufsz - pos,
1748                                                 "EVT_LOGT:%010u:0x%08x:%04u\n",
1749                                                  time, data, ev);
1750                         } else {
1751                                 IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n",
1752                                         time, data, ev);
1753                                 trace_iwlwifi_dev_ucode_event(priv, time,
1754                                         data, ev);
1755                         }
1756                 }
1757         }
1758
1759         /* Allow device to power down */
1760         iwl_release_nic_access(priv);
1761         spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
1762         return pos;
1763 }
1764
1765 /**
1766  * iwl_print_last_event_logs - Dump the newest # of event log entries to syslog
1767  */
1768 static int iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
1769                                     u32 num_wraps, u32 next_entry,
1770                                     u32 size, u32 mode,
1771                                     int pos, char **buf, size_t bufsz)
1772 {
1773         /*
1774          * display the newest DEFAULT_DUMP_EVENT_LOG_ENTRIES entries,
1775          * i.e. the entries just before the next one that uCode would fill.
1776          */
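             /*
              * For example, with capacity = 300, num_wraps != 0, size = 20
              * and next_entry = 5, the newest 20 entries straddle the wrap
              * point: the first branch below prints entries 285..299
              * (start index capacity - (size - next_entry) = 285, count 15)
              * and then entries 0..4.  Without a wrap, only the entries
              * before next_entry exist and the second half applies.
              */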
1777         if (num_wraps) {
1778                 if (next_entry < size) {
1779                         pos = iwl_print_event_log(priv,
1780                                                 capacity - (size - next_entry),
1781                                                 size - next_entry, mode,
1782                                                 pos, buf, bufsz);
1783                         pos = iwl_print_event_log(priv, 0,
1784                                                   next_entry, mode,
1785                                                   pos, buf, bufsz);
1786                 } else
1787                         pos = iwl_print_event_log(priv, next_entry - size,
1788                                                   size, mode, pos, buf, bufsz);
1789         } else {
1790                 if (next_entry < size) {
1791                         pos = iwl_print_event_log(priv, 0, next_entry,
1792                                                   mode, pos, buf, bufsz);
1793                 } else {
1794                         pos = iwl_print_event_log(priv, next_entry - size,
1795                                                   size, mode, pos, buf, bufsz);
1796                 }
1797         }
1798         return pos;
1799 }
1800
1801 #define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20)
1802
1803 int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
1804                             char **buf, bool display)
1805 {
1806         u32 base;       /* SRAM byte address of event log header */
1807         u32 capacity;   /* event log capacity in # entries */
1808         u32 mode;       /* 0 - no timestamp, 1 - timestamp recorded */
1809         u32 num_wraps;  /* # times uCode wrapped to top of log */
1810         u32 next_entry; /* index of next entry to be written by uCode */
1811         u32 size;       /* # entries that we'll print */
1812         u32 logsize;
1813         int pos = 0;
1814         size_t bufsz = 0;
1815
1816         base = priv->device_pointers.log_event_table;
1817         if (priv->ucode_type == IWL_UCODE_INIT) {
1818                 logsize = priv->_agn.init_evtlog_size;
1819                 if (!base)
1820                         base = priv->_agn.init_evtlog_ptr;
1821         } else {
1822                 logsize = priv->_agn.inst_evtlog_size;
1823                 if (!base)
1824                         base = priv->_agn.inst_evtlog_ptr;
1825         }
1826
1827         if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
1828                 IWL_ERR(priv,
1829                         "Invalid event log pointer 0x%08X for %s uCode\n",
1830                         base,
1831                         (priv->ucode_type == IWL_UCODE_INIT)
1832                                         ? "Init" : "RT");
1833                 return -EINVAL;
1834         }
1835
1836         /* event log header */
1837         capacity = iwl_read_targ_mem(priv, base);
1838         mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
1839         num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
1840         next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
1841
1842         if (capacity > logsize) {
1843                 IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n",
1844                         capacity, logsize);
1845                 capacity = logsize;
1846         }
1847
1848         if (next_entry > logsize) {
1849                 IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n",
1850                         next_entry, logsize);
1851                 next_entry = logsize;
1852         }
1853
1854         size = num_wraps ? capacity : next_entry;
1855
1856         /* bail out if nothing in log */
1857         if (size == 0) {
1858                 IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
1859                 return pos;
1860         }
1861
1862         /* enable/disable bt channel inhibition */
1863         priv->bt_ch_announce = iwlagn_bt_ch_announce;
1864
1865 #ifdef CONFIG_IWLWIFI_DEBUG
1866         if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log)
1867                 size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
1868                         ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
1869 #else
1870         size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
1871                 ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
1872 #endif
1873         IWL_ERR(priv, "Start IWL Event Log Dump: display last %u entries\n",
1874                 size);
1875
1876 #ifdef CONFIG_IWLWIFI_DEBUG
1877         if (display) {
1878                 if (full_log)
1879                         bufsz = capacity * 48;
1880                 else
1881                         bufsz = size * 48;
1882                 *buf = kmalloc(bufsz, GFP_KERNEL);
1883                 if (!*buf)
1884                         return -ENOMEM;
1885         }
1886         if ((iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) {
1887                 /*
1888                  * if uCode has wrapped back to top of log,
1889                  * start at the oldest entry,
1890                  * i.e. the next one that uCode would fill.
1891                  */
1892                 if (num_wraps)
1893                         pos = iwl_print_event_log(priv, next_entry,
1894                                                 capacity - next_entry, mode,
1895                                                 pos, buf, bufsz);
1896                 /* (then/else) start at top of log */
1897                 pos = iwl_print_event_log(priv, 0,
1898                                           next_entry, mode, pos, buf, bufsz);
1899         } else
1900                 pos = iwl_print_last_event_logs(priv, capacity, num_wraps,
1901                                                 next_entry, size, mode,
1902                                                 pos, buf, bufsz);
1903 #else
1904         pos = iwl_print_last_event_logs(priv, capacity, num_wraps,
1905                                         next_entry, size, mode,
1906                                         pos, buf, bufsz);
1907 #endif
1908         return pos;
1909 }
1910
1911 static void iwl_rf_kill_ct_config(struct iwl_priv *priv)
1912 {
1913         struct iwl_ct_kill_config cmd;
1914         struct iwl_ct_kill_throttling_config adv_cmd;
1915         unsigned long flags;
1916         int ret = 0;
1917
1918         spin_lock_irqsave(&priv->lock, flags);
1919         iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
1920                     CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
1921         spin_unlock_irqrestore(&priv->lock, flags);
1922         priv->thermal_throttle.ct_kill_toggle = false;
1923
1924         if (priv->cfg->base_params->support_ct_kill_exit) {
1925                 adv_cmd.critical_temperature_enter =
1926                         cpu_to_le32(priv->hw_params.ct_kill_threshold);
1927                 adv_cmd.critical_temperature_exit =
1928                         cpu_to_le32(priv->hw_params.ct_kill_exit_threshold);
1929
1930                 ret = iwl_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
1931                                        sizeof(adv_cmd), &adv_cmd);
1932                 if (ret)
1933                         IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
1934                 else
1935                         IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
1936                                         "succeeded, "
1937                                         "critical temperature enter is %d, "
1938                                         "exit is %d\n",
1939                                        priv->hw_params.ct_kill_threshold,
1940                                        priv->hw_params.ct_kill_exit_threshold);
1941         } else {
1942                 cmd.critical_temperature_R =
1943                         cpu_to_le32(priv->hw_params.ct_kill_threshold);
1944
1945                 ret = iwl_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
1946                                        sizeof(cmd), &cmd);
1947                 if (ret)
1948                         IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
1949                 else
1950                         IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
1951                                         "succeeded, "
1952                                         "critical temperature is %d\n",
1953                                         priv->hw_params.ct_kill_threshold);
1954         }
1955 }
1956
1957 static int iwlagn_send_calib_cfg_rt(struct iwl_priv *priv, u32 cfg)
1958 {
1959         struct iwl_calib_cfg_cmd calib_cfg_cmd;
1960         struct iwl_host_cmd cmd = {
1961                 .id = CALIBRATION_CFG_CMD,
1962                 .len = { sizeof(struct iwl_calib_cfg_cmd), },
1963                 .data = { &calib_cfg_cmd, },
1964         };
1965
1966         memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd));
1967         calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_INIT_CFG_ALL;
1968         calib_cfg_cmd.ucd_calib_cfg.once.start = cpu_to_le32(cfg);
1969
1970         return iwl_send_cmd(priv, &cmd);
1971 }
1972
1973
1974 /**
1975  * iwl_alive_start - called after REPLY_ALIVE notification received
1976  *                   from protocol/runtime uCode (initialization uCode's
1977  *                   Alive gets handled by iwl_init_alive_start()).
1978  */
1979 int iwl_alive_start(struct iwl_priv *priv)
1980 {
1981         int ret = 0;
1982         struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
1983
1984         iwl_reset_ict(priv);
1985
1986         IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
1987
1988         /* After the ALIVE response, we can send host commands to the uCode */
1989         set_bit(STATUS_ALIVE, &priv->status);
1990
1991         /* Enable watchdog to monitor the driver tx queues */
1992         iwl_setup_watchdog(priv);
1993
1994         if (iwl_is_rfkill(priv))
1995                 return -ERFKILL;
1996
1997         /* download priority table before any calibration request */
1998         if (priv->cfg->bt_params &&
1999             priv->cfg->bt_params->advanced_bt_coexist) {
2000                 /* Configure Bluetooth device coexistence support */
2001                 priv->bt_valid = IWLAGN_BT_ALL_VALID_MSK;
2002                 priv->kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT;
2003                 priv->kill_cts_mask = IWLAGN_BT_KILL_CTS_MASK_DEFAULT;
2004                 priv->cfg->ops->hcmd->send_bt_config(priv);
2005                 priv->bt_valid = IWLAGN_BT_VALID_ENABLE_FLAGS;
2006                 iwlagn_send_prio_tbl(priv);
2007
2008                 /* FIXME: w/a to force change uCode BT state machine */
2009                 ret = iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_OPEN,
2010                                          BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
2011                 if (ret)
2012                         return ret;
2013                 ret = iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_CLOSE,
2014                                          BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
2015                 if (ret)
2016                         return ret;
2017         }
2018         if (priv->hw_params.calib_rt_cfg)
2019                 iwlagn_send_calib_cfg_rt(priv, priv->hw_params.calib_rt_cfg);
2020
2021         ieee80211_wake_queues(priv->hw);
2022
2023         priv->active_rate = IWL_RATES_MASK;
2024
2025         /* Configure Tx antenna selection based on H/W config */
2026         if (priv->cfg->ops->hcmd->set_tx_ant)
2027                 priv->cfg->ops->hcmd->set_tx_ant(priv, priv->cfg->valid_tx_ant);
2028
2029         if (iwl_is_associated_ctx(ctx)) {
2030                 struct iwl_rxon_cmd *active_rxon =
2031                                 (struct iwl_rxon_cmd *)&ctx->active;
2032                 /* apply any changes in staging */
2033                 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
2034                 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2035         } else {
2036                 struct iwl_rxon_context *tmp;
2037                 /* Initialize our rx_config data */
2038                 for_each_context(priv, tmp)
2039                         iwl_connection_init_rx_config(priv, tmp);
2040
2041                 if (priv->cfg->ops->hcmd->set_rxon_chain)
2042                         priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
2043         }
2044
2045         if (!priv->cfg->bt_params ||
2046             !priv->cfg->bt_params->advanced_bt_coexist) {
2047                 /*
2048                  * default is 2-wire BT coexistence support
2049                  */
2050                 priv->cfg->ops->hcmd->send_bt_config(priv);
2051         }
2052
2053         iwl_reset_run_time_calib(priv);
2054
2055         set_bit(STATUS_READY, &priv->status);
2056
2057         /* Configure the adapter for unassociated operation */
2058         ret = iwlagn_commit_rxon(priv, ctx);
2059         if (ret)
2060                 return ret;
2061
2062         /* At this point, the NIC is initialized and operational */
2063         iwl_rf_kill_ct_config(priv);
2064
2065         IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
2066
2067         return iwl_power_update_mode(priv, true);
2068 }
2069
2070 static void iwl_cancel_deferred_work(struct iwl_priv *priv);
2071
2072 static void __iwl_down(struct iwl_priv *priv)
2073 {
2074         int exit_pending;
2075
2076         IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");
2077
2078         iwl_scan_cancel_timeout(priv, 200);
2079
2080         exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
2081
2082         /* Stop TX queues watchdog. We need to have the STATUS_EXIT_PENDING
2083          * bit set to prevent the timer from being re-armed. */
2084         del_timer_sync(&priv->watchdog);
2085
2086         iwl_clear_ucode_stations(priv, NULL);
2087         iwl_dealloc_bcast_stations(priv);
2088         iwl_clear_driver_stations(priv);
2089
2090         /* reset BT coex data */
2091         priv->bt_status = 0;
2092         if (priv->cfg->bt_params)
2093                 priv->bt_traffic_load =
2094                          priv->cfg->bt_params->bt_init_traffic_load;
2095         else
2096                 priv->bt_traffic_load = 0;
2097         priv->bt_full_concurrent = false;
2098         priv->bt_ci_compliance = 0;
2099
2100         /* Wipe out the EXIT_PENDING status bit if we are not actually
2101          * exiting the module */
2102         if (!exit_pending)
2103                 clear_bit(STATUS_EXIT_PENDING, &priv->status);
2104
2105         if (priv->mac80211_registered)
2106                 ieee80211_stop_queues(priv->hw);
2107
2108         /* Clear out all status bits but a few that are stable across reset */
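             /* test_bit() evaluates to 0 or 1, so shifting each result back
              * to its bit position and OR-ing them together builds a mask of
              * the bits to keep; the &= below then clears every other bit. */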
2109         priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
2110                                 STATUS_RF_KILL_HW |
2111                         test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
2112                                 STATUS_GEO_CONFIGURED |
2113                         test_bit(STATUS_FW_ERROR, &priv->status) <<
2114                                 STATUS_FW_ERROR |
2115                        test_bit(STATUS_EXIT_PENDING, &priv->status) <<
2116                                 STATUS_EXIT_PENDING;
2117
2118         iwlagn_stop_device(priv);
2119
2120         dev_kfree_skb(priv->beacon_skb);
2121         priv->beacon_skb = NULL;
2122 }
2123
2124 static void iwl_down(struct iwl_priv *priv)
2125 {
2126         mutex_lock(&priv->mutex);
2127         __iwl_down(priv);
2128         mutex_unlock(&priv->mutex);
2129
2130         iwl_cancel_deferred_work(priv);
2131 }
2132
2133 #define HW_READY_TIMEOUT (50)
2134
2135 /* Note: returns the poll_bit return value, which is >= 0 on success */
2136 static int iwl_set_hw_ready(struct iwl_priv *priv)
2137 {
2138         int ret;
2139
2140         iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
2141                 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
2142
2143         /* See if we got it */
2144         ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
2145                                 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
2146                                 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
2147                                 HW_READY_TIMEOUT);
2148
2149         IWL_DEBUG_INFO(priv, "hardware%s ready\n", ret < 0 ? " not" : "");
2150         return ret;
2151 }
2152
2153 /* Note: returns standard 0/-ERROR code */
2154 int iwl_prepare_card_hw(struct iwl_priv *priv)
2155 {
2156         int ret;
2157
2158         IWL_DEBUG_INFO(priv, "iwl_prepare_card_hw enter\n");
2159
2160         ret = iwl_set_hw_ready(priv);
2161         if (ret >= 0)
2162                 return 0;
2163
2164         /* If HW is not ready, prepare the conditions to check again */
2165         iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
2166                         CSR_HW_IF_CONFIG_REG_PREPARE);
2167
2168         ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
2169                         ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
2170                         CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
2171
2172         if (ret < 0)
2173                 return ret;
2174
2175         /* HW should be ready by now, check again. */
2176         ret = iwl_set_hw_ready(priv);
2177         if (ret >= 0)
2178                 return 0;
2179         return ret;
2180 }
2181
2182 #define MAX_HW_RESTARTS 5
2183
2184 static int __iwl_up(struct iwl_priv *priv)
2185 {
2186         struct iwl_rxon_context *ctx;
2187         int ret;
2188
2189         lockdep_assert_held(&priv->mutex);
2190
2191         if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
2192                 IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
2193                 return -EIO;
2194         }
2195
2196         for_each_context(priv, ctx) {
2197                 ret = iwlagn_alloc_bcast_station(priv, ctx);
2198                 if (ret) {
2199                         iwl_dealloc_bcast_stations(priv);
2200                         return ret;
2201                 }
2202         }
2203
2204         ret = iwlagn_run_init_ucode(priv);
2205         if (ret) {
2206                 IWL_ERR(priv, "Failed to run INIT ucode: %d\n", ret);
2207                 goto error;
2208         }
2209
2210         ret = iwlagn_load_ucode_wait_alive(priv,
2211                                            &priv->ucode_rt,
2212                                            IWL_UCODE_REGULAR);
2213         if (ret) {
2214                 IWL_ERR(priv, "Failed to start RT ucode: %d\n", ret);
2215                 goto error;
2216         }
2217
2218         ret = iwl_alive_start(priv);
2219         if (ret)
2220                 goto error;
2221         return 0;
2222
2223  error:
2224         set_bit(STATUS_EXIT_PENDING, &priv->status);
2225         __iwl_down(priv);
2226         clear_bit(STATUS_EXIT_PENDING, &priv->status);
2227
2228         IWL_ERR(priv, "Unable to initialize device.\n");
2229         return ret;
2230 }
2231
2232
2233 /*****************************************************************************
2234  *
2235  * Workqueue callbacks
2236  *
2237  *****************************************************************************/
2238
2239 static void iwl_bg_run_time_calib_work(struct work_struct *work)
2240 {
2241         struct iwl_priv *priv = container_of(work, struct iwl_priv,
2242                         run_time_calib_work);
2243
2244         mutex_lock(&priv->mutex);
2245
2246         if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
2247             test_bit(STATUS_SCANNING, &priv->status)) {
2248                 mutex_unlock(&priv->mutex);
2249                 return;
2250         }
2251
2252         if (priv->start_calib) {
2253                 iwl_chain_noise_calibration(priv);
2254                 iwl_sensitivity_calibration(priv);
2255         }
2256
2257         mutex_unlock(&priv->mutex);
2258 }
2259
2260 static void iwlagn_prepare_restart(struct iwl_priv *priv)
2261 {
2262         struct iwl_rxon_context *ctx;
2263         bool bt_full_concurrent;
2264         u8 bt_ci_compliance;
2265         u8 bt_load;
2266         u8 bt_status;
2267
2268         lockdep_assert_held(&priv->mutex);
2269
2270         for_each_context(priv, ctx)
2271                 ctx->vif = NULL;
2272         priv->is_open = 0;
2273
2274         /*
2275          * __iwl_down() will clear the BT status variables,
2276          * which is correct, but when we restart we really
2277          * want to keep them so restore them afterwards.
2278          *
2279          * The restart process will later pick them up and
2280          * re-configure the hw when we reconfigure the BT
2281          * command.
2282          */
2283         bt_full_concurrent = priv->bt_full_concurrent;
2284         bt_ci_compliance = priv->bt_ci_compliance;
2285         bt_load = priv->bt_traffic_load;
2286         bt_status = priv->bt_status;
2287
2288         __iwl_down(priv);
2289
2290         priv->bt_full_concurrent = bt_full_concurrent;
2291         priv->bt_ci_compliance = bt_ci_compliance;
2292         priv->bt_traffic_load = bt_load;
2293         priv->bt_status = bt_status;
2294 }
2295
2296 static void iwl_bg_restart(struct work_struct *data)
2297 {
2298         struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
2299
2300         if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2301                 return;
2302
2303         if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
2304                 mutex_lock(&priv->mutex);
2305                 iwlagn_prepare_restart(priv);
2306                 mutex_unlock(&priv->mutex);
2307                 iwl_cancel_deferred_work(priv);
2308                 ieee80211_restart_hw(priv->hw);
2309         } else {
2310                 WARN_ON(1);
2311         }
2312 }
2313
2314 static void iwl_bg_rx_replenish(struct work_struct *data)
2315 {
2316         struct iwl_priv *priv =
2317             container_of(data, struct iwl_priv, rx_replenish);
2318
2319         if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2320                 return;
2321
2322         mutex_lock(&priv->mutex);
2323         iwlagn_rx_replenish(priv);
2324         mutex_unlock(&priv->mutex);
2325 }
2326
2327 static int iwl_mac_offchannel_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
2328                                  struct ieee80211_channel *chan,
2329                                  enum nl80211_channel_type channel_type,
2330                                  unsigned int wait)
2331 {
2332         struct iwl_priv *priv = hw->priv;
2333         int ret;
2334
2335         /* Not supported if we don't have PAN */
2336         if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN))) {
2337                 ret = -EOPNOTSUPP;
2338                 goto free;
2339         }
2340
2341         /* Not supported on pre-P2P firmware */
2342         if (!(priv->contexts[IWL_RXON_CTX_PAN].interface_modes &
2343                                         BIT(NL80211_IFTYPE_P2P_CLIENT))) {
2344                 ret = -EOPNOTSUPP;
2345                 goto free;
2346         }
2347
2348         mutex_lock(&priv->mutex);
2349
2350         if (!priv->contexts[IWL_RXON_CTX_PAN].is_active) {
2351                 /*
2352                  * If the PAN context is free, use the normal
2353                  * way of doing remain-on-channel offload + TX.
2354                  */
2355                 ret = 1;
2356                 goto out;
2357         }
2358
2359         /* TODO: queue up if scanning? */
2360         if (test_bit(STATUS_SCANNING, &priv->status) ||
2361             priv->_agn.offchan_tx_skb) {
2362                 ret = -EBUSY;
2363                 goto out;
2364         }
2365
2366         /*
2367          * max_scan_ie_len doesn't include the blank SSID or the header,
2368          * so we need to add that again here.
2369          */
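             /*
              * The 24 is the 802.11 management frame header and the 2 is the
              * element ID + length bytes of the zero-length SSID element,
              * mirroring the "- 24 - 2" applied to max_probe_length in
              * iwl_mac_setup_register().
              */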
2370         if (skb->len > hw->wiphy->max_scan_ie_len + 24 + 2) {
2371                 ret = -ENOBUFS;
2372                 goto out;
2373         }
2374
2375         priv->_agn.offchan_tx_skb = skb;
2376         priv->_agn.offchan_tx_timeout = wait;
2377         priv->_agn.offchan_tx_chan = chan;
2378
2379         ret = iwl_scan_initiate(priv, priv->contexts[IWL_RXON_CTX_PAN].vif,
2380                                 IWL_SCAN_OFFCH_TX, chan->band);
2381         if (ret)
2382                 priv->_agn.offchan_tx_skb = NULL;
2383  out:
2384         mutex_unlock(&priv->mutex);
2385  free:
2386         if (ret < 0)
2387                 kfree_skb(skb);
2388
2389         return ret;
2390 }
2391
2392 static int iwl_mac_offchannel_tx_cancel_wait(struct ieee80211_hw *hw)
2393 {
2394         struct iwl_priv *priv = hw->priv;
2395         int ret;
2396
2397         mutex_lock(&priv->mutex);
2398
2399         if (!priv->_agn.offchan_tx_skb) {
2400                 ret = -EINVAL;
2401                 goto unlock;
2402         }
2403
2404         priv->_agn.offchan_tx_skb = NULL;
2405
2406         ret = iwl_scan_cancel_timeout(priv, 200);
2407         if (ret)
2408                 ret = -EIO;
2409 unlock:
2410         mutex_unlock(&priv->mutex);
2411
2412         return ret;
2413 }
2414
2415 /*****************************************************************************
2416  *
2417  * mac80211 entry point functions
2418  *
2419  *****************************************************************************/
2420
2421 static const struct ieee80211_iface_limit iwlagn_sta_ap_limits[] = {
2422         {
2423                 .max = 1,
2424                 .types = BIT(NL80211_IFTYPE_STATION),
2425         },
2426         {
2427                 .max = 1,
2428                 .types = BIT(NL80211_IFTYPE_AP),
2429         },
2430 };
2431
2432 static const struct ieee80211_iface_limit iwlagn_2sta_limits[] = {
2433         {
2434                 .max = 2,
2435                 .types = BIT(NL80211_IFTYPE_STATION),
2436         },
2437 };
2438
2439 static const struct ieee80211_iface_limit iwlagn_p2p_sta_go_limits[] = {
2440         {
2441                 .max = 1,
2442                 .types = BIT(NL80211_IFTYPE_STATION),
2443         },
2444         {
2445                 .max = 1,
2446                 .types = BIT(NL80211_IFTYPE_P2P_GO) |
2447                          BIT(NL80211_IFTYPE_AP),
2448         },
2449 };
2450
2451 static const struct ieee80211_iface_limit iwlagn_p2p_2sta_limits[] = {
2452         {
2453                 .max = 2,
2454                 .types = BIT(NL80211_IFTYPE_STATION),
2455         },
2456         {
2457                 .max = 1,
2458                 .types = BIT(NL80211_IFTYPE_P2P_CLIENT),
2459         },
2460 };
2461
2462 static const struct ieee80211_iface_combination
2463 iwlagn_iface_combinations_dualmode[] = {
2464         { .num_different_channels = 1,
2465           .max_interfaces = 2,
2466           .beacon_int_infra_match = true,
2467           .limits = iwlagn_sta_ap_limits,
2468           .n_limits = ARRAY_SIZE(iwlagn_sta_ap_limits),
2469         },
2470         { .num_different_channels = 1,
2471           .max_interfaces = 2,
2472           .limits = iwlagn_2sta_limits,
2473           .n_limits = ARRAY_SIZE(iwlagn_2sta_limits),
2474         },
2475 };
2476
2477 static const struct ieee80211_iface_combination
2478 iwlagn_iface_combinations_p2p[] = {
2479         { .num_different_channels = 1,
2480           .max_interfaces = 2,
2481           .beacon_int_infra_match = true,
2482           .limits = iwlagn_p2p_sta_go_limits,
2483           .n_limits = ARRAY_SIZE(iwlagn_p2p_sta_go_limits),
2484         },
2485         { .num_different_channels = 1,
2486           .max_interfaces = 2,
2487           .limits = iwlagn_p2p_2sta_limits,
2488           .n_limits = ARRAY_SIZE(iwlagn_p2p_2sta_limits),
2489         },
2490 };
2491
2492 /*
2493  * Not a mac80211 entry point function, but it fits in with all the
2494  * other mac80211 functions grouped here.
2495  */
2496 static int iwl_mac_setup_register(struct iwl_priv *priv,
2497                                   struct iwlagn_ucode_capabilities *capa)
2498 {
2499         int ret;
2500         struct ieee80211_hw *hw = priv->hw;
2501         struct iwl_rxon_context *ctx;
2502
2503         hw->rate_control_algorithm = "iwl-agn-rs";
2504
2505         /* Tell mac80211 our characteristics */
2506         hw->flags = IEEE80211_HW_SIGNAL_DBM |
2507                     IEEE80211_HW_AMPDU_AGGREGATION |
2508                     IEEE80211_HW_NEED_DTIM_PERIOD |
2509                     IEEE80211_HW_SPECTRUM_MGMT |
2510                     IEEE80211_HW_REPORTS_TX_ACK_STATUS;
2511
2512         hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
2513
2514         hw->flags |= IEEE80211_HW_SUPPORTS_PS |
2515                      IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
2516
2517         if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE)
2518                 hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
2519                              IEEE80211_HW_SUPPORTS_STATIC_SMPS;
2520
2521         if (capa->flags & IWL_UCODE_TLV_FLAGS_MFP)
2522                 hw->flags |= IEEE80211_HW_MFP_CAPABLE;
2523
2524         hw->sta_data_size = sizeof(struct iwl_station_priv);
2525         hw->vif_data_size = sizeof(struct iwl_vif_priv);
2526
2527         for_each_context(priv, ctx) {
2528                 hw->wiphy->interface_modes |= ctx->interface_modes;
2529                 hw->wiphy->interface_modes |= ctx->exclusive_interface_modes;
2530         }
2531
2532         BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
2533
2534         if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT)) {
2535                 hw->wiphy->iface_combinations = iwlagn_iface_combinations_p2p;
2536                 hw->wiphy->n_iface_combinations =
2537                         ARRAY_SIZE(iwlagn_iface_combinations_p2p);
2538         } else if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) {
2539                 hw->wiphy->iface_combinations = iwlagn_iface_combinations_dualmode;
2540                 hw->wiphy->n_iface_combinations =
2541                         ARRAY_SIZE(iwlagn_iface_combinations_dualmode);
2542         }
2543
2544         hw->wiphy->max_remain_on_channel_duration = 1000;
2545
2546         hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
2547                             WIPHY_FLAG_DISABLE_BEACON_HINTS |
2548                             WIPHY_FLAG_IBSS_RSN;
2549
2550         /*
2551          * For now, disable PS by default because it affects
2552          * RX performance significantly.
2553          */
2554         hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
2555
2556         hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
2557         /* we create the 802.11 header and a zero-length SSID element */
2558         hw->wiphy->max_scan_ie_len = capa->max_probe_length - 24 - 2;
2559
2560         /* Default value; 4 EDCA QOS priorities */
2561         hw->queues = 4;
2562
2563         hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
2564
2565         if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
2566                 priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
2567                         &priv->bands[IEEE80211_BAND_2GHZ];
2568         if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
2569                 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
2570                         &priv->bands[IEEE80211_BAND_5GHZ];
2571
2572         iwl_leds_init(priv);
2573
2574         ret = ieee80211_register_hw(priv->hw);
2575         if (ret) {
2576                 IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
2577                 return ret;
2578         }
2579         priv->mac80211_registered = 1;
2580
2581         return 0;
2582 }
2583
2584
2585 static int iwlagn_mac_start(struct ieee80211_hw *hw)
2586 {
2587         struct iwl_priv *priv = hw->priv;
2588         int ret;
2589
2590         IWL_DEBUG_MAC80211(priv, "enter\n");
2591
2592         /* we should be verifying the device is ready to be opened */
2593         mutex_lock(&priv->mutex);
2594         ret = __iwl_up(priv);
2595         mutex_unlock(&priv->mutex);
2596         if (ret)
2597                 return ret;
2598
2599         IWL_DEBUG_INFO(priv, "Start UP work done.\n");
2600
2601         /* Now we should be done, and the READY bit should be set. */
2602         if (WARN_ON(!test_bit(STATUS_READY, &priv->status)))
2603                 ret = -EIO;
2604
2605         iwlagn_led_enable(priv);
2606
2607         priv->is_open = 1;
2608         IWL_DEBUG_MAC80211(priv, "leave\n");
2609         return 0;
2610 }
2611
2612 static void iwlagn_mac_stop(struct ieee80211_hw *hw)
2613 {
2614         struct iwl_priv *priv = hw->priv;
2615
2616         IWL_DEBUG_MAC80211(priv, "enter\n");
2617
2618         if (!priv->is_open)
2619                 return;
2620
2621         priv->is_open = 0;
2622
2623         iwl_down(priv);
2624
2625         flush_workqueue(priv->workqueue);
2626
2627         /* User space software may expect to get rfkill changes
2628          * even if the interface is down */
2629         iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
2630         iwl_enable_rfkill_int(priv);
2631
2632         IWL_DEBUG_MAC80211(priv, "leave\n");
2633 }
2634
2635 static void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2636 {
2637         struct iwl_priv *priv = hw->priv;
2638
2639         IWL_DEBUG_MACDUMP(priv, "enter\n");
2640
2641         IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
2642                      ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
2643
2644         if (iwlagn_tx_skb(priv, skb))
2645                 dev_kfree_skb_any(skb);
2646
2647         IWL_DEBUG_MACDUMP(priv, "leave\n");
2648 }
2649
2650 static void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
2651                                        struct ieee80211_vif *vif,
2652                                        struct ieee80211_key_conf *keyconf,
2653                                        struct ieee80211_sta *sta,
2654                                        u32 iv32, u16 *phase1key)
2655 {
2656         struct iwl_priv *priv = hw->priv;
2657         struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
2658
2659         IWL_DEBUG_MAC80211(priv, "enter\n");
2660
2661         iwl_update_tkip_key(priv, vif_priv->ctx, keyconf, sta,
2662                             iv32, phase1key);
2663
2664         IWL_DEBUG_MAC80211(priv, "leave\n");
2665 }
2666
2667 static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2668                               struct ieee80211_vif *vif,
2669                               struct ieee80211_sta *sta,
2670                               struct ieee80211_key_conf *key)
2671 {
2672         struct iwl_priv *priv = hw->priv;
2673         struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
2674         struct iwl_rxon_context *ctx = vif_priv->ctx;
2675         int ret;
2676         u8 sta_id;
2677         bool is_default_wep_key = false;
2678
2679         IWL_DEBUG_MAC80211(priv, "enter\n");
2680
2681         if (iwlagn_mod_params.sw_crypto) {
2682                 IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
2683                 return -EOPNOTSUPP;
2684         }
2685
2686         /*
2687          * To support IBSS RSN, don't program group keys in IBSS, the
2688          * hardware will then not attempt to decrypt the frames.
2689          */
2690         if (vif->type == NL80211_IFTYPE_ADHOC &&
2691             !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
2692                 return -EOPNOTSUPP;
2693
2694         sta_id = iwl_sta_id_or_broadcast(priv, vif_priv->ctx, sta);
2695         if (sta_id == IWL_INVALID_STATION)
2696                 return -EINVAL;
2697
2698         mutex_lock(&priv->mutex);
2699         iwl_scan_cancel_timeout(priv, 100);
2700
2701         /*
2702          * If we receive a WEP group key and haven't received any key mapping
2703          * so far, we are in legacy WEP mode (group key only); otherwise we are
2704          * in 1X mode.
2705          * In legacy WEP mode, we use a different host command to the uCode.
2706          */
2707         if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
2708              key->cipher == WLAN_CIPHER_SUITE_WEP104) &&
2709             !sta) {
2710                 if (cmd == SET_KEY)
2711                         is_default_wep_key = !ctx->key_mapping_keys;
2712                 else
2713                         is_default_wep_key =
2714                                         (key->hw_key_idx == HW_KEY_DEFAULT);
2715         }
2716
2717         switch (cmd) {
2718         case SET_KEY:
2719                 if (is_default_wep_key)
2720                         ret = iwl_set_default_wep_key(priv, vif_priv->ctx, key);
2721                 else
2722                         ret = iwl_set_dynamic_key(priv, vif_priv->ctx,
2723                                                   key, sta_id);
2724
2725                 IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
2726                 break;
2727         case DISABLE_KEY:
2728                 if (is_default_wep_key)
2729                         ret = iwl_remove_default_wep_key(priv, ctx, key);
2730                 else
2731                         ret = iwl_remove_dynamic_key(priv, ctx, key, sta_id);
2732
2733                 IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
2734                 break;
2735         default:
2736                 ret = -EINVAL;
2737         }
2738
2739         mutex_unlock(&priv->mutex);
2740         IWL_DEBUG_MAC80211(priv, "leave\n");
2741
2742         return ret;
2743 }
2744
2745 static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
2746                                    struct ieee80211_vif *vif,
2747                                    enum ieee80211_ampdu_mlme_action action,
2748                                    struct ieee80211_sta *sta, u16 tid, u16 *ssn,
2749                                    u8 buf_size)
2750 {
2751         struct iwl_priv *priv = hw->priv;
2752         int ret = -EINVAL;
2753         struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
2754
2755         IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
2756                      sta->addr, tid);
2757
2758         if (!(priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE))
2759                 return -EACCES;
2760
2761         mutex_lock(&priv->mutex);
2762
2763         switch (action) {
2764         case IEEE80211_AMPDU_RX_START:
2765                 IWL_DEBUG_HT(priv, "start Rx\n");
2766                 ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn);
2767                 break;
2768         case IEEE80211_AMPDU_RX_STOP:
2769                 IWL_DEBUG_HT(priv, "stop Rx\n");
2770                 ret = iwl_sta_rx_agg_stop(priv, sta, tid);
2771                 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2772                         ret = 0;
2773                 break;
2774         case IEEE80211_AMPDU_TX_START:
2775                 IWL_DEBUG_HT(priv, "start Tx\n");
2776                 ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn);
2777                 if (ret == 0) {
2778                         priv->_agn.agg_tids_count++;
2779                         IWL_DEBUG_HT(priv, "priv->_agn.agg_tids_count = %u\n",
2780                                      priv->_agn.agg_tids_count);
2781                 }
2782                 break;
2783         case IEEE80211_AMPDU_TX_STOP:
2784                 IWL_DEBUG_HT(priv, "stop Tx\n");
2785                 ret = iwlagn_tx_agg_stop(priv, vif, sta, tid);
2786                 if ((ret == 0) && (priv->_agn.agg_tids_count > 0)) {
2787                         priv->_agn.agg_tids_count--;
2788                         IWL_DEBUG_HT(priv, "priv->_agn.agg_tids_count = %u\n",
2789                                      priv->_agn.agg_tids_count);
2790                 }
2791                 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2792                         ret = 0;
2793                 if (priv->cfg->ht_params &&
2794                     priv->cfg->ht_params->use_rts_for_aggregation) {
2795                         /*
2796                          * switch off RTS/CTS if it was previously enabled
2797                          */
2798                         sta_priv->lq_sta.lq.general_params.flags &=
2799                                 ~LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
2800                         iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif),
2801                                         &sta_priv->lq_sta.lq, CMD_ASYNC, false);
2802                 }
2803                 break;
2804         case IEEE80211_AMPDU_TX_OPERATIONAL:
2805                 buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);
2806
2807                 iwlagn_txq_agg_queue_setup(priv, sta, tid, buf_size);
2808
2809                 /*
2810                  * If the limit is 0, then it wasn't initialised yet;
2811                  * use the default. We can do that since we take the
2812                  * minimum below, and we don't want to go above our
2813                  * default due to hardware restrictions.
2814                  */
2815                 if (sta_priv->max_agg_bufsize == 0)
2816                         sta_priv->max_agg_bufsize =
2817                                 LINK_QUAL_AGG_FRAME_LIMIT_DEF;
2818
2819                 /*
2820                  * Even though in theory the peer could have different
2821                  * aggregation reorder buffer sizes for different sessions,
2822                  * our ucode doesn't allow for that and has a global limit
2823                  * for each station. Therefore, use the minimum of all the
2824                  * aggregation sessions and our default value.
2825                  */
2826                 sta_priv->max_agg_bufsize =
2827                         min(sta_priv->max_agg_bufsize, buf_size);
2828
2829                 if (priv->cfg->ht_params &&
2830                     priv->cfg->ht_params->use_rts_for_aggregation) {
2831                         /*
2832                          * switch to RTS/CTS if it is the preferred protection
2833                          * method for HT traffic
2834                          */
2835
2836                         sta_priv->lq_sta.lq.general_params.flags |=
2837                                 LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
2838                 }
2839
2840                 sta_priv->lq_sta.lq.agg_params.agg_frame_cnt_limit =
2841                         sta_priv->max_agg_bufsize;
2842
2843                 iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif),
2844                                 &sta_priv->lq_sta.lq, CMD_ASYNC, false);
2845
2846                 IWL_INFO(priv, "Tx aggregation enabled on ra = %pM tid = %d\n",
2847                          sta->addr, tid);
2848                 ret = 0;
2849                 break;
2850         }
2851         mutex_unlock(&priv->mutex);
2852
2853         return ret;
2854 }
2855
2856 static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
2857                               struct ieee80211_vif *vif,
2858                               struct ieee80211_sta *sta)
2859 {
2860         struct iwl_priv *priv = hw->priv;
2861         struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
2862         struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
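        /*
         * "is_ap" means the station being added is our AP, which is the
         * case when this interface operates in station (client) mode.
         */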
2863         bool is_ap = vif->type == NL80211_IFTYPE_STATION;
2864         int ret;
2865         u8 sta_id;
2866
2867         IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
2868                         sta->addr);
2869         mutex_lock(&priv->mutex);
2870         IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
2871                         sta->addr);
2872         sta_priv->common.sta_id = IWL_INVALID_STATION;
2873
2874         atomic_set(&sta_priv->pending_frames, 0);
2875         if (vif->type == NL80211_IFTYPE_AP)
2876                 sta_priv->client = true;
2877
2878         ret = iwl_add_station_common(priv, vif_priv->ctx, sta->addr,
2879                                      is_ap, sta, &sta_id);
2880         if (ret) {
2881                 IWL_ERR(priv, "Unable to add station %pM (%d)\n",
2882                         sta->addr, ret);
2883                 /* Should we return success if return code is EEXIST ? */
2884                 mutex_unlock(&priv->mutex);
2885                 return ret;
2886         }
2887
2888         sta_priv->common.sta_id = sta_id;
2889
2890         /* Initialize rate scaling */
2891         IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
2892                        sta->addr);
2893         iwl_rs_rate_init(priv, sta, sta_id);
2894         mutex_unlock(&priv->mutex);
2895
2896         return 0;
2897 }
2898
2899 static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
2900                                 struct ieee80211_channel_switch *ch_switch)
2901 {
2902         struct iwl_priv *priv = hw->priv;
2903         const struct iwl_channel_info *ch_info;
2904         struct ieee80211_conf *conf = &hw->conf;
2905         struct ieee80211_channel *channel = ch_switch->channel;
2906         struct iwl_ht_config *ht_conf = &priv->current_ht_config;
2907         /*
2908          * MULTI-FIXME
2909          * When we add support for multiple interfaces, we need to
2910          * revisit this. The channel switch command in the device
2911          * only affects the BSS context, but what does that really
2912          * mean? And what if we get a CSA on the second interface?
2913          * This needs a lot of work.
2914          */
2915         struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2916         u16 ch;
2917
2918         IWL_DEBUG_MAC80211(priv, "enter\n");
2919
2920         mutex_lock(&priv->mutex);
2921
2922         if (iwl_is_rfkill(priv))
2923                 goto out;
2924
2925         if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
2926             test_bit(STATUS_SCANNING, &priv->status) ||
2927             test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
2928                 goto out;
2929
2930         if (!iwl_is_associated_ctx(ctx))
2931                 goto out;
2932
2933         if (!priv->cfg->ops->lib->set_channel_switch)
2934                 goto out;
2935
2936         ch = channel->hw_value;
2937         if (le16_to_cpu(ctx->active.channel) == ch)
2938                 goto out;
2939
2940         ch_info = iwl_get_channel_info(priv, channel->band, ch);
2941         if (!is_channel_valid(ch_info)) {
2942                 IWL_DEBUG_MAC80211(priv, "invalid channel\n");
2943                 goto out;
2944         }
2945
2946         spin_lock_irq(&priv->lock);
2947
2948         priv->current_ht_config.smps = conf->smps_mode;
2949
2950         /* Configure HT40 channels */
2951         ctx->ht.enabled = conf_is_ht(conf);
2952         if (ctx->ht.enabled) {
2953                 if (conf_is_ht40_minus(conf)) {
2954                         ctx->ht.extension_chan_offset =
2955                                 IEEE80211_HT_PARAM_CHA_SEC_BELOW;
2956                         ctx->ht.is_40mhz = true;
2957                 } else if (conf_is_ht40_plus(conf)) {
2958                         ctx->ht.extension_chan_offset =
2959                                 IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
2960                         ctx->ht.is_40mhz = true;
2961                 } else {
2962                         ctx->ht.extension_chan_offset =
2963                                 IEEE80211_HT_PARAM_CHA_SEC_NONE;
2964                         ctx->ht.is_40mhz = false;
2965                 }
2966         } else
2967                 ctx->ht.is_40mhz = false;
2968
2969         if ((le16_to_cpu(ctx->staging.channel) != ch))
2970                 ctx->staging.flags = 0;
2971
2972         iwl_set_rxon_channel(priv, channel, ctx);
2973         iwl_set_rxon_ht(priv, ht_conf);
2974         iwl_set_flags_for_band(priv, ctx, channel->band, ctx->vif);
2975
2976         spin_unlock_irq(&priv->lock);
2977
2978         iwl_set_rate(priv);
2979         /*
2980          * at this point, staging_rxon has the
2981          * configuration for channel switch
2982          */
2983         set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
2984         priv->switch_channel = cpu_to_le16(ch);
2985         if (priv->cfg->ops->lib->set_channel_switch(priv, ch_switch)) {
2986                 clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
2987                 priv->switch_channel = 0;
2988                 ieee80211_chswitch_done(ctx->vif, false);
2989         }
2990
2991 out:
2992         mutex_unlock(&priv->mutex);
2993         IWL_DEBUG_MAC80211(priv, "leave\n");
2994 }
2995
2996 static void iwlagn_configure_filter(struct ieee80211_hw *hw,
2997                                     unsigned int changed_flags,
2998                                     unsigned int *total_flags,
2999                                     u64 multicast)
3000 {
3001         struct iwl_priv *priv = hw->priv;
3002         __le32 filter_or = 0, filter_nand = 0;
3003         struct iwl_rxon_context *ctx;
3004
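        /*
         * Map the filter flags requested by mac80211 onto RXON filter bits:
         * bits to be set are collected in filter_or, bits to be cleared in
         * filter_nand, and both are applied to every context below.
         */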
3005 #define CHK(test, flag) do { \
3006         if (*total_flags & (test))              \
3007                 filter_or |= (flag);            \
3008         else                                    \
3009                 filter_nand |= (flag);          \
3010         } while (0)
3011
3012         IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
3013                         changed_flags, *total_flags);
3014
3015         CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
3016         /* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
3017         CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
3018         CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
3019
3020 #undef CHK
3021
3022         mutex_lock(&priv->mutex);
3023
3024         for_each_context(priv, ctx) {
3025                 ctx->staging.filter_flags &= ~filter_nand;
3026                 ctx->staging.filter_flags |= filter_or;
3027
3028                 /*
3029                  * Not committing directly because the hardware may be performing
3030                  * a scan; we'll eventually commit the filter flags change anyway.
3031                  */
3032         }
3033
3034         mutex_unlock(&priv->mutex);
3035
3036         /*
3037          * Receiving all multicast frames is always enabled by the
3038          * default flags setup in iwl_connection_init_rx_config()
3039          * since we currently do not support programming multicast
3040          * filters into the device.
3041          */
3042         *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
3043                         FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
3044 }
3045
3046 static void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop)
3047 {
3048         struct iwl_priv *priv = hw->priv;
3049
3050         mutex_lock(&priv->mutex);
3051         IWL_DEBUG_MAC80211(priv, "enter\n");
3052
3053         if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
3054                 IWL_DEBUG_TX(priv, "Aborting flush due to device shutdown\n");
3055                 goto done;
3056         }
3057         if (iwl_is_rfkill(priv)) {
3058                 IWL_DEBUG_TX(priv, "Aborting flush due to RF Kill\n");
3059                 goto done;
3060         }
3061
3062         /*
3063          * mac80211 will not push any more frames for transmit
3064          * until the flush is completed
3065          */
3066         if (drop) {
3067                 IWL_DEBUG_MAC80211(priv, "send flush command\n");
3068                 if (iwlagn_txfifo_flush(priv, IWL_DROP_ALL)) {
3069                         IWL_ERR(priv, "flush request fail\n");
3070                         goto done;
3071                 }
3072         }
3073         IWL_DEBUG_MAC80211(priv, "wait transmit/flush all frames\n");
3074         iwlagn_wait_tx_queue_empty(priv);
3075 done:
3076         mutex_unlock(&priv->mutex);
3077         IWL_DEBUG_MAC80211(priv, "leave\n");
3078 }
3079
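/*
 * End a remain-on-channel period: revert the PAN context to a plain
 * dual-station configuration on the current channel, drop the ASSOC
 * filter flag, commit the RXON and mark the context inactive.
 */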
3080 static void iwlagn_disable_roc(struct iwl_priv *priv)
3081 {
3082         struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN];
3083         struct ieee80211_channel *chan = ACCESS_ONCE(priv->hw->conf.channel);
3084
3085         lockdep_assert_held(&priv->mutex);
3086
3087         if (!ctx->is_active)
3088                 return;
3089
3090         ctx->staging.dev_type = RXON_DEV_TYPE_2STA;
3091         ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
3092         iwl_set_rxon_channel(priv, chan, ctx);
3093         iwl_set_flags_for_band(priv, ctx, chan->band, NULL);
3094
3095         priv->_agn.hw_roc_channel = NULL;
3096
3097         iwlagn_commit_rxon(priv, ctx);
3098
3099         ctx->is_active = false;
3100 }
3101
3102 static void iwlagn_bg_roc_done(struct work_struct *work)
3103 {
3104         struct iwl_priv *priv = container_of(work, struct iwl_priv,
3105                                              _agn.hw_roc_work.work);
3106
3107         mutex_lock(&priv->mutex);
3108         ieee80211_remain_on_channel_expired(priv->hw);
3109         iwlagn_disable_roc(priv);
3110         mutex_unlock(&priv->mutex);
3111 }
3112
3113 static int iwl_mac_remain_on_channel(struct ieee80211_hw *hw,
3114                                      struct ieee80211_channel *channel,
3115                                      enum nl80211_channel_type channel_type,
3116                                      int duration)
3117 {
3118         struct iwl_priv *priv = hw->priv;
3119         int err = 0;
3120
3121         if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
3122                 return -EOPNOTSUPP;
3123
3124         if (!(priv->contexts[IWL_RXON_CTX_PAN].interface_modes &
3125                                         BIT(NL80211_IFTYPE_P2P_CLIENT)))
3126                 return -EOPNOTSUPP;
3127
3128         mutex_lock(&priv->mutex);
3129
3130         if (priv->contexts[IWL_RXON_CTX_PAN].is_active ||
3131             test_bit(STATUS_SCAN_HW, &priv->status)) {
3132                 err = -EBUSY;
3133                 goto out;
3134         }
3135
3136         priv->contexts[IWL_RXON_CTX_PAN].is_active = true;
3137         priv->_agn.hw_roc_channel = channel;
3138         priv->_agn.hw_roc_chantype = channel_type;
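        /*
         * mac80211 passes the duration in milliseconds while the device
         * is programmed in TU (1 TU = 1024 usec), hence the rounded-up
         * conversion below. The completion work is scheduled to run
         * shortly (20 ms) after the requested duration has elapsed.
         */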
3139         priv->_agn.hw_roc_duration = DIV_ROUND_UP(duration * 1000, 1024);
3140         iwlagn_commit_rxon(priv, &priv->contexts[IWL_RXON_CTX_PAN]);
3141         queue_delayed_work(priv->workqueue, &priv->_agn.hw_roc_work,
3142                            msecs_to_jiffies(duration + 20));
3143
3144         msleep(IWL_MIN_SLOT_TIME); /* TU is almost ms */
3145         ieee80211_ready_on_channel(priv->hw);
3146
3147  out:
3148         mutex_unlock(&priv->mutex);
3149
3150         return err;
3151 }
3152
3153 static int iwl_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
3154 {
3155         struct iwl_priv *priv = hw->priv;
3156
3157         if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
3158                 return -EOPNOTSUPP;
3159
3160         cancel_delayed_work_sync(&priv->_agn.hw_roc_work);
3161
3162         mutex_lock(&priv->mutex);
3163         iwlagn_disable_roc(priv);
3164         mutex_unlock(&priv->mutex);
3165
3166         return 0;
3167 }
3168
3169 /*****************************************************************************
3170  *
3171  * driver setup and teardown
3172  *
3173  *****************************************************************************/
3174
3175 static void iwl_setup_deferred_work(struct iwl_priv *priv)
3176 {
3177         priv->workqueue = create_singlethread_workqueue(DRV_NAME);
3178
3179         init_waitqueue_head(&priv->wait_command_queue);
3180
3181         INIT_WORK(&priv->restart, iwl_bg_restart);
3182         INIT_WORK(&priv->rx_replenish, iwl_bg_rx_replenish);
3183         INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update);
3184         INIT_WORK(&priv->run_time_calib_work, iwl_bg_run_time_calib_work);
3185         INIT_WORK(&priv->tx_flush, iwl_bg_tx_flush);
3186         INIT_WORK(&priv->bt_full_concurrency, iwl_bg_bt_full_concurrency);
3187         INIT_WORK(&priv->bt_runtime_config, iwl_bg_bt_runtime_config);
3188         INIT_DELAYED_WORK(&priv->_agn.hw_roc_work, iwlagn_bg_roc_done);
3189
3190         iwl_setup_scan_deferred_work(priv);
3191
3192         if (priv->cfg->ops->lib->setup_deferred_work)
3193                 priv->cfg->ops->lib->setup_deferred_work(priv);
3194
3195         init_timer(&priv->statistics_periodic);
3196         priv->statistics_periodic.data = (unsigned long)priv;
3197         priv->statistics_periodic.function = iwl_bg_statistics_periodic;
3198
3199         init_timer(&priv->ucode_trace);
3200         priv->ucode_trace.data = (unsigned long)priv;
3201         priv->ucode_trace.function = iwl_bg_ucode_trace;
3202
3203         init_timer(&priv->watchdog);
3204         priv->watchdog.data = (unsigned long)priv;
3205         priv->watchdog.function = iwl_bg_watchdog;
3206
3207         tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
3208                 iwl_irq_tasklet, (unsigned long)priv);
3209 }
3210
3211 static void iwl_cancel_deferred_work(struct iwl_priv *priv)
3212 {
3213         if (priv->cfg->ops->lib->cancel_deferred_work)
3214                 priv->cfg->ops->lib->cancel_deferred_work(priv);
3215
3216         cancel_work_sync(&priv->run_time_calib_work);
3217         cancel_work_sync(&priv->beacon_update);
3218
3219         iwl_cancel_scan_deferred_work(priv);
3220
3221         cancel_work_sync(&priv->bt_full_concurrency);
3222         cancel_work_sync(&priv->bt_runtime_config);
3223
3224         del_timer_sync(&priv->statistics_periodic);
3225         del_timer_sync(&priv->ucode_trace);
3226 }
3227
3228 static void iwl_init_hw_rates(struct iwl_priv *priv,
3229                               struct ieee80211_rate *rates)
3230 {
3231         int i;
3232
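        /*
         * iwl_rates[].ieee is in 500 kbps units while mac80211 bitrates
         * are in 100 kbps units, hence the factor of 5 below.
         */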
3233         for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
3234                 rates[i].bitrate = iwl_rates[i].ieee * 5;
3235                 rates[i].hw_value = i; /* Rate scaling will work on indexes */
3236                 rates[i].hw_value_short = i;
3237                 rates[i].flags = 0;
3238                 if ((i >= IWL_FIRST_CCK_RATE) && (i <= IWL_LAST_CCK_RATE)) {
3239                         /*
3240                          * If CCK != 1M then set short preamble rate flag.
3241                          */
3242                         rates[i].flags |=
3243                                 (iwl_rates[i].plcp == IWL_RATE_1M_PLCP) ?
3244                                         0 : IEEE80211_RATE_SHORT_PREAMBLE;
3245                 }
3246         }
3247 }
3248
3249 static int iwl_init_drv(struct iwl_priv *priv)
3250 {
3251         int ret;
3252
3253         spin_lock_init(&priv->sta_lock);
3254         spin_lock_init(&priv->hcmd_lock);
3255
3256         mutex_init(&priv->mutex);
3257
3258         priv->ieee_channels = NULL;
3259         priv->ieee_rates = NULL;
3260         priv->band = IEEE80211_BAND_2GHZ;
3261
3262         priv->iw_mode = NL80211_IFTYPE_STATION;
3263         priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
3264         priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
3265         priv->_agn.agg_tids_count = 0;
3266
3267         /* initialize force reset */
3268         priv->force_reset[IWL_RF_RESET].reset_duration =
3269                 IWL_DELAY_NEXT_FORCE_RF_RESET;
3270         priv->force_reset[IWL_FW_RESET].reset_duration =
3271                 IWL_DELAY_NEXT_FORCE_FW_RELOAD;
3272
3273         priv->rx_statistics_jiffies = jiffies;
3274
3275         /* Choose which receivers/antennas to use */
3276         if (priv->cfg->ops->hcmd->set_rxon_chain)
3277                 priv->cfg->ops->hcmd->set_rxon_chain(priv,
3278                                         &priv->contexts[IWL_RXON_CTX_BSS]);
3279
3280         iwl_init_scan_params(priv);
3281
3282         /* init bt coex */
3283         if (priv->cfg->bt_params &&
3284             priv->cfg->bt_params->advanced_bt_coexist) {
3285                 priv->kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT;
3286                 priv->kill_cts_mask = IWLAGN_BT_KILL_CTS_MASK_DEFAULT;
3287                 priv->bt_valid = IWLAGN_BT_ALL_VALID_MSK;
3288                 priv->bt_on_thresh = BT_ON_THRESHOLD_DEF;
3289                 priv->bt_duration = BT_DURATION_LIMIT_DEF;
3290                 priv->dynamic_frag_thresh = BT_FRAG_THRESHOLD_DEF;
3291         }
3292
3293         ret = iwl_init_channel_map(priv);
3294         if (ret) {
3295                 IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
3296                 goto err;
3297         }
3298
3299         ret = iwlcore_init_geos(priv);
3300         if (ret) {
3301                 IWL_ERR(priv, "initializing geos failed: %d\n", ret);
3302                 goto err_free_channel_map;
3303         }
3304         iwl_init_hw_rates(priv, priv->ieee_rates);
3305
3306         return 0;
3307
3308 err_free_channel_map:
3309         iwl_free_channel_map(priv);
3310 err:
3311         return ret;
3312 }
3313
3314 static void iwl_uninit_drv(struct iwl_priv *priv)
3315 {
3316         iwl_calib_free_results(priv);
3317         iwlcore_free_geos(priv);
3318         iwl_free_channel_map(priv);
3319         kfree(priv->scan_cmd);
3320         kfree(priv->beacon_cmd);
3321 }
3322
3323 struct ieee80211_ops iwlagn_hw_ops = {
3324         .tx = iwlagn_mac_tx,
3325         .start = iwlagn_mac_start,
3326         .stop = iwlagn_mac_stop,
3327         .add_interface = iwl_mac_add_interface,
3328         .remove_interface = iwl_mac_remove_interface,
3329         .change_interface = iwl_mac_change_interface,
3330         .config = iwlagn_mac_config,
3331         .configure_filter = iwlagn_configure_filter,
3332         .set_key = iwlagn_mac_set_key,
3333         .update_tkip_key = iwlagn_mac_update_tkip_key,
3334         .conf_tx = iwl_mac_conf_tx,
3335         .bss_info_changed = iwlagn_bss_info_changed,
3336         .ampdu_action = iwlagn_mac_ampdu_action,
3337         .hw_scan = iwl_mac_hw_scan,
3338         .sta_notify = iwlagn_mac_sta_notify,
3339         .sta_add = iwlagn_mac_sta_add,
3340         .sta_remove = iwl_mac_sta_remove,
3341         .channel_switch = iwlagn_mac_channel_switch,
3342         .flush = iwlagn_mac_flush,
3343         .tx_last_beacon = iwl_mac_tx_last_beacon,
3344         .remain_on_channel = iwl_mac_remain_on_channel,
3345         .cancel_remain_on_channel = iwl_mac_cancel_remain_on_channel,
3346         .offchannel_tx = iwl_mac_offchannel_tx,
3347         .offchannel_tx_cancel_wait = iwl_mac_offchannel_tx_cancel_wait,
3348         CFG80211_TESTMODE_CMD(iwl_testmode_cmd)
3349         CFG80211_TESTMODE_DUMP(iwl_testmode_dump)
3350 };
3351
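/*
 * Log the PCI revision ID for debugging and return the hardware
 * revision read from the CSR_HW_REV register.
 */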
3352 static u32 iwl_hw_detect(struct iwl_priv *priv)
3353 {
3354         u8 rev_id;
3355
3356         pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &rev_id);
3357         IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", rev_id);
3358         return iwl_read32(priv, CSR_HW_REV);
3359 }
3360
3361 static int iwl_set_hw_params(struct iwl_priv *priv)
3362 {
3363         priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
3364         priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
3365         if (iwlagn_mod_params.amsdu_size_8K)
3366                 priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_8K);
3367         else
3368                 priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_4K);
3369
3370         priv->hw_params.max_beacon_itrvl = IWL_MAX_UCODE_BEACON_INTERVAL;
3371
3372         if (iwlagn_mod_params.disable_11n)
3373                 priv->cfg->sku &= ~EEPROM_SKU_CAP_11N_ENABLE;
3374
3375         /* Device-specific setup */
3376         return priv->cfg->ops->lib->set_hw_params(priv);
3377 }
3378
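/*
 * Access category (VO, VI, BE, BK) to TX FIFO and HW queue mappings.
 * The BSS context uses HW queues 0-3 and the PAN context uses 4-7, so
 * the two contexts never share a transmit queue.
 */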
3379 static const u8 iwlagn_bss_ac_to_fifo[] = {
3380         IWL_TX_FIFO_VO,
3381         IWL_TX_FIFO_VI,
3382         IWL_TX_FIFO_BE,
3383         IWL_TX_FIFO_BK,
3384 };
3385
3386 static const u8 iwlagn_bss_ac_to_queue[] = {
3387         0, 1, 2, 3,
3388 };
3389
3390 static const u8 iwlagn_pan_ac_to_fifo[] = {
3391         IWL_TX_FIFO_VO_IPAN,
3392         IWL_TX_FIFO_VI_IPAN,
3393         IWL_TX_FIFO_BE_IPAN,
3394         IWL_TX_FIFO_BK_IPAN,
3395 };
3396
3397 static const u8 iwlagn_pan_ac_to_queue[] = {
3398         7, 6, 5, 4,
3399 };
3400
3401 /* This function both allocates and initializes hw and priv. */
3402 static struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg)
3403 {
3404         struct iwl_priv *priv;
3405         /* mac80211 allocates memory for this device instance, including
3406          *   space for this driver's private structure */
3407         struct ieee80211_hw *hw;
3408
3409         hw = ieee80211_alloc_hw(sizeof(struct iwl_priv), &iwlagn_hw_ops);
3410         if (hw == NULL) {
3411                 pr_err("%s: Can not allocate network device\n",
3412                        cfg->name);
3413                 goto out;
3414         }
3415
3416         priv = hw->priv;
3417         priv->hw = hw;
3418
3419 out:
3420         return hw;
3421 }
3422
3423 static void iwl_init_context(struct iwl_priv *priv)
3424 {
3425         int i;
3426
3427         /*
3428          * The default context is always valid;
3429          * more may be discovered when firmware
3430          * is loaded.
3431          */
3432         priv->valid_contexts = BIT(IWL_RXON_CTX_BSS);
3433
3434         for (i = 0; i < NUM_IWL_RXON_CTX; i++)
3435                 priv->contexts[i].ctxid = i;
3436
3437         priv->contexts[IWL_RXON_CTX_BSS].always_active = true;
3438         priv->contexts[IWL_RXON_CTX_BSS].is_active = true;
3439         priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON;
3440         priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING;
3441         priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC;
3442         priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM;
3443         priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID;
3444         priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
3445         priv->contexts[IWL_RXON_CTX_BSS].ac_to_fifo = iwlagn_bss_ac_to_fifo;
3446         priv->contexts[IWL_RXON_CTX_BSS].ac_to_queue = iwlagn_bss_ac_to_queue;
3447         priv->contexts[IWL_RXON_CTX_BSS].exclusive_interface_modes =
3448                 BIT(NL80211_IFTYPE_ADHOC);
3449         priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
3450                 BIT(NL80211_IFTYPE_STATION);
3451         priv->contexts[IWL_RXON_CTX_BSS].ap_devtype = RXON_DEV_TYPE_AP;
3452         priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
3453         priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
3454         priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
3455
3456         priv->contexts[IWL_RXON_CTX_PAN].rxon_cmd = REPLY_WIPAN_RXON;
3457         priv->contexts[IWL_RXON_CTX_PAN].rxon_timing_cmd =
3458                 REPLY_WIPAN_RXON_TIMING;
3459         priv->contexts[IWL_RXON_CTX_PAN].rxon_assoc_cmd =
3460                 REPLY_WIPAN_RXON_ASSOC;
3461         priv->contexts[IWL_RXON_CTX_PAN].qos_cmd = REPLY_WIPAN_QOS_PARAM;
3462         priv->contexts[IWL_RXON_CTX_PAN].ap_sta_id = IWL_AP_ID_PAN;
3463         priv->contexts[IWL_RXON_CTX_PAN].wep_key_cmd = REPLY_WIPAN_WEPKEY;
3464         priv->contexts[IWL_RXON_CTX_PAN].bcast_sta_id = IWLAGN_PAN_BCAST_ID;
3465         priv->contexts[IWL_RXON_CTX_PAN].station_flags = STA_FLG_PAN_STATION;
3466         priv->contexts[IWL_RXON_CTX_PAN].ac_to_fifo = iwlagn_pan_ac_to_fifo;
3467         priv->contexts[IWL_RXON_CTX_PAN].ac_to_queue = iwlagn_pan_ac_to_queue;
3468         priv->contexts[IWL_RXON_CTX_PAN].mcast_queue = IWL_IPAN_MCAST_QUEUE;
3469         priv->contexts[IWL_RXON_CTX_PAN].interface_modes =
3470                 BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP);
3471 #ifdef CONFIG_IWL_P2P
3472         priv->contexts[IWL_RXON_CTX_PAN].interface_modes |=
3473                 BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
3474 #endif
3475         priv->contexts[IWL_RXON_CTX_PAN].ap_devtype = RXON_DEV_TYPE_CP;
3476         priv->contexts[IWL_RXON_CTX_PAN].station_devtype = RXON_DEV_TYPE_2STA;
3477         priv->contexts[IWL_RXON_CTX_PAN].unused_devtype = RXON_DEV_TYPE_P2P;
3478
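        /*
         * The explicit setup above covers only the BSS and PAN contexts;
         * fail the build if a context is ever added without updating
         * this function.
         */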
3479         BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
3480 }
3481
3482 int iwl_probe(struct pci_dev *pdev, struct iwl_cfg *cfg)
3483 {
3484         int err = 0;
3485         struct iwl_priv *priv;
3486         struct ieee80211_hw *hw;
3487         u16 pci_cmd, num_mac;
3488         u32 hw_rev;
3489
3490         /************************
3491          * 1. Allocating HW data
3492          ************************/
3493
3494         hw = iwl_alloc_all(cfg);
3495         if (!hw) {
3496                 err = -ENOMEM;
3497                 goto out;       }
3498         priv = hw->priv;
3499         /* At this point both hw and priv are allocated. */
3500
3501         SET_IEEE80211_DEV(hw, &pdev->dev);
3502
3503         IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
3504         priv->cfg = cfg;
3505         priv->pci_dev = pdev;
3506         priv->inta_mask = CSR_INI_SET_MASK;
3507
3508         /* is antenna coupling more than 35dB ? */
3509         priv->bt_ant_couple_ok =
3510                 (iwlagn_ant_coupling > IWL_BT_ANTENNA_COUPLING_THRESHOLD) ?
3511                 true : false;
3512
3513         /* enable/disable bt channel inhibition */
3514         priv->bt_ch_announce = iwlagn_bt_ch_announce;
3515         IWL_DEBUG_INFO(priv, "BT channel inhibition is %s\n",
3516                        (priv->bt_ch_announce) ? "On" : "Off");
3517
3518         if (iwl_alloc_traffic_mem(priv))
3519                 IWL_ERR(priv, "Not enough memory to generate traffic log\n");
3520
3521         /**************************
3522          * 2. Initializing PCI bus
3523          **************************/
3524         pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
3525                                 PCIE_LINK_STATE_CLKPM);
3526
3527         if (pci_enable_device(pdev)) {
3528                 err = -ENODEV;
3529                 goto out_ieee80211_free_hw;
3530         }
3531
3532         pci_set_master(pdev);
3533
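        /*
         * Try 36-bit DMA addressing first; if the platform cannot
         * provide it, fall back to a 32-bit DMA mask before giving up.
         */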
3534         err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
3535         if (!err)
3536                 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
3537         if (err) {
3538                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3539                 if (!err)
3540                         err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
3541                 /* both attempts failed: */
3542                 if (err) {
3543                         IWL_WARN(priv, "No suitable DMA available.\n");
3544                         goto out_pci_disable_device;
3545                 }
3546         }
3547
3548         err = pci_request_regions(pdev, DRV_NAME);
3549         if (err)
3550                 goto out_pci_disable_device;
3551
3552         pci_set_drvdata(pdev, priv);
3553
3554
3555         /***********************
3556          * 3. Read REV register
3557          ***********************/
3558         priv->hw_base = pci_iomap(pdev, 0, 0);
3559         if (!priv->hw_base) {
3560                 err = -ENODEV;
3561                 goto out_pci_release_regions;
3562         }
3563
3564         IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n",
3565                 (unsigned long long) pci_resource_len(pdev, 0));
3566         IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base);
3567
3568         /* these spin locks will be used in apm_ops.init and EEPROM access,
3569          * so we should initialize them now
3570          */
3571         spin_lock_init(&priv->reg_lock);
3572         spin_lock_init(&priv->lock);
3573
3574         /*
3575          * stop and reset the on-board processor just in case it is in a
3576          * strange state ... like being left stranded by a primary kernel
3577          * and this is now the kdump kernel trying to start up
3578          */
3579         iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
3580
3581         hw_rev = iwl_hw_detect(priv);
3582         IWL_INFO(priv, "Detected %s, REV=0x%X\n",
3583                 priv->cfg->name, hw_rev);
3584
3585         /* We disable the RETRY_TIMEOUT register (0x41) to keep
3586          * PCI Tx retries from interfering with C3 CPU state */
3587         pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
3588
3589         if (iwl_prepare_card_hw(priv)) {
3590                 IWL_WARN(priv, "Failed, HW not ready\n");
3591                 err = -EIO;
                goto out_iounmap;
3592         }
3593
3594         /*****************
3595          * 4. Read EEPROM
3596          *****************/
3597         /* Read the EEPROM */
3598         err = iwl_eeprom_init(priv, hw_rev);
3599         if (err) {
3600                 IWL_ERR(priv, "Unable to init EEPROM\n");
3601                 goto out_iounmap;
3602         }
3603         err = iwl_eeprom_check_version(priv);
3604         if (err)
3605                 goto out_free_eeprom;
3606
3607         err = iwl_eeprom_check_sku(priv);
3608         if (err)
3609                 goto out_free_eeprom;
3610
3611         /* extract MAC Address */
3612         iwl_eeprom_get_mac(priv, priv->addresses[0].addr);
3613         IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr);
3614         priv->hw->wiphy->addresses = priv->addresses;
3615         priv->hw->wiphy->n_addresses = 1;
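        /*
         * If the EEPROM advertises more than one MAC address, derive the
         * second one from the first by incrementing its last octet.
         */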
3616         num_mac = iwl_eeprom_query16(priv, EEPROM_NUM_MAC_ADDRESS);
3617         if (num_mac > 1) {
3618                 memcpy(priv->addresses[1].addr, priv->addresses[0].addr,
3619                        ETH_ALEN);
3620                 priv->addresses[1].addr[5]++;
3621                 priv->hw->wiphy->n_addresses++;
3622         }
3623
3624         /* initialize all valid contexts */
3625         iwl_init_context(priv);
3626
3627         /************************
3628          * 5. Setup HW constants
3629          ************************/
3630         if (iwl_set_hw_params(priv)) {
3631                 IWL_ERR(priv, "failed to set hw parameters\n");
3632                 goto out_free_eeprom;
3633         }
3634
3635         /*******************
3636          * 6. Setup priv
3637          *******************/
3638
3639         err = iwl_init_drv(priv);
3640         if (err)
3641                 goto out_free_eeprom;
3642         /* At this point both hw and priv are initialized. */
3643
3644         /********************
3645          * 7. Setup services
3646          ********************/
3647         pci_enable_msi(priv->pci_dev);
3648
3649         iwl_alloc_isr_ict(priv);
3650
3651         err = request_irq(priv->pci_dev->irq, iwl_isr_ict,
3652                           IRQF_SHARED, DRV_NAME, priv);
3653         if (err) {
3654                 IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
3655                 goto out_disable_msi;
3656         }
3657
3658         iwl_setup_deferred_work(priv);
3659         iwl_setup_rx_handlers(priv);
3660         iwl_testmode_init(priv);
3661
3662         /*********************************************
3663          * 8. Enable interrupts and read RFKILL state
3664          *********************************************/
3665
3666         /* enable rfkill interrupt: hw bug w/a */
3667         pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pci_cmd);
3668         if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
3669                 pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
3670                 pci_write_config_word(priv->pci_dev, PCI_COMMAND, pci_cmd);
3671         }
3672
3673         iwl_enable_rfkill_int(priv);
3674
3675         /* If platform's RF_KILL switch is NOT set to KILL */
3676         if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
3677                 clear_bit(STATUS_RF_KILL_HW, &priv->status);
3678         else
3679                 set_bit(STATUS_RF_KILL_HW, &priv->status);
3680
3681         wiphy_rfkill_set_hw_state(priv->hw->wiphy,
3682                 test_bit(STATUS_RF_KILL_HW, &priv->status));
3683
3684         iwl_power_initialize(priv);
3685         iwl_tt_initialize(priv);
3686
3687         init_completion(&priv->_agn.firmware_loading_complete);
3688
3689         err = iwl_request_firmware(priv, true);
3690         if (err)
3691                 goto out_destroy_workqueue;
3692
3693         return 0;
3694
3695  out_destroy_workqueue:
3696         destroy_workqueue(priv->workqueue);
3697         priv->workqueue = NULL;
3698         free_irq(priv->pci_dev->irq, priv);
3699  out_disable_msi:
3700         iwl_free_isr_ict(priv);
3701         pci_disable_msi(priv->pci_dev);
3702         iwl_uninit_drv(priv);
3703  out_free_eeprom:
3704         iwl_eeprom_free(priv);
3705  out_iounmap:
3706         pci_iounmap(pdev, priv->hw_base);
3707  out_pci_release_regions:
3708         pci_set_drvdata(pdev, NULL);
3709         pci_release_regions(pdev);
3710  out_pci_disable_device:
3711         pci_disable_device(pdev);
3712  out_ieee80211_free_hw:
3713         iwl_free_traffic_mem(priv);
3714         ieee80211_free_hw(priv->hw);
3715  out:
3716         return err;
3717 }
3718
3719 void __devexit iwl_remove(struct pci_dev *pdev)
3720 {
3721         struct iwl_priv *priv = pci_get_drvdata(pdev);
3722         unsigned long flags;
3723
3724         if (!priv)
3725                 return;
3726
3727         wait_for_completion(&priv->_agn.firmware_loading_complete);
3728
3729         IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
3730
3731         iwl_dbgfs_unregister(priv);
3732         sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group);
3733
3734         /* The ieee80211_unregister_hw call will cause iwl_mac_stop to
3735          * be called and iwl_down to run; since we are removing the
3736          * device we need to set the STATUS_EXIT_PENDING bit.
3737          */
3738         set_bit(STATUS_EXIT_PENDING, &priv->status);
3739
3740         iwl_testmode_cleanup(priv);
3741         iwl_leds_exit(priv);
3742
3743         if (priv->mac80211_registered) {
3744                 ieee80211_unregister_hw(priv->hw);
3745                 priv->mac80211_registered = 0;
3746         }
3747
3748         /* Reset to low power before unloading driver. */
3749         iwl_apm_stop(priv);
3750
3751         iwl_tt_exit(priv);
3752
3753         /* make sure we flush any pending irq or
3754          * tasklet for the driver
3755          */
3756         spin_lock_irqsave(&priv->lock, flags);
3757         iwl_disable_interrupts(priv);
3758         spin_unlock_irqrestore(&priv->lock, flags);
3759
3760         iwl_synchronize_irq(priv);
3761
3762         iwl_dealloc_ucode_pci(priv);
3763
3764         if (priv->rxq.bd)
3765                 iwlagn_rx_queue_free(priv, &priv->rxq);
3766         iwlagn_hw_txq_ctx_free(priv);
3767
3768         iwl_eeprom_free(priv);
3769
3770
3771         /*netif_stop_queue(dev); */
3772         flush_workqueue(priv->workqueue);
3773
3774         /* ieee80211_unregister_hw calls iwl_mac_stop, which flushes
3775          * priv->workqueue... so we can't take down the workqueue
3776          * until now... */
3777         destroy_workqueue(priv->workqueue);
3778         priv->workqueue = NULL;
3779         iwl_free_traffic_mem(priv);
3780
3781         free_irq(priv->pci_dev->irq, priv);
3782         pci_disable_msi(priv->pci_dev);
3783         pci_iounmap(pdev, priv->hw_base);
3784         pci_release_regions(pdev);
3785         pci_disable_device(pdev);
3786         pci_set_drvdata(pdev, NULL);
3787
3788         iwl_uninit_drv(priv);
3789
3790         iwl_free_isr_ict(priv);
3791
3792         dev_kfree_skb(priv->beacon_skb);
3793
3794         ieee80211_free_hw(priv->hw);
3795 }
3796
3797
3798 /*****************************************************************************
3799  *
3800  * driver and module entry point
3801  *
3802  *****************************************************************************/
3803 static int __init iwl_init(void)
3804 {
3805
3806         int ret;
3807         pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
3808         pr_info(DRV_COPYRIGHT "\n");
3809
3810         ret = iwlagn_rate_control_register();
3811         if (ret) {
3812                 pr_err("Unable to register rate control algorithm: %d\n", ret);
3813                 return ret;
3814         }
3815
3816         ret = iwl_pci_register_driver();
3817
3818         if (ret)
3819                 goto error_register;
3820         return ret;
3821
3822 error_register:
3823         iwlagn_rate_control_unregister();
3824         return ret;
3825 }
3826
3827 static void __exit iwl_exit(void)
3828 {
3829         iwl_pci_unregister_driver();
3830         iwlagn_rate_control_unregister();
3831 }
3832
3833 module_exit(iwl_exit);
3834 module_init(iwl_init);
3835
3836 #ifdef CONFIG_IWLWIFI_DEBUG
3837 module_param_named(debug, iwl_debug_level, uint, S_IRUGO | S_IWUSR);
3838 MODULE_PARM_DESC(debug, "debug output mask");
3839 #endif
3840
3841 module_param_named(swcrypto, iwlagn_mod_params.sw_crypto, int, S_IRUGO);
3842 MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
3843 module_param_named(queues_num, iwlagn_mod_params.num_of_queues, int, S_IRUGO);
3844 MODULE_PARM_DESC(queues_num, "number of hw queues.");
3845 module_param_named(11n_disable, iwlagn_mod_params.disable_11n, int, S_IRUGO);
3846 MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
3847 module_param_named(amsdu_size_8K, iwlagn_mod_params.amsdu_size_8K,
3848                    int, S_IRUGO);
3849 MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
3850 module_param_named(fw_restart, iwlagn_mod_params.restart_fw, int, S_IRUGO);
3851 MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
3852
3853 module_param_named(ucode_alternative, iwlagn_wanted_ucode_alternative, int,
3854                    S_IRUGO);
3855 MODULE_PARM_DESC(ucode_alternative,
3856                  "specify ucode alternative to use from ucode file");
3857
3858 module_param_named(antenna_coupling, iwlagn_ant_coupling, int, S_IRUGO);
3859 MODULE_PARM_DESC(antenna_coupling,
3860                  "specify antenna coupling in dB (default: 0 dB)");
3861
3862 module_param_named(bt_ch_inhibition, iwlagn_bt_ch_announce, bool, S_IRUGO);
3863 MODULE_PARM_DESC(bt_ch_inhibition,
3864                  "Disable BT channel inhibition (default: enable)");
3865
3866 module_param_named(plcp_check, iwlagn_mod_params.plcp_check, bool, S_IRUGO);
3867 MODULE_PARM_DESC(plcp_check, "Check plcp health (default: 1 [enabled])");
3868
3869 module_param_named(ack_check, iwlagn_mod_params.ack_check, bool, S_IRUGO);
3870 MODULE_PARM_DESC(ack_check, "Check ack health (default: 0 [disabled])");
3871
3872 /*
3873  * If bt_coex_active is set to true, the uCode will kill/defer WiFi
3874  * traffic every time the priority line is asserted (BT is sending
3875  * signals on the priority line in the PCIx).
3876  * If bt_coex_active is set to false, the uCode will ignore the BT
3877  * activity and perform normal operation.
3878  *
3879  * Users might experience transmit issues on some platforms due to
3880  * WiFi/BT coexistence problems. The possible symptoms are:
3881  *   Able to scan and find all the available APs
3882  *   Not able to associate with any AP
3883  * On those platforms, WiFi communication can be restored by setting
3884  * the "bt_coex_active" module parameter to "false"
3885  *
3886  * default: bt_coex_active = true (BT_COEX_ENABLE)
3887  */
3888 module_param_named(bt_coex_active, iwlagn_mod_params.bt_coex_active,
3889                 bool, S_IRUGO);
3890 MODULE_PARM_DESC(bt_coex_active, "enable wifi/bt co-exist (default: enable)");
3891
3892 module_param_named(led_mode, iwlagn_mod_params.led_mode, int, S_IRUGO);
3893 MODULE_PARM_DESC(led_mode, "0=system default, "
3894                 "1=On(RF On)/Off(RF Off), 2=blinking (default: 0)");
3895
3896 /*
3897  * For now, keep using power level 1 instead of automatically
3898  * adjusting ...
3899  */
3900 module_param_named(no_sleep_autoadjust, iwlagn_mod_params.no_sleep_autoadjust,
3901                 bool, S_IRUGO);
3902 MODULE_PARM_DESC(no_sleep_autoadjust,
3903                  "don't automatically adjust sleep level "
3904                  "according to maximum network latency (default: true)");
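
/*
 * Example (illustrative only; this assumes the driver is loaded as the
 * "iwlagn" module): the parameters declared above can be set at load
 * time, e.g.
 *
 *   modprobe iwlagn swcrypto=1 11n_disable=1 bt_coex_active=0 led_mode=2
 *
 * or made persistent via a modprobe.d configuration file.
 */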