1 /******************************************************************************
3 * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved.
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
24 * Contact Information:
25 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
28 *****************************************************************************/
31 * NOTE: This file (iwl-base.c) is used to build for multiple hardware targets
32 * by defining IWL to either 3945 or 4965. The Makefile used when building
33 * the base targets will create base-3945.o and base-4965.o
35 * The eventual goal is to move as many of the #if IWL / #endif blocks out of
36 * this file and into the hardware-specific implementation files (iwl-XXXX.c)
37 * and leave only the common (non-#ifdef-sprinkled) code in this file
40 #include <linux/kernel.h>
41 #include <linux/module.h>
42 #include <linux/version.h>
43 #include <linux/init.h>
44 #include <linux/pci.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/delay.h>
47 #include <linux/skbuff.h>
48 #include <linux/netdevice.h>
49 #include <linux/wireless.h>
50 #include <linux/firmware.h>
53 #include <linux/etherdevice.h>
54 #include <linux/if_arp.h>
56 #include <net/ieee80211_radiotap.h>
57 #include <net/mac80211.h>
59 #include <asm/div64.h>
65 #include "iwl-helpers.h"
67 #ifdef CONFIG_IWLWIFI_DEBUG
71 /******************************************************************************
75 ******************************************************************************/
77 /* module parameters */
78 int iwl_param_disable_hw_scan;
80 int iwl_param_disable;   /* def: 0 = enable radio */
81 int iwl_param_antenna;   /* def: 0 = both antennas (use diversity) */
82 int iwl_param_hwcrypto;  /* def: 0 = use software encryption */
83 int iwl_param_qos_enable = 1;
84 int iwl_param_queues_num = IWL_MAX_NUM_QUEUES;
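/*
 * Illustrative note (the module_param() wiring is defined elsewhere in this
 * file, so the exact option names below are an assumption): if the parameter
 * names follow the variable suffixes above, the defaults could be overridden
 * at load time with something like:
 *
 *   modprobe iwl4965 qos_enable=0 queues_num=8 hwcrypto=1
 */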
87 * module name, copyright, version, etc.
88 * NOTE: DRV_NAME is defined in iwlwifi.h for use by iwl-debug.h and printk
91 #define DRV_DESCRIPTION "Intel(R) Wireless WiFi Link 4965AGN driver for Linux"
93 #ifdef CONFIG_IWLWIFI_DEBUG
99 #ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT
105 #define IWLWIFI_VERSION "1.1.17k" VD VS
106 #define DRV_COPYRIGHT "Copyright(c) 2003-2007 Intel Corporation"
107 #define DRV_VERSION IWLWIFI_VERSION
109 /* Change firmware file name, using "-" and incrementing number,
110 * *only* when uCode interface or architecture changes so that it
111 * is not compatible with earlier drivers.
112 * This number will also appear in the << 8 position of the first dword of the uCode file */
113 #define IWL4965_UCODE_API "-1"
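/*
 * Illustrative example (the firmware-name macro itself is defined elsewhere in
 * the driver, so the exact pattern is an assumption): with an API string of
 * "-1" the requested image would be named along the lines of
 * "iwlwifi-4965-1.ucode", and the same API number is carried in bits 8-15 of
 * the first dword of that file.
 */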
115 MODULE_DESCRIPTION(DRV_DESCRIPTION);
116 MODULE_VERSION(DRV_VERSION);
117 MODULE_AUTHOR(DRV_COPYRIGHT);
118 MODULE_LICENSE("GPL");
120 __le16 *ieee80211_get_qos_ctrl(struct ieee80211_hdr *hdr)
122 u16 fc = le16_to_cpu(hdr->frame_control);
123 int hdr_len = ieee80211_get_hdrlen(fc);
125 if ((fc & 0x00cc) == (IEEE80211_STYPE_QOS_DATA | IEEE80211_FTYPE_DATA))
126 return (__le16 *) ((u8 *) hdr + hdr_len - QOS_CONTROL_LEN);
130 static const struct ieee80211_hw_mode *iwl_get_hw_mode(
131 struct iwl_priv *priv, int mode)
135 for (i = 0; i < 3; i++)
136 if (priv->modes[i].mode == mode)
137 return &priv->modes[i];
142 static int iwl_is_empty_essid(const char *essid, int essid_len)
144 /* Single white space is for Linksys APs */
145 if (essid_len == 1 && essid[0] == ' ')
148 /* Otherwise, if the entire essid is 0, we assume it is hidden */
151 if (essid[essid_len] != '\0')
158 static const char *iwl_escape_essid(const char *essid, u8 essid_len)
160 static char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
161 const char *s = essid;
164 if (iwl_is_empty_essid(essid, essid_len)) {
165 memcpy(escaped, "<hidden>", sizeof("<hidden>"));
169 essid_len = min(essid_len, (u8) IW_ESSID_MAX_SIZE);
170 while (essid_len--) {
182 static void iwl_print_hex_dump(int level, void *p, u32 len)
184 #ifdef CONFIG_IWLWIFI_DEBUG
185 if (!(iwl_debug_level & level))
188 print_hex_dump(KERN_DEBUG, "iwl data: ", DUMP_PREFIX_OFFSET, 16, 1,
193 /*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
196 * Theory of operation
198 * A queue is a circular buffer with 'Read' and 'Write' pointers.
199 * Two empty entries are always kept in the buffer to protect against overflow.
201 * For a Tx queue, there are low-mark and high-mark limits. If, after queuing
202 * a packet for Tx, the free space drops below the low mark, the Tx queue is
203 * stopped. When packets are reclaimed (on the 'tx done' IRQ) and the free
204 * space rises above the high mark, the Tx queue is resumed.
206 * The IWL operates with six queues: one receive queue in the device's
207 * SRAM, one transmit queue for sending commands to the device firmware,
208 * and four transmit queues for data.
209 ***************************************************/
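/*
 * Illustrative example (assuming the usual TFD_QUEUE_SIZE_MAX of 256 entries
 * per Tx queue): the two-entry reserve described above means at most 254 TFDs
 * can be outstanding at once, which keeps a completely full queue
 * distinguishable from an empty one (where read and write pointers are equal).
 */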
211 static int iwl_queue_space(const struct iwl_queue *q)
213 int s = q->last_used - q->first_empty;
215 if (q->last_used > q->first_empty)
220 /* keep some reserve to not confuse empty and full situations */
227 /* XXX: n_bd must be power-of-two size */
228 static inline int iwl_queue_inc_wrap(int index, int n_bd)
230 return ++index & (n_bd - 1);
233 /* XXX: n_bd must be power-of-two size */
234 static inline int iwl_queue_dec_wrap(int index, int n_bd)
236 return --index & (n_bd - 1);
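/*
 * Worked example of the wrap arithmetic above: with n_bd == 256 the mask is
 * 0xFF, so iwl_queue_inc_wrap(255, 256) == 0 and iwl_queue_dec_wrap(0, 256)
 * == 255. If n_bd were not a power of two, the mask trick would silently
 * produce wrong indices, hence the BUG_ON(!is_power_of_2()) checks in
 * iwl_queue_init() below.
 */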
239 static inline int x2_queue_used(const struct iwl_queue *q, int i)
241 return q->first_empty > q->last_used ?
242 (i >= q->last_used && i < q->first_empty) :
243 !(i < q->last_used && i >= q->first_empty);
246 static inline u8 get_cmd_index(struct iwl_queue *q, u32 index, int is_huge)
251 return index & (q->n_window - 1);
254 static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
255 int count, int slots_num, u32 id)
258 q->n_window = slots_num;
261 /* count must be power-of-two size, otherwise iwl_queue_inc_wrap
262 * and iwl_queue_dec_wrap are broken. */
263 BUG_ON(!is_power_of_2(count));
265 /* slots_num must be power-of-two size, otherwise
266 * get_cmd_index is broken. */
267 BUG_ON(!is_power_of_2(slots_num));
269 q->low_mark = q->n_window / 4;
273 q->high_mark = q->n_window / 8;
274 if (q->high_mark < 2)
277 q->first_empty = q->last_used = 0;
282 static int iwl_tx_queue_alloc(struct iwl_priv *priv,
283 struct iwl_tx_queue *txq, u32 id)
285 struct pci_dev *dev = priv->pci_dev;
287 if (id != IWL_CMD_QUEUE_NUM) {
288 txq->txb = kmalloc(sizeof(txq->txb[0]) *
289 TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
291 IWL_ERROR("kmalloc for auxilary BD "
292 "structures failed\n");
298 txq->bd = pci_alloc_consistent(dev,
299 sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX,
303 IWL_ERROR("pci_alloc_consistent(%zd) failed\n",
304 sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX);
320 int iwl_tx_queue_init(struct iwl_priv *priv,
321 struct iwl_tx_queue *txq, int slots_num, u32 txq_id)
323 struct pci_dev *dev = priv->pci_dev;
327 /* Allocate command space plus one big command for scan, since the scan
328 * command is very large and the system will not have two scans in flight at the same time */
330 len = sizeof(struct iwl_cmd) * slots_num;
331 if (txq_id == IWL_CMD_QUEUE_NUM)
332 len += IWL_MAX_SCAN_SIZE;
333 txq->cmd = pci_alloc_consistent(dev, len, &txq->dma_addr_cmd);
337 rc = iwl_tx_queue_alloc(priv, txq, txq_id);
339 pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);
343 txq->need_update = 0;
345 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
346 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
347 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
348 iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
350 iwl_hw_tx_queue_init(priv, txq);
356 * iwl_tx_queue_free - Deallocate DMA queue.
357 * @txq: Transmit queue to deallocate.
359 * Empty queue by removing and destroying all BD's.
360 * Free all buffers. txq itself is not freed.
363 void iwl_tx_queue_free(struct iwl_priv *priv, struct iwl_tx_queue *txq)
365 struct iwl_queue *q = &txq->q;
366 struct pci_dev *dev = priv->pci_dev;
372 /* first, empty all BD's */
373 for (; q->first_empty != q->last_used;
374 q->last_used = iwl_queue_inc_wrap(q->last_used, q->n_bd))
375 iwl_hw_txq_free_tfd(priv, txq);
377 len = sizeof(struct iwl_cmd) * q->n_window;
378 if (q->id == IWL_CMD_QUEUE_NUM)
379 len += IWL_MAX_SCAN_SIZE;
381 pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);
383 /* free buffers belonging to queue itself */
385 pci_free_consistent(dev, sizeof(struct iwl_tfd_frame) *
386 txq->q.n_bd, txq->bd, txq->q.dma_addr);
393 /* 0 fill whole structure */
394 memset(txq, 0, sizeof(*txq));
397 const u8 BROADCAST_ADDR[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
399 /*************** STATION TABLE MANAGEMENT ****
401 * NOTE: This needs to be overhauled to better synchronize between
402 * how iwl-4965.c uses iwl_hw_find_station vs. how iwl-3945.c does
404 * mac80211 should also be examined to determine if sta_info is duplicating
405 * the functionality provided here
408 /**************************************************************/
410 #if 0 /* temporarily disabled until we add a real remove-station path */
411 static u8 iwl_remove_station(struct iwl_priv *priv, const u8 *addr, int is_ap)
413 int index = IWL_INVALID_STATION;
417 spin_lock_irqsave(&priv->sta_lock, flags);
421 else if (is_broadcast_ether_addr(addr))
422 index = priv->hw_setting.bcast_sta_id;
424 for (i = IWL_STA_ID; i < priv->hw_setting.max_stations; i++)
425 if (priv->stations[i].used &&
426 !compare_ether_addr(priv->stations[i].sta.sta.addr,
432 if (unlikely(index == IWL_INVALID_STATION))
435 if (priv->stations[index].used) {
436 priv->stations[index].used = 0;
437 priv->num_stations--;
440 BUG_ON(priv->num_stations < 0);
443 spin_unlock_irqrestore(&priv->sta_lock, flags);
448 static void iwl_clear_stations_table(struct iwl_priv *priv)
452 spin_lock_irqsave(&priv->sta_lock, flags);
454 priv->num_stations = 0;
455 memset(priv->stations, 0, sizeof(priv->stations));
457 spin_unlock_irqrestore(&priv->sta_lock, flags);
460 u8 iwl_add_station(struct iwl_priv *priv, const u8 *addr, int is_ap, u8 flags)
463 int index = IWL_INVALID_STATION;
464 struct iwl_station_entry *station;
465 unsigned long flags_spin;
466 DECLARE_MAC_BUF(mac);
468 spin_lock_irqsave(&priv->sta_lock, flags_spin);
471 else if (is_broadcast_ether_addr(addr))
472 index = priv->hw_setting.bcast_sta_id;
474 for (i = IWL_STA_ID; i < priv->hw_setting.max_stations; i++) {
475 if (!compare_ether_addr(priv->stations[i].sta.sta.addr,
481 if (!priv->stations[i].used &&
482 index == IWL_INVALID_STATION)
487 /* These two conditions have the same outcome, but keep them separate
488 since they have different meanings */
489 if (unlikely(index == IWL_INVALID_STATION)) {
490 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
494 if (priv->stations[index].used &&
495 !compare_ether_addr(priv->stations[index].sta.sta.addr, addr)) {
496 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
501 IWL_DEBUG_ASSOC("Add STA ID %d: %s\n", index, print_mac(mac, addr));
502 station = &priv->stations[index];
504 priv->num_stations++;
506 memset(&station->sta, 0, sizeof(struct iwl_addsta_cmd));
507 memcpy(station->sta.sta.addr, addr, ETH_ALEN);
508 station->sta.mode = 0;
509 station->sta.sta.sta_id = index;
510 station->sta.station_flags = 0;
512 #ifdef CONFIG_IWLWIFI_HT
513 /* BCAST station and IBSS stations do not work in HT mode */
514 if (index != priv->hw_setting.bcast_sta_id &&
515 priv->iw_mode != IEEE80211_IF_TYPE_IBSS)
516 iwl4965_set_ht_add_station(priv, index);
517 #endif /*CONFIG_IWLWIFI_HT*/
519 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
520 iwl_send_add_station(priv, &station->sta, flags);
525 /*************** DRIVER STATUS FUNCTIONS *****/
527 static inline int iwl_is_ready(struct iwl_priv *priv)
529 /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
530 * set but EXIT_PENDING is not */
531 return test_bit(STATUS_READY, &priv->status) &&
532 test_bit(STATUS_GEO_CONFIGURED, &priv->status) &&
533 !test_bit(STATUS_EXIT_PENDING, &priv->status);
536 static inline int iwl_is_alive(struct iwl_priv *priv)
538 return test_bit(STATUS_ALIVE, &priv->status);
541 static inline int iwl_is_init(struct iwl_priv *priv)
543 return test_bit(STATUS_INIT, &priv->status);
546 static inline int iwl_is_rfkill(struct iwl_priv *priv)
548 return test_bit(STATUS_RF_KILL_HW, &priv->status) ||
549 test_bit(STATUS_RF_KILL_SW, &priv->status);
552 static inline int iwl_is_ready_rf(struct iwl_priv *priv)
555 if (iwl_is_rfkill(priv))
558 return iwl_is_ready(priv);
561 /*************** HOST COMMAND QUEUE FUNCTIONS *****/
563 #define IWL_CMD(x) case x: return #x
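/*
 * For reference, IWL_CMD(REPLY_ALIVE) expands to
 *     case REPLY_ALIVE: return "REPLY_ALIVE";
 * so the switch below maps each command ID to its printable name.
 */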
565 static const char *get_cmd_string(u8 cmd)
568 IWL_CMD(REPLY_ALIVE);
569 IWL_CMD(REPLY_ERROR);
571 IWL_CMD(REPLY_RXON_ASSOC);
572 IWL_CMD(REPLY_QOS_PARAM);
573 IWL_CMD(REPLY_RXON_TIMING);
574 IWL_CMD(REPLY_ADD_STA);
575 IWL_CMD(REPLY_REMOVE_STA);
576 IWL_CMD(REPLY_REMOVE_ALL_STA);
578 IWL_CMD(REPLY_RATE_SCALE);
579 IWL_CMD(REPLY_LEDS_CMD);
580 IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
581 IWL_CMD(RADAR_NOTIFICATION);
582 IWL_CMD(REPLY_QUIET_CMD);
583 IWL_CMD(REPLY_CHANNEL_SWITCH);
584 IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
585 IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD);
586 IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION);
587 IWL_CMD(POWER_TABLE_CMD);
588 IWL_CMD(PM_SLEEP_NOTIFICATION);
589 IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC);
590 IWL_CMD(REPLY_SCAN_CMD);
591 IWL_CMD(REPLY_SCAN_ABORT_CMD);
592 IWL_CMD(SCAN_START_NOTIFICATION);
593 IWL_CMD(SCAN_RESULTS_NOTIFICATION);
594 IWL_CMD(SCAN_COMPLETE_NOTIFICATION);
595 IWL_CMD(BEACON_NOTIFICATION);
596 IWL_CMD(REPLY_TX_BEACON);
597 IWL_CMD(WHO_IS_AWAKE_NOTIFICATION);
598 IWL_CMD(QUIET_NOTIFICATION);
599 IWL_CMD(REPLY_TX_PWR_TABLE_CMD);
600 IWL_CMD(MEASURE_ABORT_NOTIFICATION);
601 IWL_CMD(REPLY_BT_CONFIG);
602 IWL_CMD(REPLY_STATISTICS_CMD);
603 IWL_CMD(STATISTICS_NOTIFICATION);
604 IWL_CMD(REPLY_CARD_STATE_CMD);
605 IWL_CMD(CARD_STATE_NOTIFICATION);
606 IWL_CMD(MISSED_BEACONS_NOTIFICATION);
607 IWL_CMD(REPLY_CT_KILL_CONFIG_CMD);
608 IWL_CMD(SENSITIVITY_CMD);
609 IWL_CMD(REPLY_PHY_CALIBRATION_CMD);
610 IWL_CMD(REPLY_RX_PHY_CMD);
611 IWL_CMD(REPLY_RX_MPDU_CMD);
612 IWL_CMD(REPLY_4965_RX);
613 IWL_CMD(REPLY_COMPRESSED_BA);
620 #define HOST_COMPLETE_TIMEOUT (HZ / 2)
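/* HZ / 2 jiffies is half a second regardless of the kernel's HZ setting; the
 * timeout error path below reports it in milliseconds via jiffies_to_msecs(). */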
623 * iwl_enqueue_hcmd - enqueue a uCode command
624 * @priv: device private data pointer
625 * @cmd: a pointer to the uCode command structure
627 * The function returns a value < 0 to indicate that the operation
628 * failed. On success, it returns the index (> 0) of the command in the queue.
631 static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
633 struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
634 struct iwl_queue *q = &txq->q;
635 struct iwl_tfd_frame *tfd;
637 struct iwl_cmd *out_cmd;
639 u16 fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
640 dma_addr_t phys_addr;
644 /* If any of the command structures end up being larger than
645 * the TFD_MAX_PAYLOAD_SIZE and is sent as a 'small' command, then
646 * we will need to increase the size of the TFD entries */
647 BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
648 !(cmd->meta.flags & CMD_SIZE_HUGE));
650 if (iwl_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) {
651 IWL_ERROR("No space for Tx\n");
655 spin_lock_irqsave(&priv->hcmd_lock, flags);
657 tfd = &txq->bd[q->first_empty];
658 memset(tfd, 0, sizeof(*tfd));
660 control_flags = (u32 *) tfd;
662 idx = get_cmd_index(q, q->first_empty, cmd->meta.flags & CMD_SIZE_HUGE);
663 out_cmd = &txq->cmd[idx];
665 out_cmd->hdr.cmd = cmd->id;
666 memcpy(&out_cmd->meta, &cmd->meta, sizeof(cmd->meta));
667 memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
669 /* At this point, out_cmd now has all of the incoming cmd copied into it */
672 out_cmd->hdr.flags = 0;
673 out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
674 INDEX_TO_SEQ(q->first_empty));
675 if (out_cmd->meta.flags & CMD_SIZE_HUGE)
676 out_cmd->hdr.sequence |= cpu_to_le16(SEQ_HUGE_FRAME);
678 phys_addr = txq->dma_addr_cmd + sizeof(txq->cmd[0]) * idx +
679 offsetof(struct iwl_cmd, hdr);
680 iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size);
682 IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, "
683 "%d bytes at %d[%d]:%d\n",
684 get_cmd_string(out_cmd->hdr.cmd),
685 out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
686 fix_size, q->first_empty, idx, IWL_CMD_QUEUE_NUM);
688 txq->need_update = 1;
689 ret = iwl4965_tx_queue_update_wr_ptr(priv, txq, 0);
690 q->first_empty = iwl_queue_inc_wrap(q->first_empty, q->n_bd);
691 iwl_tx_queue_update_write_ptr(priv, txq);
693 spin_unlock_irqrestore(&priv->hcmd_lock, flags);
694 return ret ? ret : idx;
697 int iwl_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
701 BUG_ON(!(cmd->meta.flags & CMD_ASYNC));
703 /* An asynchronous command can not expect an SKB to be set. */
704 BUG_ON(cmd->meta.flags & CMD_WANT_SKB);
706 /* An asynchronous command MUST have a callback. */
707 BUG_ON(!cmd->meta.u.callback);
709 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
712 ret = iwl_enqueue_hcmd(priv, cmd);
714 IWL_ERROR("Error sending %s: iwl_enqueue_hcmd failed: %d\n",
715 get_cmd_string(cmd->id), ret);
721 int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
725 static atomic_t entry = ATOMIC_INIT(0); /* reentrance protection */
727 BUG_ON(cmd->meta.flags & CMD_ASYNC);
729 /* A synchronous command can not have a callback set. */
730 BUG_ON(cmd->meta.u.callback != NULL);
732 if (atomic_xchg(&entry, 1)) {
733 IWL_ERROR("Error sending %s: Already sending a host command\n",
734 get_cmd_string(cmd->id));
738 set_bit(STATUS_HCMD_ACTIVE, &priv->status);
740 if (cmd->meta.flags & CMD_WANT_SKB)
741 cmd->meta.source = &cmd->meta;
743 cmd_idx = iwl_enqueue_hcmd(priv, cmd);
746 IWL_ERROR("Error sending %s: iwl_enqueue_hcmd failed: %d\n",
747 get_cmd_string(cmd->id), ret);
751 ret = wait_event_interruptible_timeout(priv->wait_command_queue,
752 !test_bit(STATUS_HCMD_ACTIVE, &priv->status),
753 HOST_COMPLETE_TIMEOUT);
755 if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) {
756 IWL_ERROR("Error sending %s: time out after %dms.\n",
757 get_cmd_string(cmd->id),
758 jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
760 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
766 if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
767 IWL_DEBUG_INFO("Command %s aborted: RF KILL Switch\n",
768 get_cmd_string(cmd->id));
772 if (test_bit(STATUS_FW_ERROR, &priv->status)) {
773 IWL_DEBUG_INFO("Command %s failed: FW Error\n",
774 get_cmd_string(cmd->id));
778 if ((cmd->meta.flags & CMD_WANT_SKB) && !cmd->meta.u.skb) {
779 IWL_ERROR("Error: Response NULL in '%s'\n",
780 get_cmd_string(cmd->id));
789 if (cmd->meta.flags & CMD_WANT_SKB) {
790 struct iwl_cmd *qcmd;
792 /* Cancel the CMD_WANT_SKB flag for the cmd in the
793 * TX cmd queue. Otherwise in case the cmd comes
794 * in later, it will possibly set an invalid
795 * address (cmd->meta.source). */
796 qcmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_idx];
797 qcmd->meta.flags &= ~CMD_WANT_SKB;
800 if (cmd->meta.u.skb) {
801 dev_kfree_skb_any(cmd->meta.u.skb);
802 cmd->meta.u.skb = NULL;
805 atomic_set(&entry, 0);
809 int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
811 /* A command can not be asynchronous AND expect an SKB to be set. */
812 BUG_ON((cmd->meta.flags & CMD_ASYNC) &&
813 (cmd->meta.flags & CMD_WANT_SKB));
815 if (cmd->meta.flags & CMD_ASYNC)
816 return iwl_send_cmd_async(priv, cmd);
818 return iwl_send_cmd_sync(priv, cmd);
821 int iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data)
823 struct iwl_host_cmd cmd = {
829 return iwl_send_cmd_sync(priv, &cmd);
832 static int __must_check iwl_send_cmd_u32(struct iwl_priv *priv, u8 id, u32 val)
834 struct iwl_host_cmd cmd = {
840 return iwl_send_cmd_sync(priv, &cmd);
843 int iwl_send_statistics_request(struct iwl_priv *priv)
845 return iwl_send_cmd_u32(priv, REPLY_STATISTICS_CMD, 0);
849 * iwl_rxon_add_station - add a station to the station table
851 * There is only one AP station, with id = IWL_AP_ID.
852 * NOTE: the mutex must be held before calling this function
854 static int iwl_rxon_add_station(struct iwl_priv *priv,
855 const u8 *addr, int is_ap)
859 sta_id = iwl_add_station(priv, addr, is_ap, 0);
860 iwl4965_add_station(priv, addr, is_ap);
866 * iwl_set_rxon_channel - Set the phymode and channel values in staging RXON
867 * @phymode: MODE_IEEE80211A sets to 5.2GHz; all else set to 2.4GHz
868 * @channel: Any channel valid for the requested phymode
870 * In addition to setting the staging RXON, priv->phymode is also set.
872 * NOTE: Does not commit to the hardware; it sets appropriate bit fields
873 * in the staging RXON flag structure based on the phymode
875 static int iwl_set_rxon_channel(struct iwl_priv *priv, u8 phymode, u16 channel)
877 if (!iwl_get_channel_info(priv, phymode, channel)) {
878 IWL_DEBUG_INFO("Could not set channel to %d [%d]\n",
883 if ((le16_to_cpu(priv->staging_rxon.channel) == channel) &&
884 (priv->phymode == phymode))
887 priv->staging_rxon.channel = cpu_to_le16(channel);
888 if (phymode == MODE_IEEE80211A)
889 priv->staging_rxon.flags &= ~RXON_FLG_BAND_24G_MSK;
891 priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
893 priv->phymode = phymode;
895 IWL_DEBUG_INFO("Staging channel set to %d [%d]\n", channel, phymode);
901 * iwl_check_rxon_cmd - validate RXON structure is valid
903 * NOTE: This is really only useful during development and can eventually
904 * be #ifdef'd out once the driver is stable and folks aren't actively making changes
907 static int iwl_check_rxon_cmd(struct iwl_rxon_cmd *rxon)
912 if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
913 error |= le32_to_cpu(rxon->flags &
914 (RXON_FLG_TGJ_NARROW_BAND_MSK |
915 RXON_FLG_RADAR_DETECT_MSK));
917 IWL_WARNING("check 24G fields %d | %d\n",
920 error |= (rxon->flags & RXON_FLG_SHORT_SLOT_MSK) ?
921 0 : le32_to_cpu(RXON_FLG_SHORT_SLOT_MSK);
923 IWL_WARNING("check 52 fields %d | %d\n",
925 error |= le32_to_cpu(rxon->flags & RXON_FLG_CCK_MSK);
927 IWL_WARNING("check 52 CCK %d | %d\n",
930 error |= (rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1;
932 IWL_WARNING("check mac addr %d | %d\n", counter++, error);
934 /* make sure basic rates 6Mbps and 1Mbps are supported */
935 error |= (((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0) &&
936 ((rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0));
938 IWL_WARNING("check basic rate %d | %d\n", counter++, error);
940 error |= (le16_to_cpu(rxon->assoc_id) > 2007);
942 IWL_WARNING("check assoc id %d | %d\n", counter++, error);
944 error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
945 == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK));
947 IWL_WARNING("check CCK and short slot %d | %d\n",
950 error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
951 == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK));
953 IWL_WARNING("check CCK & auto detect %d | %d\n",
956 error |= ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
957 RXON_FLG_TGG_PROTECT_MSK)) == RXON_FLG_TGG_PROTECT_MSK);
959 IWL_WARNING("check TGG and auto detect %d | %d\n",
963 IWL_WARNING("Tuning to channel %d\n",
964 le16_to_cpu(rxon->channel));
967 IWL_ERROR("Not a valid iwl_rxon_assoc_cmd field values\n");
974 * iwl_full_rxon_required - determine if RXON_ASSOC can be used in RXON commit
975 * @priv: staging_rxon is compared to active_rxon
977 * If the RXON structure is changing sufficiently to require a new
978 * tune, or to clear and reset the RXON_FILTER_ASSOC_MSK, then return 1
979 * to indicate that a new tune is required.
981 static int iwl_full_rxon_required(struct iwl_priv *priv)
984 /* These items are only settable from the full RXON command */
985 if (!(priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) ||
986 compare_ether_addr(priv->staging_rxon.bssid_addr,
987 priv->active_rxon.bssid_addr) ||
988 compare_ether_addr(priv->staging_rxon.node_addr,
989 priv->active_rxon.node_addr) ||
990 compare_ether_addr(priv->staging_rxon.wlap_bssid_addr,
991 priv->active_rxon.wlap_bssid_addr) ||
992 (priv->staging_rxon.dev_type != priv->active_rxon.dev_type) ||
993 (priv->staging_rxon.channel != priv->active_rxon.channel) ||
994 (priv->staging_rxon.air_propagation !=
995 priv->active_rxon.air_propagation) ||
996 (priv->staging_rxon.ofdm_ht_single_stream_basic_rates !=
997 priv->active_rxon.ofdm_ht_single_stream_basic_rates) ||
998 (priv->staging_rxon.ofdm_ht_dual_stream_basic_rates !=
999 priv->active_rxon.ofdm_ht_dual_stream_basic_rates) ||
1000 (priv->staging_rxon.rx_chain != priv->active_rxon.rx_chain) ||
1001 (priv->staging_rxon.assoc_id != priv->active_rxon.assoc_id))
1004 /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
1005 * be updated with the RXON_ASSOC command -- however only some
1006 * flag transitions are allowed using RXON_ASSOC */
1008 /* Check if we are not switching bands */
1009 if ((priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) !=
1010 (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK))
1013 /* Check if we are switching association toggle */
1014 if ((priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) !=
1015 (priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK))
1021 static int iwl_send_rxon_assoc(struct iwl_priv *priv)
1024 struct iwl_rx_packet *res = NULL;
1025 struct iwl_rxon_assoc_cmd rxon_assoc;
1026 struct iwl_host_cmd cmd = {
1027 .id = REPLY_RXON_ASSOC,
1028 .len = sizeof(rxon_assoc),
1029 .meta.flags = CMD_WANT_SKB,
1030 .data = &rxon_assoc,
1032 const struct iwl_rxon_cmd *rxon1 = &priv->staging_rxon;
1033 const struct iwl_rxon_cmd *rxon2 = &priv->active_rxon;
1035 if ((rxon1->flags == rxon2->flags) &&
1036 (rxon1->filter_flags == rxon2->filter_flags) &&
1037 (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
1038 (rxon1->ofdm_ht_single_stream_basic_rates ==
1039 rxon2->ofdm_ht_single_stream_basic_rates) &&
1040 (rxon1->ofdm_ht_dual_stream_basic_rates ==
1041 rxon2->ofdm_ht_dual_stream_basic_rates) &&
1042 (rxon1->rx_chain == rxon2->rx_chain) &&
1043 (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
1044 IWL_DEBUG_INFO("Using current RXON_ASSOC. Not resending.\n");
1048 rxon_assoc.flags = priv->staging_rxon.flags;
1049 rxon_assoc.filter_flags = priv->staging_rxon.filter_flags;
1050 rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates;
1051 rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates;
1052 rxon_assoc.reserved = 0;
1053 rxon_assoc.ofdm_ht_single_stream_basic_rates =
1054 priv->staging_rxon.ofdm_ht_single_stream_basic_rates;
1055 rxon_assoc.ofdm_ht_dual_stream_basic_rates =
1056 priv->staging_rxon.ofdm_ht_dual_stream_basic_rates;
1057 rxon_assoc.rx_chain_select_flags = priv->staging_rxon.rx_chain;
1059 rc = iwl_send_cmd_sync(priv, &cmd);
1063 res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
1064 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
1065 IWL_ERROR("Bad return from REPLY_RXON_ASSOC command\n");
1069 priv->alloc_rxb_skb--;
1070 dev_kfree_skb_any(cmd.meta.u.skb);
1076 * iwl_commit_rxon - commit staging_rxon to hardware
1078 * The RXON command in staging_rxon is committed to the hardware and
1079 * the active_rxon structure is updated with the new data. This
1080 * function correctly transitions out of the RXON_ASSOC_MSK state if
1081 * a HW tune is required based on the RXON structure changes.
1083 static int iwl_commit_rxon(struct iwl_priv *priv)
1085 /* cast away the const for active_rxon in this function */
1086 struct iwl_rxon_cmd *active_rxon = (void *)&priv->active_rxon;
1087 DECLARE_MAC_BUF(mac);
1090 if (!iwl_is_alive(priv))
1093 /* always get timestamp with Rx frame */
1094 priv->staging_rxon.flags |= RXON_FLG_TSF2HOST_MSK;
1096 rc = iwl_check_rxon_cmd(&priv->staging_rxon);
1098 IWL_ERROR("Invalid RXON configuration. Not committing.\n");
1102 /* If we don't need to send a full RXON, we can use
1103 * iwl_rxon_assoc_cmd which is used to reconfigure filter
1104 * and other flags for the current radio configuration. */
1105 if (!iwl_full_rxon_required(priv)) {
1106 rc = iwl_send_rxon_assoc(priv);
1108 IWL_ERROR("Error setting RXON_ASSOC "
1109 "configuration (%d).\n", rc);
1113 memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));
1118 /* station table will be cleared */
1119 priv->assoc_station_added = 0;
1121 #ifdef CONFIG_IWLWIFI_SENSITIVITY
1122 priv->sensitivity_data.state = IWL_SENS_CALIB_NEED_REINIT;
1123 if (!priv->error_recovering)
1124 priv->start_calib = 0;
1126 iwl4965_init_sensitivity(priv, CMD_ASYNC, 1);
1127 #endif /* CONFIG_IWLWIFI_SENSITIVITY */
1129 /* If we are currently associated and the new config requires
1130 * an RXON_ASSOC and the new config wants the associated mask enabled,
1131 * we must clear the associated bit from the active configuration
1132 * before we apply the new config */
1133 if (iwl_is_associated(priv) &&
1134 (priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK)) {
1135 IWL_DEBUG_INFO("Toggling associated bit on current RXON\n");
1136 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
1138 rc = iwl_send_cmd_pdu(priv, REPLY_RXON,
1139 sizeof(struct iwl_rxon_cmd),
1140 &priv->active_rxon);
1142 /* If the mask clearing failed then we set
1143 * active_rxon back to what it was previously */
1145 active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
1146 IWL_ERROR("Error clearing ASSOC_MSK on current "
1147 "configuration (%d).\n", rc);
1152 IWL_DEBUG_INFO("Sending RXON\n"
1153 "* with%s RXON_FILTER_ASSOC_MSK\n"
1156 ((priv->staging_rxon.filter_flags &
1157 RXON_FILTER_ASSOC_MSK) ? "" : "out"),
1158 le16_to_cpu(priv->staging_rxon.channel),
1159 print_mac(mac, priv->staging_rxon.bssid_addr));
1161 /* Apply the new configuration */
1162 rc = iwl_send_cmd_pdu(priv, REPLY_RXON,
1163 sizeof(struct iwl_rxon_cmd), &priv->staging_rxon);
1165 IWL_ERROR("Error setting new configuration (%d).\n", rc);
1169 iwl_clear_stations_table(priv);
1171 #ifdef CONFIG_IWLWIFI_SENSITIVITY
1172 if (!priv->error_recovering)
1173 priv->start_calib = 0;
1175 priv->sensitivity_data.state = IWL_SENS_CALIB_NEED_REINIT;
1176 iwl4965_init_sensitivity(priv, CMD_ASYNC, 1);
1177 #endif /* CONFIG_IWLWIFI_SENSITIVITY */
1179 memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));
1181 /* If we issue a new RXON command which requires a tune then we must
1182 * send a new TXPOWER command or we won't be able to Tx any frames */
1183 rc = iwl_hw_reg_send_txpower(priv);
1185 IWL_ERROR("Error setting Tx power (%d).\n", rc);
1189 /* Add the broadcast address so we can send broadcast frames */
1190 if (iwl_rxon_add_station(priv, BROADCAST_ADDR, 0) ==
1191 IWL_INVALID_STATION) {
1192 IWL_ERROR("Error adding BROADCAST address for transmit.\n");
1196 /* If we have set the ASSOC_MSK and we are in BSS mode then
1197 * add the IWL_AP_ID to the station rate table */
1198 if (iwl_is_associated(priv) &&
1199 (priv->iw_mode == IEEE80211_IF_TYPE_STA)) {
1200 if (iwl_rxon_add_station(priv, priv->active_rxon.bssid_addr, 1)
1201 == IWL_INVALID_STATION) {
1202 IWL_ERROR("Error adding AP address for transmit.\n");
1205 priv->assoc_station_added = 1;
1211 static int iwl_send_bt_config(struct iwl_priv *priv)
1213 struct iwl_bt_cmd bt_cmd = {
1221 return iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
1222 sizeof(struct iwl_bt_cmd), &bt_cmd);
1225 static int iwl_send_scan_abort(struct iwl_priv *priv)
1228 struct iwl_rx_packet *res;
1229 struct iwl_host_cmd cmd = {
1230 .id = REPLY_SCAN_ABORT_CMD,
1231 .meta.flags = CMD_WANT_SKB,
1234 /* If there isn't a scan actively going on in the hardware
1235 * then we are in between scan bands and not actually
1236 * actively scanning, so don't send the abort command */
1237 if (!test_bit(STATUS_SCAN_HW, &priv->status)) {
1238 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
1242 rc = iwl_send_cmd_sync(priv, &cmd);
1244 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
1248 res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
1249 if (res->u.status != CAN_ABORT_STATUS) {
1250 /* The scan abort will return 1 for success or
1251 * 2 for "failure". A failure condition can be
1252 * due to simply not being in an active scan which
1253 * can occur if we send the scan abort before the
1254 * microcode has notified us that a scan has completed. */
1256 IWL_DEBUG_INFO("SCAN_ABORT returned %d.\n", res->u.status);
1257 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
1258 clear_bit(STATUS_SCAN_HW, &priv->status);
1261 dev_kfree_skb_any(cmd.meta.u.skb);
1266 static int iwl_card_state_sync_callback(struct iwl_priv *priv,
1267 struct iwl_cmd *cmd,
1268 struct sk_buff *skb)
1276 * Use: Sets the internal card state to enable, disable, or halt
1278 * When in the 'enable' state the card operates as normal.
1279 * When in the 'disable' state, the card enters into a low power mode.
1280 * When in the 'halt' state, the card is shut down and must be fully
1281 * restarted to come back on.
1283 static int iwl_send_card_state(struct iwl_priv *priv, u32 flags, u8 meta_flag)
1285 struct iwl_host_cmd cmd = {
1286 .id = REPLY_CARD_STATE_CMD,
1289 .meta.flags = meta_flag,
1292 if (meta_flag & CMD_ASYNC)
1293 cmd.meta.u.callback = iwl_card_state_sync_callback;
1295 return iwl_send_cmd(priv, &cmd);
1298 static int iwl_add_sta_sync_callback(struct iwl_priv *priv,
1299 struct iwl_cmd *cmd, struct sk_buff *skb)
1301 struct iwl_rx_packet *res = NULL;
1304 IWL_ERROR("Error: Response NULL in REPLY_ADD_STA.\n");
1308 res = (struct iwl_rx_packet *)skb->data;
1309 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
1310 IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n",
1315 switch (res->u.add_sta.status) {
1316 case ADD_STA_SUCCESS_MSK:
1322 /* We didn't cache the SKB; let the caller free it */
1326 int iwl_send_add_station(struct iwl_priv *priv,
1327 struct iwl_addsta_cmd *sta, u8 flags)
1329 struct iwl_rx_packet *res = NULL;
1331 struct iwl_host_cmd cmd = {
1332 .id = REPLY_ADD_STA,
1333 .len = sizeof(struct iwl_addsta_cmd),
1334 .meta.flags = flags,
1338 if (flags & CMD_ASYNC)
1339 cmd.meta.u.callback = iwl_add_sta_sync_callback;
1341 cmd.meta.flags |= CMD_WANT_SKB;
1343 rc = iwl_send_cmd(priv, &cmd);
1345 if (rc || (flags & CMD_ASYNC))
1348 res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
1349 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
1350 IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n",
1356 switch (res->u.add_sta.status) {
1357 case ADD_STA_SUCCESS_MSK:
1358 IWL_DEBUG_INFO("REPLY_ADD_STA PASSED\n");
1362 IWL_WARNING("REPLY_ADD_STA failed\n");
1367 priv->alloc_rxb_skb--;
1368 dev_kfree_skb_any(cmd.meta.u.skb);
1373 static int iwl_update_sta_key_info(struct iwl_priv *priv,
1374 struct ieee80211_key_conf *keyconf,
1377 unsigned long flags;
1378 __le16 key_flags = 0;
1380 switch (keyconf->alg) {
1382 key_flags |= STA_KEY_FLG_CCMP;
1383 key_flags |= cpu_to_le16(
1384 keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
1385 key_flags &= ~STA_KEY_FLG_INVALID;
1393 spin_lock_irqsave(&priv->sta_lock, flags);
1394 priv->stations[sta_id].keyinfo.alg = keyconf->alg;
1395 priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
1396 memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
1399 memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
1401 priv->stations[sta_id].sta.key.key_flags = key_flags;
1402 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
1403 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
1405 spin_unlock_irqrestore(&priv->sta_lock, flags);
1407 IWL_DEBUG_INFO("hwcrypto: modify ucode station key info\n");
1408 iwl_send_add_station(priv, &priv->stations[sta_id].sta, 0);
1412 static int iwl_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id)
1414 unsigned long flags;
1416 spin_lock_irqsave(&priv->sta_lock, flags);
1417 memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl_hw_key));
1418 memset(&priv->stations[sta_id].sta.key, 0, sizeof(struct iwl_keyinfo));
1419 priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
1420 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
1421 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
1422 spin_unlock_irqrestore(&priv->sta_lock, flags);
1424 IWL_DEBUG_INFO("hwcrypto: clear ucode station key info\n");
1425 iwl_send_add_station(priv, &priv->stations[sta_id].sta, 0);
1429 static void iwl_clear_free_frames(struct iwl_priv *priv)
1431 struct list_head *element;
1433 IWL_DEBUG_INFO("%d frames on pre-allocated heap on clear.\n",
1434 priv->frames_count);
1436 while (!list_empty(&priv->free_frames)) {
1437 element = priv->free_frames.next;
1439 kfree(list_entry(element, struct iwl_frame, list));
1440 priv->frames_count--;
1443 if (priv->frames_count) {
1444 IWL_WARNING("%d frames still in use. Did we lose one?\n",
1445 priv->frames_count);
1446 priv->frames_count = 0;
1450 static struct iwl_frame *iwl_get_free_frame(struct iwl_priv *priv)
1452 struct iwl_frame *frame;
1453 struct list_head *element;
1454 if (list_empty(&priv->free_frames)) {
1455 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
1457 IWL_ERROR("Could not allocate frame!\n");
1461 priv->frames_count++;
1465 element = priv->free_frames.next;
1467 return list_entry(element, struct iwl_frame, list);
1470 static void iwl_free_frame(struct iwl_priv *priv, struct iwl_frame *frame)
1472 memset(frame, 0, sizeof(*frame));
1473 list_add(&frame->list, &priv->free_frames);
1476 unsigned int iwl_fill_beacon_frame(struct iwl_priv *priv,
1477 struct ieee80211_hdr *hdr,
1478 const u8 *dest, int left)
1481 if (!iwl_is_associated(priv) || !priv->ibss_beacon ||
1482 ((priv->iw_mode != IEEE80211_IF_TYPE_IBSS) &&
1483 (priv->iw_mode != IEEE80211_IF_TYPE_AP)))
1486 if (priv->ibss_beacon->len > left)
1489 memcpy(hdr, priv->ibss_beacon->data, priv->ibss_beacon->len);
1491 return priv->ibss_beacon->len;
1494 int iwl_rate_index_from_plcp(int plcp)
1498 if (plcp & RATE_MCS_HT_MSK) {
1501 if (i >= IWL_RATE_MIMO_6M_PLCP)
1502 i = i - IWL_RATE_MIMO_6M_PLCP;
1504 i += IWL_FIRST_OFDM_RATE;
1505 /* skip 9M, which is not supported in HT */
1506 if (i >= IWL_RATE_9M_INDEX)
1508 if ((i >= IWL_FIRST_OFDM_RATE) &&
1509 (i <= IWL_LAST_OFDM_RATE))
1512 for (i = 0; i < ARRAY_SIZE(iwl_rates); i++)
1513 if (iwl_rates[i].plcp == (plcp & 0xFF))
1519 static u8 iwl_rate_get_lowest_plcp(int rate_mask)
1523 for (i = IWL_RATE_1M_INDEX; i != IWL_RATE_INVALID;
1524 i = iwl_rates[i].next_ieee) {
1525 if (rate_mask & (1 << i))
1526 return iwl_rates[i].plcp;
1529 return IWL_RATE_INVALID;
1532 static int iwl_send_beacon_cmd(struct iwl_priv *priv)
1534 struct iwl_frame *frame;
1535 unsigned int frame_size;
1539 frame = iwl_get_free_frame(priv);
1542 IWL_ERROR("Could not obtain free frame buffer for beacon "
1547 if (!(priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)) {
1548 rate = iwl_rate_get_lowest_plcp(priv->active_rate_basic &
1550 if (rate == IWL_INVALID_RATE)
1551 rate = IWL_RATE_6M_PLCP;
1553 rate = iwl_rate_get_lowest_plcp(priv->active_rate_basic & 0xF);
1554 if (rate == IWL_INVALID_RATE)
1555 rate = IWL_RATE_1M_PLCP;
1558 frame_size = iwl_hw_get_beacon_cmd(priv, frame, rate);
1560 rc = iwl_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
1563 iwl_free_frame(priv, frame);
1568 /******************************************************************************
1570 * EEPROM related functions
1572 ******************************************************************************/
1574 static void get_eeprom_mac(struct iwl_priv *priv, u8 *mac)
1576 memcpy(mac, priv->eeprom.mac_address, 6);
1580 * iwl_eeprom_init - read EEPROM contents
1582 * Load the EEPROM from adapter into priv->eeprom
1584 * NOTE: This routine uses the non-debug IO access functions.
1586 int iwl_eeprom_init(struct iwl_priv *priv)
1588 u16 *e = (u16 *)&priv->eeprom;
1589 u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
1591 int sz = sizeof(priv->eeprom);
1596 /* The EEPROM structure has several padding buffers within it
1597 * and adding new EEPROM maps is subject to programmer errors
1598 * which may be very difficult to identify without explicitly
1599 * checking the resulting size of the eeprom map. */
1600 BUILD_BUG_ON(sizeof(priv->eeprom) != IWL_EEPROM_IMAGE_SIZE);
1602 if ((gp & CSR_EEPROM_GP_VALID_MSK) == CSR_EEPROM_GP_BAD_SIGNATURE) {
1603 IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x", gp);
1607 rc = iwl_eeprom_aqcuire_semaphore(priv);
1609 IWL_ERROR("Failed to aqcuire EEPROM semaphore.\n");
1613 /* eeprom is an array of 16bit values */
1614 for (addr = 0; addr < sz; addr += sizeof(u16)) {
1615 _iwl_write32(priv, CSR_EEPROM_REG, addr << 1);
1616 _iwl_clear_bit(priv, CSR_EEPROM_REG, CSR_EEPROM_REG_BIT_CMD);
1618 for (i = 0; i < IWL_EEPROM_ACCESS_TIMEOUT;
1619 i += IWL_EEPROM_ACCESS_DELAY) {
1620 r = _iwl_read_restricted(priv, CSR_EEPROM_REG);
1621 if (r & CSR_EEPROM_REG_READ_VALID_MSK)
1623 udelay(IWL_EEPROM_ACCESS_DELAY);
1626 if (!(r & CSR_EEPROM_REG_READ_VALID_MSK)) {
1627 IWL_ERROR("Time out reading EEPROM[%d]", addr);
1631 e[addr / 2] = le16_to_cpu(r >> 16);
1636 iwl_eeprom_release_semaphore(priv);
1640 /******************************************************************************
1642 * Misc. internal state and helper functions
1644 ******************************************************************************/
1645 #ifdef CONFIG_IWLWIFI_DEBUG
1648 * iwl_report_frame - dump frame to syslog during debug sessions
1650 * hack this function to show different aspects of received frames,
1651 * including selective frame dumps.
1652 * group100 parameter selects whether to show 1 out of 100 good frames.
1654 * TODO: ieee80211_hdr stuff is common to 3945 and 4965, so frame type
1655 * info output is okay, but some of this stuff (e.g. iwl_rx_frame_stats)
1656 * is 3945-specific and gives bad output for 4965. Need to split the
1657 * functionality, keep common stuff here.
1659 void iwl_report_frame(struct iwl_priv *priv,
1660 struct iwl_rx_packet *pkt,
1661 struct ieee80211_hdr *header, int group100)
1664 u32 print_summary = 0;
1665 u32 print_dump = 0; /* set to 1 to dump all frames' contents */
1682 struct iwl_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
1683 struct iwl_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
1684 struct iwl_rx_frame_end *rx_end = IWL_RX_END(pkt);
1685 u8 *data = IWL_RX_DATA(pkt);
1688 fc = le16_to_cpu(header->frame_control);
1689 seq_ctl = le16_to_cpu(header->seq_ctrl);
1692 channel = le16_to_cpu(rx_hdr->channel);
1693 phy_flags = le16_to_cpu(rx_hdr->phy_flags);
1694 rate_sym = rx_hdr->rate;
1695 length = le16_to_cpu(rx_hdr->len);
1697 /* end-of-frame status and timestamp */
1698 status = le32_to_cpu(rx_end->status);
1699 bcn_tmr = le32_to_cpu(rx_end->beacon_timestamp);
1700 tsf_low = le64_to_cpu(rx_end->timestamp) & 0x0ffffffff;
1701 tsf = le64_to_cpu(rx_end->timestamp);
1703 /* signal statistics */
1704 rssi = rx_stats->rssi;
1705 agc = rx_stats->agc;
1706 sig_avg = le16_to_cpu(rx_stats->sig_avg);
1707 noise_diff = le16_to_cpu(rx_stats->noise_diff);
1709 to_us = !compare_ether_addr(header->addr1, priv->mac_addr);
1711 /* if data frame is to us and all is good,
1712 * (optionally) print summary for only 1 out of every 100 */
1713 if (to_us && (fc & ~IEEE80211_FCTL_PROTECTED) ==
1714 (IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) {
1717 print_summary = 1; /* print each frame */
1718 else if (priv->framecnt_to_us < 100) {
1719 priv->framecnt_to_us++;
1722 priv->framecnt_to_us = 0;
1727 /* print summary for all other frames */
1731 if (print_summary) {
1736 title = "100Frames";
1737 else if (fc & IEEE80211_FCTL_RETRY)
1739 else if (ieee80211_is_assoc_response(fc))
1741 else if (ieee80211_is_reassoc_response(fc))
1743 else if (ieee80211_is_probe_response(fc)) {
1745 print_dump = 1; /* dump frame contents */
1746 } else if (ieee80211_is_beacon(fc)) {
1748 print_dump = 1; /* dump frame contents */
1749 } else if (ieee80211_is_atim(fc))
1751 else if (ieee80211_is_auth(fc))
1753 else if (ieee80211_is_deauth(fc))
1755 else if (ieee80211_is_disassoc(fc))
1760 rate = iwl_rate_index_from_plcp(rate_sym);
1764 rate = iwl_rates[rate].ieee / 2;
1766 /* print frame summary.
1767 * MAC addresses show just the last byte (for brevity),
1768 * but you can hack it to show more, if you'd like to. */
1770 IWL_DEBUG_RX("%s: mhd=0x%04x, dst=0x%02x, "
1771 "len=%u, rssi=%d, chnl=%d, rate=%u, \n",
1772 title, fc, header->addr1[5],
1773 length, rssi, channel, rate);
1775 /* src/dst addresses assume managed mode */
1776 IWL_DEBUG_RX("%s: 0x%04x, dst=0x%02x, "
1777 "src=0x%02x, rssi=%u, tim=%lu usec, "
1778 "phy=0x%02x, chnl=%d\n",
1779 title, fc, header->addr1[5],
1780 header->addr3[5], rssi,
1781 tsf_low - priv->scan_start_tsf,
1782 phy_flags, channel);
1786 iwl_print_hex_dump(IWL_DL_RX, data, length);
1790 static void iwl_unset_hw_setting(struct iwl_priv *priv)
1792 if (priv->hw_setting.shared_virt)
1793 pci_free_consistent(priv->pci_dev,
1794 sizeof(struct iwl_shared),
1795 priv->hw_setting.shared_virt,
1796 priv->hw_setting.shared_phys);
1800 * iwl_supported_rate_to_ie - fill in the supported rates in the IE field
1802 * return: a bitmap with a bit set for each supported rate inserted into the IE
1804 static u16 iwl_supported_rate_to_ie(u8 *ie, u16 supported_rate,
1805 u16 basic_rate, int max_count)
1807 u16 ret_rates = 0, bit;
1813 for (bit = 1, i = 0; i < IWL_RATE_COUNT; i++, bit <<= 1) {
1814 if (bit & supported_rate) {
1816 rates[*ie] = iwl_rates[i].ieee |
1817 ((bit & basic_rate) ? 0x80 : 0x00);
1819 if (*ie >= max_count)
1827 #ifdef CONFIG_IWLWIFI_HT
1828 static void iwl_set_ht_capab(struct ieee80211_hw *hw,
1829 struct ieee80211_ht_capability *ht_cap,
1834 * iwl_fill_probe_req - fill in all required fields and IE for probe request
1836 static u16 iwl_fill_probe_req(struct iwl_priv *priv,
1837 struct ieee80211_mgmt *frame,
1838 int left, int is_direct)
1844 /* Make sure there is enough space for the probe request,
1845 * two mandatory IEs and the data */
1851 frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
1852 memcpy(frame->da, BROADCAST_ADDR, ETH_ALEN);
1853 memcpy(frame->sa, priv->mac_addr, ETH_ALEN);
1854 memcpy(frame->bssid, BROADCAST_ADDR, ETH_ALEN);
1855 frame->seq_ctrl = 0;
1857 /* fill in our indirect SSID IE */
1864 pos = &(frame->u.probe_req.variable[0]);
1865 *pos++ = WLAN_EID_SSID;
1868 /* fill in our direct SSID IE... */
1871 left -= 2 + priv->essid_len;
1874 /* ... fill it in... */
1875 *pos++ = WLAN_EID_SSID;
1876 *pos++ = priv->essid_len;
1877 memcpy(pos, priv->essid, priv->essid_len);
1878 pos += priv->essid_len;
1879 len += 2 + priv->essid_len;
1882 /* fill in supported rate */
1887 /* ... fill it in... */
1888 *pos++ = WLAN_EID_SUPP_RATES;
1890 ret_rates = priv->active_rate = priv->rates_mask;
1891 priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK;
1893 iwl_supported_rate_to_ie(pos, priv->active_rate,
1894 priv->active_rate_basic, left);
1897 ret_rates = ~ret_rates & priv->active_rate;
1902 /* fill in supported extended rate */
1907 /* ... fill it in... */
1908 *pos++ = WLAN_EID_EXT_SUPP_RATES;
1910 iwl_supported_rate_to_ie(pos, ret_rates, priv->active_rate_basic, left);
1914 #ifdef CONFIG_IWLWIFI_HT
1915 if (is_direct && priv->is_ht_enabled) {
1916 u8 use_wide_chan = 1;
1918 if (priv->channel_width != IWL_CHANNEL_WIDTH_40MHZ)
1921 *pos++ = WLAN_EID_HT_CAPABILITY;
1922 *pos++ = sizeof(struct ieee80211_ht_capability);
1923 iwl_set_ht_capab(NULL, (struct ieee80211_ht_capability *)pos,
1925 len += 2 + sizeof(struct ieee80211_ht_capability);
1927 #endif /*CONFIG_IWLWIFI_HT */
1936 #ifdef CONFIG_IWLWIFI_QOS
1937 static int iwl_send_qos_params_command(struct iwl_priv *priv,
1938 struct iwl_qosparam_cmd *qos)
1941 return iwl_send_cmd_pdu(priv, REPLY_QOS_PARAM,
1942 sizeof(struct iwl_qosparam_cmd), qos);
1945 static void iwl_reset_qos(struct iwl_priv *priv)
1951 unsigned long flags;
1954 spin_lock_irqsave(&priv->lock, flags);
1955 priv->qos_data.qos_active = 0;
1957 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) {
1958 if (priv->qos_data.qos_enable)
1959 priv->qos_data.qos_active = 1;
1960 if (!(priv->active_rate & 0xfff0)) {
1964 } else if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
1965 if (priv->qos_data.qos_enable)
1966 priv->qos_data.qos_active = 1;
1967 } else if (!(priv->staging_rxon.flags & RXON_FLG_SHORT_SLOT_MSK)) {
1972 if (priv->qos_data.qos_active)
1975 priv->qos_data.def_qos_parm.ac[0].cw_min = cpu_to_le16(cw_min);
1976 priv->qos_data.def_qos_parm.ac[0].cw_max = cpu_to_le16(cw_max);
1977 priv->qos_data.def_qos_parm.ac[0].aifsn = aifs;
1978 priv->qos_data.def_qos_parm.ac[0].edca_txop = 0;
1979 priv->qos_data.def_qos_parm.ac[0].reserved1 = 0;
1981 if (priv->qos_data.qos_active) {
1983 priv->qos_data.def_qos_parm.ac[i].cw_min = cpu_to_le16(cw_min);
1984 priv->qos_data.def_qos_parm.ac[i].cw_max = cpu_to_le16(cw_max);
1985 priv->qos_data.def_qos_parm.ac[i].aifsn = 7;
1986 priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
1987 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
1990 priv->qos_data.def_qos_parm.ac[i].cw_min =
1991 cpu_to_le16((cw_min + 1) / 2 - 1);
1992 priv->qos_data.def_qos_parm.ac[i].cw_max =
1993 cpu_to_le16(cw_max);
1994 priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
1996 priv->qos_data.def_qos_parm.ac[i].edca_txop =
1999 priv->qos_data.def_qos_parm.ac[i].edca_txop =
2001 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
2004 priv->qos_data.def_qos_parm.ac[i].cw_min =
2005 cpu_to_le16((cw_min + 1) / 4 - 1);
2006 priv->qos_data.def_qos_parm.ac[i].cw_max =
2007 cpu_to_le16((cw_max + 1) / 2 - 1);
2008 priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
2009 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
2011 priv->qos_data.def_qos_parm.ac[i].edca_txop =
2014 priv->qos_data.def_qos_parm.ac[i].edca_txop =
2017 for (i = 1; i < 4; i++) {
2018 priv->qos_data.def_qos_parm.ac[i].cw_min =
2019 cpu_to_le16(cw_min);
2020 priv->qos_data.def_qos_parm.ac[i].cw_max =
2021 cpu_to_le16(cw_max);
2022 priv->qos_data.def_qos_parm.ac[i].aifsn = aifs;
2023 priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
2024 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
2027 IWL_DEBUG_QOS("set QoS to default \n");
2029 spin_unlock_irqrestore(&priv->lock, flags);
2032 static void iwl_activate_qos(struct iwl_priv *priv, u8 force)
2034 unsigned long flags;
2039 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2042 if (!priv->qos_data.qos_enable)
2045 spin_lock_irqsave(&priv->lock, flags);
2046 priv->qos_data.def_qos_parm.qos_flags = 0;
2048 if (priv->qos_data.qos_cap.q_AP.queue_request &&
2049 !priv->qos_data.qos_cap.q_AP.txop_request)
2050 priv->qos_data.def_qos_parm.qos_flags |=
2051 QOS_PARAM_FLG_TXOP_TYPE_MSK;
2053 if (priv->qos_data.qos_active)
2054 priv->qos_data.def_qos_parm.qos_flags |=
2055 QOS_PARAM_FLG_UPDATE_EDCA_MSK;
2057 spin_unlock_irqrestore(&priv->lock, flags);
2059 if (force || iwl_is_associated(priv)) {
2060 IWL_DEBUG_QOS("send QoS cmd with Qos active %d \n",
2061 priv->qos_data.qos_active);
2063 iwl_send_qos_params_command(priv,
2064 &(priv->qos_data.def_qos_parm));
2068 #endif /* CONFIG_IWLWIFI_QOS */
2070 * Power management (not Tx power!) functions
2072 #define MSEC_TO_USEC 1024
2074 #define NOSLP __constant_cpu_to_le16(0), 0, 0
2075 #define SLP IWL_POWER_DRIVER_ALLOW_SLEEP_MSK, 0, 0
2076 #define SLP_TIMEOUT(T) __constant_cpu_to_le32((T) * MSEC_TO_USEC)
2077 #define SLP_VEC(X0, X1, X2, X3, X4) {__constant_cpu_to_le32(X0), \
2078 __constant_cpu_to_le32(X1), \
2079 __constant_cpu_to_le32(X2), \
2080 __constant_cpu_to_le32(X3), \
2081 __constant_cpu_to_le32(X4)}
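/*
 * Worked example of the macros above: SLP_TIMEOUT(200) expands to
 * __constant_cpu_to_le32(200 * 1024) = 204800, i.e. roughly 200 ms expressed
 * in microseconds (the driver multiplies by 1024 rather than 1000), and
 * SLP_VEC(1, 2, 3, 4, 4) builds the five-entry sleep-interval vector used in
 * the tables below.
 */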
2084 /* default power management (not Tx power) table values */
2086 static struct iwl_power_vec_entry range_0[IWL_POWER_AC] = {
2087 {{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
2088 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0},
2089 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(300), SLP_VEC(2, 4, 6, 7, 7)}, 0},
2090 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(100), SLP_VEC(2, 6, 9, 9, 10)}, 0},
2091 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(25), SLP_VEC(2, 7, 9, 9, 10)}, 1},
2092 {{SLP, SLP_TIMEOUT(25), SLP_TIMEOUT(25), SLP_VEC(4, 7, 10, 10, 10)}, 1}
2096 static struct iwl_power_vec_entry range_1[IWL_POWER_AC] = {
2097 {{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
2098 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500),
2099 SLP_VEC(1, 2, 3, 4, 0xFF)}, 0},
2100 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(300),
2101 SLP_VEC(2, 4, 6, 7, 0xFF)}, 0},
2102 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(100),
2103 SLP_VEC(2, 6, 9, 9, 0xFF)}, 0},
2104 {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(25), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0},
2105 {{SLP, SLP_TIMEOUT(25), SLP_TIMEOUT(25),
2106 SLP_VEC(4, 7, 10, 10, 0xFF)}, 0}
2109 int iwl_power_init_handle(struct iwl_priv *priv)
2112 struct iwl_power_mgr *pow_data;
2113 int size = sizeof(struct iwl_power_vec_entry) * IWL_POWER_AC;
2116 IWL_DEBUG_POWER("Initialize power \n");
2118 pow_data = &(priv->power_data);
2120 memset(pow_data, 0, sizeof(*pow_data));
2122 pow_data->active_index = IWL_POWER_RANGE_0;
2123 pow_data->dtim_val = 0xffff;
2125 memcpy(&pow_data->pwr_range_0[0], &range_0[0], size);
2126 memcpy(&pow_data->pwr_range_1[0], &range_1[0], size);
2128 rc = pci_read_config_word(priv->pci_dev, PCI_LINK_CTRL, &pci_pm);
2132 struct iwl_powertable_cmd *cmd;
2134 IWL_DEBUG_POWER("adjust power command flags\n");
2136 for (i = 0; i < IWL_POWER_AC; i++) {
2137 cmd = &pow_data->pwr_range_0[i].cmd;
2140 cmd->flags &= ~IWL_POWER_PCI_PM_MSK;
2142 cmd->flags |= IWL_POWER_PCI_PM_MSK;
2148 static int iwl_update_power_cmd(struct iwl_priv *priv,
2149 struct iwl_powertable_cmd *cmd, u32 mode)
2154 struct iwl_power_vec_entry *range;
2156 struct iwl_power_mgr *pow_data;
2158 if (mode > IWL_POWER_INDEX_5) {
2159 IWL_DEBUG_POWER("Error invalid power mode \n");
2162 pow_data = &(priv->power_data);
2164 if (pow_data->active_index == IWL_POWER_RANGE_0)
2165 range = &pow_data->pwr_range_0[0];
2167 range = &pow_data->pwr_range_1[1];
2169 memcpy(cmd, &range[mode].cmd, sizeof(struct iwl_powertable_cmd));
2171 #ifdef IWL_MAC80211_DISABLE
2172 if (priv->assoc_network != NULL) {
2173 unsigned long flags;
2175 period = priv->assoc_network->tim.tim_period;
2177 #endif /*IWL_MAC80211_DISABLE */
2178 skip = range[mode].no_dtim;
2187 cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;
2189 __le32 slp_itrvl = cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1];
2190 max_sleep = (le32_to_cpu(slp_itrvl) / period) * period;
2191 cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK;
2194 for (i = 0; i < IWL_POWER_VEC_SIZE; i++) {
2195 if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep)
2196 cmd->sleep_interval[i] = cpu_to_le32(max_sleep);
2199 IWL_DEBUG_POWER("Flags value = 0x%08X\n", cmd->flags);
2200 IWL_DEBUG_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
2201 IWL_DEBUG_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
2202 IWL_DEBUG_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n",
2203 le32_to_cpu(cmd->sleep_interval[0]),
2204 le32_to_cpu(cmd->sleep_interval[1]),
2205 le32_to_cpu(cmd->sleep_interval[2]),
2206 le32_to_cpu(cmd->sleep_interval[3]),
2207 le32_to_cpu(cmd->sleep_interval[4]));
2212 static int iwl_send_power_mode(struct iwl_priv *priv, u32 mode)
2214 u32 final_mode = mode;
2216 struct iwl_powertable_cmd cmd;
2218 /* If on battery, set to 3,
2219 * if plugged into AC power, set to CAM ("continuously aware mode"),
2220 * else user level */
2222 case IWL_POWER_BATTERY:
2223 final_mode = IWL_POWER_INDEX_3;
2226 final_mode = IWL_POWER_MODE_CAM;
2233 cmd.keep_alive_beacons = 0;
2235 iwl_update_power_cmd(priv, &cmd, final_mode);
2237 rc = iwl_send_cmd_pdu(priv, POWER_TABLE_CMD, sizeof(cmd), &cmd);
2239 if (final_mode == IWL_POWER_MODE_CAM)
2240 clear_bit(STATUS_POWER_PMI, &priv->status);
2242 set_bit(STATUS_POWER_PMI, &priv->status);
2247 int iwl_is_network_packet(struct iwl_priv *priv, struct ieee80211_hdr *header)
2249 /* Filter incoming packets to determine if they are targeted toward
2250 * this network, discarding packets coming from ourselves */
2251 switch (priv->iw_mode) {
2252 case IEEE80211_IF_TYPE_IBSS: /* Header: Dest. | Source | BSSID */
2253 /* packets from our adapter are dropped (echo) */
2254 if (!compare_ether_addr(header->addr2, priv->mac_addr))
2256 /* {broad,multi}cast packets to our IBSS go through */
2257 if (is_multicast_ether_addr(header->addr1))
2258 return !compare_ether_addr(header->addr3, priv->bssid);
2259 /* packets to our adapter go through */
2260 return !compare_ether_addr(header->addr1, priv->mac_addr);
2261 case IEEE80211_IF_TYPE_STA: /* Header: Dest. | AP{BSSID} | Source */
2262 /* packets from our adapter are dropped (echo) */
2263 if (!compare_ether_addr(header->addr3, priv->mac_addr))
2265 /* {broad,multi}cast packets to our BSS go through */
2266 if (is_multicast_ether_addr(header->addr1))
2267 return !compare_ether_addr(header->addr2, priv->bssid);
2268 /* packets to our adapter go through */
2269 return !compare_ether_addr(header->addr1, priv->mac_addr);
2275 #define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
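/*
 * Editorial example (not part of the original source): TX_STATUS_ENTRY(x)
 * expands to a case label plus a string literal return, so e.g.
 *
 *	TX_STATUS_ENTRY(ABORTED);
 *
 * becomes
 *
 *	case TX_STATUS_FAIL_ABORTED: return "ABORTED";
 *
 * which keeps the switch in iwl_get_tx_fail_reason() below compact.
 */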
2277 const char *iwl_get_tx_fail_reason(u32 status)
2279 switch (status & TX_STATUS_MSK) {
2280 case TX_STATUS_SUCCESS:
2282 TX_STATUS_ENTRY(SHORT_LIMIT);
2283 TX_STATUS_ENTRY(LONG_LIMIT);
2284 TX_STATUS_ENTRY(FIFO_UNDERRUN);
2285 TX_STATUS_ENTRY(MGMNT_ABORT);
2286 TX_STATUS_ENTRY(NEXT_FRAG);
2287 TX_STATUS_ENTRY(LIFE_EXPIRE);
2288 TX_STATUS_ENTRY(DEST_PS);
2289 TX_STATUS_ENTRY(ABORTED);
2290 TX_STATUS_ENTRY(BT_RETRY);
2291 TX_STATUS_ENTRY(STA_INVALID);
2292 TX_STATUS_ENTRY(FRAG_DROPPED);
2293 TX_STATUS_ENTRY(TID_DISABLE);
2294 TX_STATUS_ENTRY(FRAME_FLUSHED);
2295 TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
2296 TX_STATUS_ENTRY(TX_LOCKED);
2297 TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
2304 * iwl_scan_cancel - Cancel any currently executing HW scan
2306 * NOTE: priv->mutex is not required before calling this function
2308 static int iwl_scan_cancel(struct iwl_priv *priv)
2310 if (!test_bit(STATUS_SCAN_HW, &priv->status)) {
2311 clear_bit(STATUS_SCANNING, &priv->status);
2315 if (test_bit(STATUS_SCANNING, &priv->status)) {
2316 if (!test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
2317 IWL_DEBUG_SCAN("Queuing scan abort.\n");
2318 set_bit(STATUS_SCAN_ABORTING, &priv->status);
2319 queue_work(priv->workqueue, &priv->abort_scan);
2322 IWL_DEBUG_SCAN("Scan abort already in progress.\n");
2324 return test_bit(STATUS_SCANNING, &priv->status);
2331 * iwl_scan_cancel_timeout - Cancel any currently executing HW scan and wait for it to complete
2332 * @ms: amount of time to wait (in milliseconds) for scan to abort
2334 * NOTE: priv->mutex must be held before calling this function
2336 static int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
2338 unsigned long now = jiffies;
2341 ret = iwl_scan_cancel(priv);
2343 mutex_unlock(&priv->mutex);
2344 while (!time_after(jiffies, now + msecs_to_jiffies(ms)) &&
2345 test_bit(STATUS_SCANNING, &priv->status))
2347 mutex_lock(&priv->mutex);
2349 return test_bit(STATUS_SCANNING, &priv->status);
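/*
 * Editorial usage note: a non-zero return means STATUS_SCANNING was still
 * set after waiting @ms milliseconds.  The call site in iwl_set_mode()
 * below, for example, treats iwl_scan_cancel_timeout(priv, 100) != 0 as
 * "aborted scan still in progress" and bails out.
 */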
2355 static void iwl_sequence_reset(struct iwl_priv *priv)
2357 /* Reset ieee stats */
2359 /* We don't reset the net_device_stats (ieee->stats) on
2362 priv->last_seq_num = -1;
2363 priv->last_frag_num = -1;
2364 priv->last_packet_time = 0;
2366 iwl_scan_cancel(priv);
2369 #define MAX_UCODE_BEACON_INTERVAL 4096
2370 #define INTEL_CONN_LISTEN_INTERVAL __constant_cpu_to_le16(0xA)
2372 static __le16 iwl_adjust_beacon_interval(u16 beacon_val)
2375 u16 beacon_factor = 0;
2378 (beacon_val + MAX_UCODE_BEACON_INTERVAL)
2379 / MAX_UCODE_BEACON_INTERVAL;
2380 new_val = beacon_val / beacon_factor;
2382 return cpu_to_le16(new_val);
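/*
 * Editorial worked example (values are illustrative): the uCode cannot take
 * a beacon interval above MAX_UCODE_BEACON_INTERVAL (4096 TU).  A typical
 * 100 TU interval gives beacon_factor = (100 + 4096) / 4096 = 1 and passes
 * through unchanged, while 8192 TU gives beacon_factor = 3 and is scaled
 * down to 8192 / 3 = 2730 TU, back inside the supported range.
 */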
2385 static void iwl_setup_rxon_timing(struct iwl_priv *priv)
2387 u64 interval_tm_unit;
2389 unsigned long flags;
2390 struct ieee80211_conf *conf = NULL;
2393 conf = ieee80211_get_hw_conf(priv->hw);
2395 spin_lock_irqsave(&priv->lock, flags);
2396 priv->rxon_timing.timestamp.dw[1] = cpu_to_le32(priv->timestamp1);
2397 priv->rxon_timing.timestamp.dw[0] = cpu_to_le32(priv->timestamp0);
2399 priv->rxon_timing.listen_interval = INTEL_CONN_LISTEN_INTERVAL;
2401 tsf = priv->timestamp1;
2402 tsf = ((tsf << 32) | priv->timestamp0);
2404 beacon_int = priv->beacon_int;
2405 spin_unlock_irqrestore(&priv->lock, flags);
2407 if (priv->iw_mode == IEEE80211_IF_TYPE_STA) {
2408 if (beacon_int == 0) {
2409 priv->rxon_timing.beacon_interval = cpu_to_le16(100);
2410 priv->rxon_timing.beacon_init_val = cpu_to_le32(102400);
2412 priv->rxon_timing.beacon_interval =
2413 cpu_to_le16(beacon_int);
2414 priv->rxon_timing.beacon_interval =
2415 iwl_adjust_beacon_interval(
2416 le16_to_cpu(priv->rxon_timing.beacon_interval));
2419 priv->rxon_timing.atim_window = 0;
2421 priv->rxon_timing.beacon_interval =
2422 iwl_adjust_beacon_interval(conf->beacon_int);
2423 /* TODO: we need to get atim_window from the upper stack;
2424 * for now we set it to 0 */
2425 priv->rxon_timing.atim_window = 0;
2429 (le16_to_cpu(priv->rxon_timing.beacon_interval) * 1024);
2430 result = do_div(tsf, interval_tm_unit);
2431 priv->rxon_timing.beacon_init_val =
2432 cpu_to_le32((u32) ((u64) interval_tm_unit - result));
2435 ("beacon interval %d beacon timer %d beacon tim %d\n",
2436 le16_to_cpu(priv->rxon_timing.beacon_interval),
2437 le32_to_cpu(priv->rxon_timing.beacon_init_val),
2438 le16_to_cpu(priv->rxon_timing.atim_window));
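/*
 * Editorial sketch of the beacon timing math above: interval_tm_unit is the
 * beacon interval converted to usec (TU * 1024), and do_div() leaves
 * tsf % interval_tm_unit in `result`, so beacon_init_val ends up as the
 * time remaining until the next expected beacon.  E.g. a 100 TU interval is
 * 102400 usec; if result == 30000, beacon_init_val = 102400 - 30000 = 72400.
 */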
2441 static int iwl_scan_initiate(struct iwl_priv *priv)
2443 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
2444 IWL_ERROR("APs don't scan.\n");
2448 if (!iwl_is_ready_rf(priv)) {
2449 IWL_DEBUG_SCAN("Aborting scan due to not ready.\n");
2453 if (test_bit(STATUS_SCANNING, &priv->status)) {
2454 IWL_DEBUG_SCAN("Scan already in progress.\n");
2458 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
2459 IWL_DEBUG_SCAN("Scan request while abort pending. "
2464 IWL_DEBUG_INFO("Starting scan...\n");
2465 priv->scan_bands = 2;
2466 set_bit(STATUS_SCANNING, &priv->status);
2467 priv->scan_start = jiffies;
2468 priv->scan_pass_start = priv->scan_start;
2470 queue_work(priv->workqueue, &priv->request_scan);
2475 static int iwl_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt)
2477 struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
2480 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
2482 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
2487 static void iwl_set_flags_for_phymode(struct iwl_priv *priv, u8 phymode)
2489 if (phymode == MODE_IEEE80211A) {
2490 priv->staging_rxon.flags &=
2491 ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
2492 | RXON_FLG_CCK_MSK);
2493 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
2495 /* Copied from iwl_bg_post_associate() */
2496 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
2497 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
2499 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
2501 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
2502 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
2504 priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
2505 priv->staging_rxon.flags |= RXON_FLG_AUTO_DETECT_MSK;
2506 priv->staging_rxon.flags &= ~RXON_FLG_CCK_MSK;
2511 * Initialize the rxon structure with default values from the eeprom
2513 static void iwl_connection_init_rx_config(struct iwl_priv *priv)
2515 const struct iwl_channel_info *ch_info;
2517 memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon));
2519 switch (priv->iw_mode) {
2520 case IEEE80211_IF_TYPE_AP:
2521 priv->staging_rxon.dev_type = RXON_DEV_TYPE_AP;
2524 case IEEE80211_IF_TYPE_STA:
2525 priv->staging_rxon.dev_type = RXON_DEV_TYPE_ESS;
2526 priv->staging_rxon.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
2529 case IEEE80211_IF_TYPE_IBSS:
2530 priv->staging_rxon.dev_type = RXON_DEV_TYPE_IBSS;
2531 priv->staging_rxon.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
2532 priv->staging_rxon.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
2533 RXON_FILTER_ACCEPT_GRP_MSK;
2536 case IEEE80211_IF_TYPE_MNTR:
2537 priv->staging_rxon.dev_type = RXON_DEV_TYPE_SNIFFER;
2538 priv->staging_rxon.filter_flags = RXON_FILTER_PROMISC_MSK |
2539 RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_ACCEPT_GRP_MSK;
2544 /* TODO: Figure out when short_preamble would be set and cache from
2546 if (!hw_to_local(priv->hw)->short_preamble)
2547 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
2549 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
2552 ch_info = iwl_get_channel_info(priv, priv->phymode,
2553 le16_to_cpu(priv->staging_rxon.channel));
2556 ch_info = &priv->channel_info[0];
2559 * In some cases the A-band channels are all non-IBSS;
2560 * in that case force a B/G channel.
2562 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) &&
2563 !(is_channel_ibss(ch_info)))
2564 ch_info = &priv->channel_info[0];
2566 priv->staging_rxon.channel = cpu_to_le16(ch_info->channel);
2567 if (is_channel_a_band(ch_info))
2568 priv->phymode = MODE_IEEE80211A;
2570 priv->phymode = MODE_IEEE80211G;
2572 iwl_set_flags_for_phymode(priv, priv->phymode);
2574 priv->staging_rxon.ofdm_basic_rates =
2575 (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
2576 priv->staging_rxon.cck_basic_rates =
2577 (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
2579 priv->staging_rxon.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED_MSK |
2580 RXON_FLG_CHANNEL_MODE_PURE_40_MSK);
2581 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
2582 memcpy(priv->staging_rxon.wlap_bssid_addr, priv->mac_addr, ETH_ALEN);
2583 priv->staging_rxon.ofdm_ht_single_stream_basic_rates = 0xff;
2584 priv->staging_rxon.ofdm_ht_dual_stream_basic_rates = 0xff;
2585 iwl4965_set_rxon_chain(priv);
2588 static int iwl_set_mode(struct iwl_priv *priv, int mode)
2590 if (!iwl_is_ready_rf(priv))
2593 if (mode == IEEE80211_IF_TYPE_IBSS) {
2594 const struct iwl_channel_info *ch_info;
2596 ch_info = iwl_get_channel_info(priv,
2598 le16_to_cpu(priv->staging_rxon.channel));
2600 if (!ch_info || !is_channel_ibss(ch_info)) {
2601 IWL_ERROR("channel %d not IBSS channel\n",
2602 le16_to_cpu(priv->staging_rxon.channel));
2607 cancel_delayed_work(&priv->scan_check);
2608 if (iwl_scan_cancel_timeout(priv, 100)) {
2609 IWL_WARNING("Aborted scan still in progress after 100ms\n");
2610 IWL_DEBUG_MAC80211("leaving - scan abort failed.\n");
2614 priv->iw_mode = mode;
2616 iwl_connection_init_rx_config(priv);
2617 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
2619 iwl_clear_stations_table(priv);
2621 iwl_commit_rxon(priv);
2626 static void iwl_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
2627 struct ieee80211_tx_control *ctl,
2628 struct iwl_cmd *cmd,
2629 struct sk_buff *skb_frag,
2632 struct iwl_hw_key *keyinfo = &priv->stations[ctl->key_idx].keyinfo;
2634 switch (keyinfo->alg) {
2636 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_CCM;
2637 memcpy(cmd->cmd.tx.key, keyinfo->key, keyinfo->keylen);
2638 IWL_DEBUG_TX("tx_cmd with aes hwcrypto\n");
2643 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_TKIP;
2646 memcpy(cmd->cmd.tx.tkip_mic.byte, skb_frag->tail - 8,
2649 memset(cmd->cmd.tx.tkip_mic.byte, 0, 8);
2654 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_WEP |
2655 (ctl->key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT;
2657 if (keyinfo->keylen == 13)
2658 cmd->cmd.tx.sec_ctl |= TX_CMD_SEC_KEY128;
2660 memcpy(&cmd->cmd.tx.key[3], keyinfo->key, keyinfo->keylen);
2662 IWL_DEBUG_TX("Configuring packet for WEP encryption "
2663 "with key %d\n", ctl->key_idx);
2667 printk(KERN_ERR "Unknown encode alg %d\n", keyinfo->alg);
2673 * Build the basic (non-rate, non-security) part of the REPLY_TX command.
2675 static void iwl_build_tx_cmd_basic(struct iwl_priv *priv,
2676 struct iwl_cmd *cmd,
2677 struct ieee80211_tx_control *ctrl,
2678 struct ieee80211_hdr *hdr,
2679 int is_unicast, u8 std_id)
2682 u16 fc = le16_to_cpu(hdr->frame_control);
2683 __le32 tx_flags = cmd->cmd.tx.tx_flags;
2685 cmd->cmd.tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
2686 if (!(ctrl->flags & IEEE80211_TXCTL_NO_ACK)) {
2687 tx_flags |= TX_CMD_FLG_ACK_MSK;
2688 if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
2689 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2690 if (ieee80211_is_probe_response(fc) &&
2691 !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
2692 tx_flags |= TX_CMD_FLG_TSF_MSK;
2694 tx_flags &= (~TX_CMD_FLG_ACK_MSK);
2695 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2698 cmd->cmd.tx.sta_id = std_id;
2699 if (ieee80211_get_morefrag(hdr))
2700 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
2702 qc = ieee80211_get_qos_ctrl(hdr);
2704 cmd->cmd.tx.tid_tspec = (u8) (le16_to_cpu(*qc) & 0xf);
2705 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
2707 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2709 if (ctrl->flags & IEEE80211_TXCTL_USE_RTS_CTS) {
2710 tx_flags |= TX_CMD_FLG_RTS_MSK;
2711 tx_flags &= ~TX_CMD_FLG_CTS_MSK;
2712 } else if (ctrl->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) {
2713 tx_flags &= ~TX_CMD_FLG_RTS_MSK;
2714 tx_flags |= TX_CMD_FLG_CTS_MSK;
2717 if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
2718 tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
2720 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
2721 if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) {
2722 if ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ ||
2723 (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
2724 cmd->cmd.tx.timeout.pm_frame_timeout =
2727 cmd->cmd.tx.timeout.pm_frame_timeout =
2730 cmd->cmd.tx.timeout.pm_frame_timeout = 0;
2732 cmd->cmd.tx.driver_txop = 0;
2733 cmd->cmd.tx.tx_flags = tx_flags;
2734 cmd->cmd.tx.next_frame_len = 0;
2737 static int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
2740 u16 fc = le16_to_cpu(hdr->frame_control);
2741 DECLARE_MAC_BUF(mac);
2743 /* If this frame is broadcast or is not a data frame, then use the broadcast
2745 if (((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) ||
2746 is_multicast_ether_addr(hdr->addr1))
2747 return priv->hw_setting.bcast_sta_id;
2749 switch (priv->iw_mode) {
2751 /* If this frame is part of a BSS network (we're a station), then
2752 * we use the AP's station id */
2753 case IEEE80211_IF_TYPE_STA:
2756 /* If we are an AP, then find the station, or use BCAST */
2757 case IEEE80211_IF_TYPE_AP:
2758 sta_id = iwl_hw_find_station(priv, hdr->addr1);
2759 if (sta_id != IWL_INVALID_STATION)
2761 return priv->hw_setting.bcast_sta_id;
2763 /* If this frame is part of an IBSS network, then we use the
2764 * target-specific station id */
2765 case IEEE80211_IF_TYPE_IBSS:
2766 sta_id = iwl_hw_find_station(priv, hdr->addr1);
2767 if (sta_id != IWL_INVALID_STATION)
2770 sta_id = iwl_add_station(priv, hdr->addr1, 0, CMD_ASYNC);
2772 if (sta_id != IWL_INVALID_STATION)
2775 IWL_DEBUG_DROP("Station %s not in station map. "
2776 "Defaulting to broadcast...\n",
2777 print_mac(mac, hdr->addr1));
2778 iwl_print_hex_dump(IWL_DL_DROP, (u8 *) hdr, sizeof(*hdr));
2779 return priv->hw_setting.bcast_sta_id;
2782 IWL_WARNING("Unknown mode of operation: %d\n", priv->iw_mode);
2783 return priv->hw_setting.bcast_sta_id;
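/*
 * Editorial summary of iwl_get_sta_id() above: broadcast/multicast and
 * non-data frames always use the broadcast station entry; STA mode uses the
 * AP's station id; AP mode looks the destination up with
 * iwl_hw_find_station() and falls back to broadcast; IBSS mode additionally
 * tries iwl_add_station() before giving up and using broadcast.
 */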
2788 * Start the REPLY_TX command process for one skb
2790 static int iwl_tx_skb(struct iwl_priv *priv,
2791 struct sk_buff *skb, struct ieee80211_tx_control *ctl)
2793 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2794 struct iwl_tfd_frame *tfd;
2796 int txq_id = ctl->queue;
2797 struct iwl_tx_queue *txq = NULL;
2798 struct iwl_queue *q = NULL;
2799 dma_addr_t phys_addr;
2800 dma_addr_t txcmd_phys;
2801 struct iwl_cmd *out_cmd = NULL;
2802 u16 len, idx, len_org;
2803 u8 id, hdr_len, unicast;
2808 u8 wait_write_ptr = 0;
2809 unsigned long flags;
2812 spin_lock_irqsave(&priv->lock, flags);
2813 if (iwl_is_rfkill(priv)) {
2814 IWL_DEBUG_DROP("Dropping - RF KILL\n");
2818 if (!priv->interface_id) {
2819 IWL_DEBUG_DROP("Dropping - !priv->interface_id\n");
2823 if ((ctl->tx_rate & 0xFF) == IWL_INVALID_RATE) {
2824 IWL_ERROR("ERROR: No TX rate available.\n");
2828 unicast = !is_multicast_ether_addr(hdr->addr1);
2831 fc = le16_to_cpu(hdr->frame_control);
2833 #ifdef CONFIG_IWLWIFI_DEBUG
2834 if (ieee80211_is_auth(fc))
2835 IWL_DEBUG_TX("Sending AUTH frame\n");
2836 else if (ieee80211_is_assoc_request(fc))
2837 IWL_DEBUG_TX("Sending ASSOC frame\n");
2838 else if (ieee80211_is_reassoc_request(fc))
2839 IWL_DEBUG_TX("Sending REASSOC frame\n");
2842 if (!iwl_is_associated(priv) &&
2843 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)) {
2844 IWL_DEBUG_DROP("Dropping - !iwl_is_associated\n");
2848 spin_unlock_irqrestore(&priv->lock, flags);
2850 hdr_len = ieee80211_get_hdrlen(fc);
2851 sta_id = iwl_get_sta_id(priv, hdr);
2852 if (sta_id == IWL_INVALID_STATION) {
2853 DECLARE_MAC_BUF(mac);
2855 IWL_DEBUG_DROP("Dropping - INVALID STATION: %s\n",
2856 print_mac(mac, hdr->addr1));
2860 IWL_DEBUG_RATE("station Id %d\n", sta_id);
2862 qc = ieee80211_get_qos_ctrl(hdr);
2864 u8 tid = (u8)(le16_to_cpu(*qc) & 0xf);
2865 seq_number = priv->stations[sta_id].tid[tid].seq_number &
2867 hdr->seq_ctrl = cpu_to_le16(seq_number) |
2869 __constant_cpu_to_le16(IEEE80211_SCTL_FRAG));
2871 #ifdef CONFIG_IWLWIFI_HT
2872 #ifdef CONFIG_IWLWIFI_HT_AGG
2873 /* aggregation is on for this <sta,tid> */
2874 if (ctl->flags & IEEE80211_TXCTL_HT_MPDU_AGG)
2875 txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
2876 #endif /* CONFIG_IWLWIFI_HT_AGG */
2877 #endif /* CONFIG_IWLWIFI_HT */
2879 txq = &priv->txq[txq_id];
2882 spin_lock_irqsave(&priv->lock, flags);
2884 tfd = &txq->bd[q->first_empty];
2885 memset(tfd, 0, sizeof(*tfd));
2886 control_flags = (u32 *) tfd;
2887 idx = get_cmd_index(q, q->first_empty, 0);
2889 memset(&(txq->txb[q->first_empty]), 0, sizeof(struct iwl_tx_info));
2890 txq->txb[q->first_empty].skb[0] = skb;
2891 memcpy(&(txq->txb[q->first_empty].status.control),
2892 ctl, sizeof(struct ieee80211_tx_control));
2893 out_cmd = &txq->cmd[idx];
2894 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
2895 memset(&out_cmd->cmd.tx, 0, sizeof(out_cmd->cmd.tx));
2896 out_cmd->hdr.cmd = REPLY_TX;
2897 out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
2898 INDEX_TO_SEQ(q->first_empty)));
2899 /* copy frags header */
2900 memcpy(out_cmd->cmd.tx.hdr, hdr, hdr_len);
2902 /* hdr = (struct ieee80211_hdr *)out_cmd->cmd.tx.hdr; */
2903 len = priv->hw_setting.tx_cmd_len +
2904 sizeof(struct iwl_cmd_header) + hdr_len;
2907 len = (len + 3) & ~3;
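/*
 * Editorial note: the expression above rounds the Tx command length up to
 * the next 4-byte boundary (e.g. 41 -> 44), presumably so the command
 * buffer attached to the TFD below stays dword aligned.
 */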
2914 txcmd_phys = txq->dma_addr_cmd + sizeof(struct iwl_cmd) * idx +
2915 offsetof(struct iwl_cmd, hdr);
2917 iwl_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len);
2919 if (!(ctl->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT))
2920 iwl_build_tx_cmd_hwcrypto(priv, ctl, out_cmd, skb, 0);
2922 /* 802.11 null functions have no payload... */
2923 len = skb->len - hdr_len;
2925 phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
2926 len, PCI_DMA_TODEVICE);
2927 iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, len);
2931 out_cmd->cmd.tx.tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
2933 len = (u16)skb->len;
2934 out_cmd->cmd.tx.len = cpu_to_le16(len);
2936 /* TODO need this for burst mode later on */
2937 iwl_build_tx_cmd_basic(priv, out_cmd, ctl, hdr, unicast, sta_id);
2939 /* set is_hcca to 0; it probably will never be implemented */
2940 iwl_hw_build_tx_cmd_rate(priv, out_cmd, ctl, hdr, sta_id, 0);
2942 iwl4965_tx_cmd(priv, out_cmd, sta_id, txcmd_phys,
2943 hdr, hdr_len, ctl, NULL);
2945 if (!ieee80211_get_morefrag(hdr)) {
2946 txq->need_update = 1;
2948 u8 tid = (u8)(le16_to_cpu(*qc) & 0xf);
2949 priv->stations[sta_id].tid[tid].seq_number = seq_number;
2953 txq->need_update = 0;
2956 iwl_print_hex_dump(IWL_DL_TX, out_cmd->cmd.payload,
2957 sizeof(out_cmd->cmd.tx));
2959 iwl_print_hex_dump(IWL_DL_TX, (u8 *)out_cmd->cmd.tx.hdr,
2960 ieee80211_get_hdrlen(fc));
2962 iwl4965_tx_queue_update_wr_ptr(priv, txq, len);
2964 q->first_empty = iwl_queue_inc_wrap(q->first_empty, q->n_bd);
2965 rc = iwl_tx_queue_update_write_ptr(priv, txq);
2966 spin_unlock_irqrestore(&priv->lock, flags);
2971 if ((iwl_queue_space(q) < q->high_mark)
2972 && priv->mac80211_registered) {
2973 if (wait_write_ptr) {
2974 spin_lock_irqsave(&priv->lock, flags);
2975 txq->need_update = 1;
2976 iwl_tx_queue_update_write_ptr(priv, txq);
2977 spin_unlock_irqrestore(&priv->lock, flags);
2980 ieee80211_stop_queue(priv->hw, ctl->queue);
2986 spin_unlock_irqrestore(&priv->lock, flags);
2991 static void iwl_set_rate(struct iwl_priv *priv)
2993 const struct ieee80211_hw_mode *hw = NULL;
2994 struct ieee80211_rate *rate;
2997 hw = iwl_get_hw_mode(priv, priv->phymode);
2999 priv->active_rate = 0;
3000 priv->active_rate_basic = 0;
3002 IWL_DEBUG_RATE("Setting rates for 802.11%c\n",
3003 hw->mode == MODE_IEEE80211A ?
3004 'a' : ((hw->mode == MODE_IEEE80211B) ? 'b' : 'g'));
3006 for (i = 0; i < hw->num_rates; i++) {
3007 rate = &(hw->rates[i]);
3008 if ((rate->val < IWL_RATE_COUNT) &&
3009 (rate->flags & IEEE80211_RATE_SUPPORTED)) {
3010 IWL_DEBUG_RATE("Adding rate index %d (plcp %d)%s\n",
3011 rate->val, iwl_rates[rate->val].plcp,
3012 (rate->flags & IEEE80211_RATE_BASIC) ?
3014 priv->active_rate |= (1 << rate->val);
3015 if (rate->flags & IEEE80211_RATE_BASIC)
3016 priv->active_rate_basic |= (1 << rate->val);
3018 IWL_DEBUG_RATE("Not adding rate %d (plcp %d)\n",
3019 rate->val, iwl_rates[rate->val].plcp);
3022 IWL_DEBUG_RATE("Set active_rate = %0x, active_rate_basic = %0x\n",
3023 priv->active_rate, priv->active_rate_basic);
3026 * If a basic rate is configured, then use it (adding IWL_RATE_1M_MASK)
3027 * otherwise set it to the default of all CCK rates and 6, 12, 24 for
3030 if (priv->active_rate_basic & IWL_CCK_BASIC_RATES_MASK)
3031 priv->staging_rxon.cck_basic_rates =
3032 ((priv->active_rate_basic &
3033 IWL_CCK_RATES_MASK) >> IWL_FIRST_CCK_RATE) & 0xF;
3035 priv->staging_rxon.cck_basic_rates =
3036 (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
3038 if (priv->active_rate_basic & IWL_OFDM_BASIC_RATES_MASK)
3039 priv->staging_rxon.ofdm_basic_rates =
3040 ((priv->active_rate_basic &
3041 (IWL_OFDM_BASIC_RATES_MASK | IWL_RATE_6M_MASK)) >>
3042 IWL_FIRST_OFDM_RATE) & 0xFF;
3044 priv->staging_rxon.ofdm_basic_rates =
3045 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
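/*
 * Editorial example for the mask handling above (rates assumed for
 * illustration): if active_rate_basic contains only the 1 and 2 Mbps CCK
 * rates, cck_basic_rates keeps just those two bits after the
 * >> IWL_FIRST_CCK_RATE shift, while ofdm_basic_rates falls back to the
 * IWL_OFDM_BASIC_RATES_MASK default (6, 12 and 24 Mbps per the comment
 * above).
 */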
3048 static void iwl_radio_kill_sw(struct iwl_priv *priv, int disable_radio)
3050 unsigned long flags;
3052 if (!!disable_radio == test_bit(STATUS_RF_KILL_SW, &priv->status))
3055 IWL_DEBUG_RF_KILL("Manual SW RF KILL set to: RADIO %s\n",
3056 disable_radio ? "OFF" : "ON");
3058 if (disable_radio) {
3059 iwl_scan_cancel(priv);
3060 /* FIXME: This is a workaround for AP */
3061 if (priv->iw_mode != IEEE80211_IF_TYPE_AP) {
3062 spin_lock_irqsave(&priv->lock, flags);
3063 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
3064 CSR_UCODE_SW_BIT_RFKILL);
3065 spin_unlock_irqrestore(&priv->lock, flags);
3066 iwl_send_card_state(priv, CARD_STATE_CMD_DISABLE, 0);
3067 set_bit(STATUS_RF_KILL_SW, &priv->status);
3072 spin_lock_irqsave(&priv->lock, flags);
3073 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
3075 clear_bit(STATUS_RF_KILL_SW, &priv->status);
3076 spin_unlock_irqrestore(&priv->lock, flags);
3081 spin_lock_irqsave(&priv->lock, flags);
3082 iwl_read32(priv, CSR_UCODE_DRV_GP1);
3083 if (!iwl_grab_restricted_access(priv))
3084 iwl_release_restricted_access(priv);
3085 spin_unlock_irqrestore(&priv->lock, flags);
3087 if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
3088 IWL_DEBUG_RF_KILL("Can not turn radio back on - "
3089 "disabled by HW switch\n");
3093 queue_work(priv->workqueue, &priv->restart);
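/*
 * Editorial note on iwl_radio_kill_sw() above: software rfkill is driven
 * through CSR_UCODE_DRV_GP1 -- setting CSR_UCODE_SW_BIT_RFKILL tells the
 * uCode to switch the radio off, clearing it (followed by the queued
 * restart work) turns it back on, and the hardware switch
 * (STATUS_RF_KILL_HW) always wins over a software request to re-enable.
 */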
3097 void iwl_set_decrypted_flag(struct iwl_priv *priv, struct sk_buff *skb,
3098 u32 decrypt_res, struct ieee80211_rx_status *stats)
3101 le16_to_cpu(((struct ieee80211_hdr *)skb->data)->frame_control);
3103 if (priv->active_rxon.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
3106 if (!(fc & IEEE80211_FCTL_PROTECTED))
3109 IWL_DEBUG_RX("decrypt_res:0x%x\n", decrypt_res);
3110 switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
3111 case RX_RES_STATUS_SEC_TYPE_TKIP:
3112 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
3113 RX_RES_STATUS_BAD_ICV_MIC)
3114 stats->flag |= RX_FLAG_MMIC_ERROR;
3115 case RX_RES_STATUS_SEC_TYPE_WEP:
3116 case RX_RES_STATUS_SEC_TYPE_CCMP:
3117 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
3118 RX_RES_STATUS_DECRYPT_OK) {
3119 IWL_DEBUG_RX("hw decrypt successfully!!!\n");
3120 stats->flag |= RX_FLAG_DECRYPTED;
3129 void iwl_handle_data_packet_monitor(struct iwl_priv *priv,
3130 struct iwl_rx_mem_buffer *rxb,
3131 void *data, short len,
3132 struct ieee80211_rx_status *stats,
3135 struct iwl_rt_rx_hdr *iwl_rt;
3137 /* First cache any information we need before we overwrite
3138 * the information provided in the skb from the hardware */